diff --git a/ansible/TestbedProcessing.py b/ansible/TestbedProcessing.py index f4dbf6658c..6e0075407d 100644 --- a/ansible/TestbedProcessing.py +++ b/ansible/TestbedProcessing.py @@ -492,47 +492,62 @@ def makeLab(data, devices, testbed, outfile): except AttributeError: print("\t\t" + host + " asics_host_ipv6 not found") - try: #get voq_inband_ip - voq_inband_ip = dev.get("voq_inband_ip") - if voq_inband_ip is not None: - entry += "\tvoq_inband_ip=" + str( voq_inband_ip ) - except AttributeError: - print("\t\t" + host + " voq_inband_ip not found") - - try: #get voq_inband_ipv6 - voq_inband_ipv6 = dev.get("voq_inband_ipv6") - if voq_inband_ipv6 is not None: - entry += "\tvoq_inband_ipv6=" + str( voq_inband_ipv6 ) - except AttributeError: - print("\t\t" + host + " voq_inband_ipv6 not found") - - try: #get voq_inband_intf - voq_inband_intf = dev.get("voq_inband_intf") - if voq_inband_intf is not None: - entry += "\tvoq_inband_intf=" + str( voq_inband_intf ) - except AttributeError: - print("\t\t" + host + " voq_inband_intf not found") - - try: #get voq_inband_type - voq_inband_type = dev.get("voq_inband_type") - if voq_inband_type is not None: - entry += "\tvoq_inband_type=" + str( voq_inband_type ) - except AttributeError: - print("\t\t" + host + " voq_inband_type not found") - try: #get switch_type switch_type = dev.get("switch_type") if switch_type is not None: - entry += "\tswitch_type=" + str( switch_type ) - except AttributeError: - print("\t\t" + host + " switch_type not found") + entry += "\tswitch_type=" + str( switch_type ) + if switch_type == 'voq' and card_type != 'supervisor': + if num_asics is None: + num_asics = 1 + + # All fields are a list. For single asic the list is of size 1. + switchids = dev.get("switchids") # switchids, single asic example "[4]", 3 asic example "[4,6,8]" + voq_inband_ip = dev.get("voq_inband_ip") # voq_inband_ip + voq_inband_ipv6 = dev.get("voq_inband_ipv6") # voq_inband_ipv6 + voq_inband_intf = dev.get("voq_inband_intf") # voq_inband_intf + voq_inband_type = dev.get("voq_inband_type") # voq_inband_type + max_cores = dev.get("max_cores") # max cores + lo4096_ip = dev.get("loopback4096_ip") # loopback4096_ip + lo4096_ipv6 = dev.get("loopback4096_ipv6") # loopback4096_ipv6 + num_cores_per_asic = dev.get("num_cores_per_asic", 1) # number of cores per asic - used to calculate the switchids, assumed to be the same for all linecards + + # Add fields + if switchids is None: + switchids = [start_switchid + (asic_id * num_cores_per_asic) for asic_id in range(num_asics)] + entry += "\tswitchids=" + str(switchids) + + if voq_inband_ip is None: + voq_inband_ip = ["1.1.1.{}/32".format(start_switchid + asic_id) for asic_id in range(num_asics)] + entry += "\tvoq_inband_ip=" + str(voq_inband_ip) + + if voq_inband_ipv6 is None: + voq_inband_ipv6 = ["1111::1:{}/128".format(start_switchid + asic_id) for asic_id in range(num_asics)] + entry += "\tvoq_inband_ipv6=" + str(voq_inband_ipv6) + + if voq_inband_intf is None: + voq_inband_intf = ["Ethernet-IB{}".format(asic_id) for asic_id in range(num_asics)] + entry += "\tvoq_inband_intf=" + str(voq_inband_intf) + + if voq_inband_type is None: + voq_inband_type = "port" + entry += "\tvoq_inband_type=" + voq_inband_type + + if max_cores is None: + max_cores = 48 + entry += "\tmax_cores=" + str(max_cores) + + if lo4096_ip is None: + lo4096_ip = ["8.0.0.{}/32".format(start_switchid + asic_id) for asic_id in range(num_asics)] + entry += "\tloopback4096_ip=" + str(lo4096_ip) + + if lo4096_ipv6 is None: + lo4096_ipv6 =
["2603:10e2:400::{}/128".format(start_switchid + asic_id) for asic_id in range(num_asics)] + entry += "\tloopback4096_ipv6=" + lo4096_ipv6 + + start_switchid += (num_asics * num_cores_per_asic) - try: #get max_cores - max_cores = dev.get("max_cores") - if max_cores is not None: - entry += "\tmax_cores=" + str( max_cores ) except AttributeError: - print("\t\t" + host + " max_cores not found") + print("\t\t" + host + " switch_type not found") try: #get os os = dev.get("os") diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml index 16c0a059c3..0948a47420 100644 --- a/ansible/config_sonic_basedon_testbed.yml +++ b/ansible/config_sonic_basedon_testbed.yml @@ -83,7 +83,7 @@ hwsku: "{{ hwsku }}" card_type: "{{ card_type | default('fixed') }}" hostname: "{{ inventory_hostname | default('') }}" - start_switchid: "{{ start_switchid | default(0) }}" + switchids: "{{ switchids | default([]) }}" when: deploy is defined and deploy|bool == true - name: find interface name mapping and individual interface speed if defined with local data @@ -92,7 +92,8 @@ num_asic: "{{ num_asics }}" card_type: "{{ card_type | default('fixed') }}" hostname: "{{ inventory_hostname | default('') }}" - start_switchid: "{{ start_switchid | default(0) }}" + switchids: "{{ switchids | default([]) }}" + slotid: "{{ slot_num | default(None) }}" delegate_to: localhost when: deploy is not defined or deploy|bool == false @@ -101,6 +102,7 @@ num_fabric_asic: "{{ num_fabric_asics | default(0) }}" asics_host_basepfx: "{{ asics_host_ip | default(None) }}" asics_host_basepfx6: "{{ asics_host_ipv6 | default(None) }}" + delegate_to: localhost - name: set all VoQ system ports information set_fact: @@ -108,13 +110,36 @@ when: hostvars[item]['sysports'] is defined loop: "{{ ansible_play_batch }}" - - name: set all VoQ information for iBGP + - name: set all v4 Inband Ip information for iBGP chassis voq set_fact: - all_inbands: "{{ all_inbands | default( [] ) + [ hostvars[item]['voq_inband_ip'] ] }}" - all_hostnames: "{{ all_hostnames | default( [] ) + [ item ] }}" + all_inbands: "{{ all_inbands | default( {} ) | combine( { item : hostvars[item]['voq_inband_ip']}) }}" when: hostvars[item]['voq_inband_ip'] is defined loop: "{{ ansible_play_batch }}" + - name: set all v6 Inband Ip information for iBGP chassis voq + set_fact: + all_inbands_ipv6: "{{ all_inbands_ipv6 | default( {} ) | combine( { item : hostvars[item]['voq_inband_ipv6']}) }}" + when: hostvars[item]['voq_inband_ipv6'] is defined + loop: "{{ ansible_play_batch }}" + + - name: set all Loopback4096 information for iBGP chassis + set_fact: + all_loopback4096: "{{ all_loopback4096 | default( {} ) | combine( { item : hostvars[item]['loopback4096_ip']}) }}" + when: hostvars[item]['loopback4096_ip'] is defined + loop: "{{ ansible_play_batch }}" + + - name: set all Loopback4096 ipv6 information for iBGP chassis + set_fact: + all_loopback4096_ipv6: "{{ all_loopback4096_ipv6 | default( {} ) | combine( { item : hostvars[item]['loopback4096_ipv6']}) }}" + when: hostvars[item]['loopback4096_ipv6'] is defined + loop: "{{ ansible_play_batch }}" + + - name: set all slot information for chassis + set_fact: + all_slots: "{{ all_slots | default( {} ) | combine( { item : hostvars[item]['slot_num']}) }}" + when: hostvars[item]['slot_num'] is defined + loop: "{{ ansible_play_batch }}" + - name: find all enabled host_interfaces set_fact: host_if_indexes: "{{ vm_topo_config['host_interfaces_by_dut'][dut_index|int] | 
difference(vm_topo_config['disabled_host_interfaces_by_dut'][dut_index|int]) }}" @@ -139,7 +164,7 @@ port_alias: "{{ port_alias }}" vlan_intfs: "{{ vlan_intfs }}" delegate_to: localhost - when: "'dualtor' in topo" + when: "'dualtor' in topo or 'cable' in topo" - name: set default vm file path set_fact: @@ -153,7 +178,7 @@ - name: gather testbed VM informations testbed_vm_info: base_vm={{ testbed_facts['vm_base'] }} topo={{ testbed_facts['topo'] }} vm_file={{ vm_file }} delegate_to: localhost - when: "VM_topo | bool" + when: "(VM_topo | bool) and ('cable' not in topo)" - name: find all vlan configurations for T0 topology vlan_config: @@ -179,11 +204,13 @@ set_fact: interface_to_vms: "{{ interface_to_vms|default([]) + [ {'name': item.key, 'ports': item.value['interface_indexes'][dut_index|int] }] }}" with_dict: "{{ vm_topo_config['vm'] }}" + when: "'cable' not in topo" - name: find all interface indexes connecting to VM set_fact: ifindex_to_vms: "{{ ifindex_to_vms|default([]) }} + {{ item.value['interface_indexes'][dut_index|int] }}" with_dict: "{{ vm_topo_config['vm'] }}" + when: "'cable' not in topo" - name: find all interface names set_fact: @@ -191,6 +218,7 @@ with_subelements: - "{{ interface_to_vms }}" - "ports" + when: "'cable' not in topo" # create map of VM to asic interface names - name: find all interface asic names diff --git a/ansible/group_vars/all/mux_simulator_http_port_map.yml b/ansible/group_vars/all/mux_simulator_http_port_map.yml index 01356f3238..053e5d26a4 100644 --- a/ansible/group_vars/all/mux_simulator_http_port_map.yml +++ b/ansible/group_vars/all/mux_simulator_http_port_map.yml @@ -19,7 +19,7 @@ mux_simulator_http_port: # On server1 dualtor-testbed-1: 8080 - dualtor-testbed-1: 8082 + dualtor-testbed-2: 8082 # On server2 dualtor-testbed-3: 8080 diff --git a/ansible/group_vars/sonic/sku-sensors-data.yml b/ansible/group_vars/sonic/sku-sensors-data.yml index 02d1e2cc3b..1047bd0286 100644 --- a/ansible/group_vars/sonic/sku-sensors-data.yml +++ b/ansible/group_vars/sonic/sku-sensors-data.yml @@ -4731,3 +4731,286 @@ sensors_checks: psu_skips: {} sensor_skip_per_version: {} + x86_64-8102_64h_o-r0: + alarms: + voltage: + - tps53679-i2c-20-58/CPU_U17_PVCCIN_VIN/in1_alarm + + - tps53679-i2c-20-58/CPU_U17_P1P05V_VIN/in2_alarm + + - tps53679-i2c-20-58/CPU_U17_PVCCIN_VOUT/in3_lcrit_alarm + - tps53679-i2c-20-58/CPU_U17_PVCCIN_VOUT/in3_crit_alarm + + - tps53679-i2c-20-58/CPU_U17_P1P05V_VOUT/in4_lcrit_alarm + - tps53679-i2c-20-58/CPU_U17_P1P05V_VOUT/in4_crit_alarm + + - tps53679-i2c-20-59/CPU_U117_P1P2V_VIN/in1_alarm + + - tps53679-i2c-20-59/CPU_U117_P1P05V_VIN/in2_alarm + + - tps53679-i2c-20-59/CPU_U117_P1P2V_VOUT/in3_lcrit_alarm + - tps53679-i2c-20-59/CPU_U117_P1P2V_VOUT/in3_crit_alarm + + - tps53679-i2c-20-59/CPU_U117_P1P05V_VOUT/in4_lcrit_alarm + - tps53679-i2c-20-59/CPU_U117_P1P05V_VOUT/in4_crit_alarm + + - tps53679-i2c-24-60/MB_GB_VDDS_L1_VIN/in1_alarm + + - tps53679-i2c-24-60/MB_GB_VDDA_L2_VIN/in2_alarm + + - tps53679-i2c-24-60/MB_GB_VDDS_L1_VOUT/in3_lcrit_alarm + - tps53679-i2c-24-60/MB_GB_VDDS_L1_VOUT/in3_crit_alarm + + - tps53679-i2c-24-60/MB_GB_VDDA_L2_VOUT/in4_lcrit_alarm + - tps53679-i2c-24-60/MB_GB_VDDA_L2_VOUT/in4_crit_alarm + + - pmbus-i2c-24-62/MB_GB_CORE_VIN_L1/in1_min_alarm + - pmbus-i2c-24-62/MB_GB_CORE_VIN_L1/in1_max_alarm + - pmbus-i2c-24-62/MB_GB_CORE_VIN_L1/in1_lcrit_alarm + - pmbus-i2c-24-62/MB_GB_CORE_VIN_L1/in1_crit_alarm + + - pmbus-i2c-24-62/MB_GB_CORE_VOUT_L1/in2_min_alarm + - pmbus-i2c-24-62/MB_GB_CORE_VOUT_L1/in2_max_alarm + - 
pmbus-i2c-24-62/MB_GB_CORE_VOUT_L1/in2_lcrit_alarm + - pmbus-i2c-24-62/MB_GB_CORE_VOUT_L1/in2_crit_alarm + + - tps53679-i2c-24-65/MB_3_3V_R_L1_VIN/in1_alarm + + - tps53679-i2c-24-65/MB_GB_VDDCK_L2_VIN/in2_alarm + + - tps53679-i2c-24-65/MB_3_3V_R_L1_VOUT/in3_lcrit_alarm + - tps53679-i2c-24-65/MB_3_3V_R_L1_VOUT/in3_crit_alarm + + - tps53679-i2c-24-65/MB_GB_VDDCK_L2_VOUT/in4_lcrit_alarm + - tps53679-i2c-24-65/MB_GB_VDDCK_L2_VOUT/in4_crit_alarm + + - tps53679-i2c-24-64/MB_3_3V_L_L1_VIN/in1_alarm + + - tps53679-i2c-24-64/MB_3_3V_L_L1_VOUT/in3_lcrit_alarm + - tps53679-i2c-24-64/MB_3_3V_L_L1_VOUT/in3_crit_alarm + + - ltc2979-i2c-25-5d/GB_PCIE_VDDH/in2_min_alarm + - ltc2979-i2c-25-5d/GB_PCIE_VDDH/in2_max_alarm + - ltc2979-i2c-25-5d/GB_PCIE_VDDH/in2_lcrit_alarm + - ltc2979-i2c-25-5d/GB_PCIE_VDDH/in2_crit_alarm + + - ltc2979-i2c-25-5d/GB_PCIE_VDDACK/in3_min_alarm + - ltc2979-i2c-25-5d/GB_PCIE_VDDACK/in3_max_alarm + - ltc2979-i2c-25-5d/GB_PCIE_VDDACK/in3_lcrit_alarm + - ltc2979-i2c-25-5d/GB_PCIE_VDDACK/in3_crit_alarm + + - ltc2979-i2c-25-5d/GB_P1V8_VDDIO/in5_min_alarm + - ltc2979-i2c-25-5d/GB_P1V8_VDDIO/in5_max_alarm + - ltc2979-i2c-25-5d/GB_P1V8_VDDIO/in5_lcrit_alarm + - ltc2979-i2c-25-5d/GB_P1V8_VDDIO/in5_crit_alarm + + - ltc2979-i2c-25-5d/GB_P1V8_PLLVDD/in6_min_alarm + - ltc2979-i2c-25-5d/GB_P1V8_PLLVDD/in6_max_alarm + - ltc2979-i2c-25-5d/GB_P1V8_PLLVDD/in6_lcrit_alarm + - ltc2979-i2c-25-5d/GB_P1V8_PLLVDD/in6_crit_alarm + + - ltc2979-i2c-25-5e/MB_A1V8/in3_min_alarm + - ltc2979-i2c-25-5e/MB_A1V8/in3_max_alarm + - ltc2979-i2c-25-5e/MB_A1V8/in3_lcrit_alarm + - ltc2979-i2c-25-5e/MB_A1V8/in3_crit_alarm + + - ltc2979-i2c-25-5e/MB_A1V/in4_min_alarm + - ltc2979-i2c-25-5e/MB_A1V/in4_max_alarm + - ltc2979-i2c-25-5e/MB_A1V/in4_lcrit_alarm + - ltc2979-i2c-25-5e/MB_A1V/in4_crit_alarm + + - ltc2979-i2c-25-5e/MB_A3V3/in5_min_alarm + - ltc2979-i2c-25-5e/MB_A3V3/in5_max_alarm + - ltc2979-i2c-25-5e/MB_A3V3/in5_lcrit_alarm + - ltc2979-i2c-25-5e/MB_A3V3/in5_crit_alarm + + - ltc2979-i2c-25-5e/MB_A1V2/in6_min_alarm + - ltc2979-i2c-25-5e/MB_A1V2/in6_max_alarm + - ltc2979-i2c-25-5e/MB_A1V2/in6_lcrit_alarm + - ltc2979-i2c-25-5e/MB_A1V2/in6_crit_alarm + + - ltc2979-i2c-25-5e/MB_P3V3/in7_min_alarm + - ltc2979-i2c-25-5e/MB_P3V3/in7_max_alarm + - ltc2979-i2c-25-5e/MB_P3V3/in7_lcrit_alarm + - ltc2979-i2c-25-5e/MB_P3V3/in7_crit_alarm + + current: + - tps53679-i2c-20-58/CPU_U17_PVCCIN_IOUT/curr1_max_alarm + - tps53679-i2c-20-58/CPU_U17_PVCCIN_IOUT/curr1_crit_alarm + + - tps53679-i2c-20-58/CPU_U17_P1P05V_IOUT/curr2_max_alarm + - tps53679-i2c-20-58/CPU_U17_P1P05V_IOUT/curr2_crit_alarm + + - tps53679-i2c-20-59/CPU_U117_P1P2V_IOUT/curr1_max_alarm + - tps53679-i2c-20-59/CPU_U117_P1P2V_IOUT/curr1_crit_alarm + + - tps53679-i2c-20-59/CPU_U117_P1P05V_IOUT/curr2_max_alarm + - tps53679-i2c-20-59/CPU_U117_P1P05V_IOUT/curr2_crit_alarm + + - tps53679-i2c-24-60/MB_GB_VDDS_L1_IOUT/curr1_max_alarm + - tps53679-i2c-24-60/MB_GB_VDDS_L1_IOUT/curr1_max_alarm + + - tps53679-i2c-24-60/MB_GB_VDDA_L2_IOUT/curr2_max_alarm + - tps53679-i2c-24-60/MB_GB_VDDA_L2_IOUT/curr2_crit_alarm + + - pmbus-i2c-24-62/MB_GB_CORE_IIN_L1/curr1_max_alarm + - pmbus-i2c-24-62/MB_GB_CORE_IIN_L1/curr1_crit_alarm + + - pmbus-i2c-24-62/MB_GB_CORE_IOUT_L1/curr2_max_alarm + - pmbus-i2c-24-62/MB_GB_CORE_IOUT_L1/curr2_lcrit_alarm + - pmbus-i2c-24-62/MB_GB_CORE_IOUT_L1/curr2_crit_alarm + + - tps53679-i2c-24-65/MB_3_3V_R_L1_IOUT/curr1_max_alarm + - tps53679-i2c-24-65/MB_3_3V_R_L1_IOUT/curr1_crit_alarm + + - tps53679-i2c-24-65/MB_GB_VDDCK_L2_IOUT/curr2_max_alarm + - 
tps53679-i2c-24-65/MB_GB_VDDCK_L2_IOUT/curr2_crit_alarm + + - tps53679-i2c-24-64/MB_3_3V_L_L1_IOUT/curr1_max_alarm + - tps53679-i2c-24-64/MB_3_3V_L_L1_IOUT/curr1_crit_alarm + + power: + - pmbus-i2c-24-62/pin/power1_alarm + + compares: + voltage: + - - tps53679-i2c-20-58/CPU_U17_PVCCIN_VIN/in1_input + - tps53679-i2c-20-58/CPU_U17_PVCCIN_VIN/in1_crit + + - - tps53679-i2c-20-58/CPU_U17_P1P05V_VIN/in2_input + - tps53679-i2c-20-58/CPU_U17_P1P05V_VIN/in2_crit + + - - tps53679-i2c-20-59/CPU_U117_P1P2V_VIN/in1_input + - tps53679-i2c-20-59/CPU_U117_P1P2V_VIN/in1_crit + + - - tps53679-i2c-20-59/CPU_U117_P1P05V_VIN/in2_input + - tps53679-i2c-20-59/CPU_U117_P1P05V_VIN/in2_crit + + - - tps53679-i2c-24-60/MB_GB_VDDS_L1_VIN/in1_input + - tps53679-i2c-24-60/MB_GB_VDDS_L1_VIN/in1_crit + + - - tps53679-i2c-24-60/MB_GB_VDDA_L2_VIN/in2_input + - tps53679-i2c-24-60/MB_GB_VDDA_L2_VIN/in2_crit + + - - pmbus-i2c-24-62/MB_GB_CORE_VIN_L1/in1_input + - pmbus-i2c-24-62/MB_GB_CORE_VIN_L1/in1_max + + - - tps53679-i2c-24-65/MB_3_3V_R_L1_VIN/in1_input + - tps53679-i2c-24-65/MB_3_3V_R_L1_VIN/in1_crit + + - - tps53679-i2c-24-65/MB_GB_VDDCK_L2_VIN/in2_input + - tps53679-i2c-24-65/MB_GB_VDDCK_L2_VIN/in2_crit + + - - tps53679-i2c-24-64/MB_3_3V_L_L1_VIN/in1_input + - tps53679-i2c-24-64/MB_3_3V_L_L1_VIN/in1_crit + + - - ltc2979-i2c-25-5d/GB_PCIE_VDDH/in2_input + - ltc2979-i2c-25-5d/GB_PCIE_VDDH/in2_max + - - ltc2979-i2c-25-5d/GB_PCIE_VDDH/in2_input + - ltc2979-i2c-25-5d/GB_PCIE_VDDH/in2_crit + + - - ltc2979-i2c-25-5d/GB_PCIE_VDDACK/in3_input + - ltc2979-i2c-25-5d/GB_PCIE_VDDACK/in3_max + - - ltc2979-i2c-25-5d/GB_PCIE_VDDACK/in3_input + - ltc2979-i2c-25-5d/GB_PCIE_VDDACK/in3_crit + + - - ltc2979-i2c-25-5d/GB_P1V8_VDDIO/in5_input + - ltc2979-i2c-25-5d/GB_P1V8_VDDIO/in5_max + - - ltc2979-i2c-25-5d/GB_P1V8_VDDIO/in5_input + - ltc2979-i2c-25-5d/GB_P1V8_VDDIO/in5_crit + + - - ltc2979-i2c-25-5d/GB_P1V8_PLLVDD/in6_input + - ltc2979-i2c-25-5d/GB_P1V8_PLLVDD/in6_max + - - ltc2979-i2c-25-5d/GB_P1V8_PLLVDD/in6_input + - ltc2979-i2c-25-5d/GB_P1V8_PLLVDD/in6_crit + + - - ltc2979-i2c-25-5e/MB_A1V8/in3_input + - ltc2979-i2c-25-5e/MB_A1V8/in3_max + - - ltc2979-i2c-25-5e/MB_A1V8/in3_input + - ltc2979-i2c-25-5e/MB_A1V8/in3_crit + + - - ltc2979-i2c-25-5e/MB_A1V/in4_input + - ltc2979-i2c-25-5e/MB_A1V/in4_max + - - ltc2979-i2c-25-5e/MB_A1V/in4_input + - ltc2979-i2c-25-5e/MB_A1V/in4_crit + + - - ltc2979-i2c-25-5e/MB_A3V3/in5_input + - ltc2979-i2c-25-5e/MB_A3V3/in5_max + - - ltc2979-i2c-25-5e/MB_A3V3/in5_input + - ltc2979-i2c-25-5e/MB_A3V3/in5_crit + + - - ltc2979-i2c-25-5e/MB_A1V2/in6_input + - ltc2979-i2c-25-5e/MB_A1V2/in6_max + - - ltc2979-i2c-25-5e/MB_A1V2/in6_input + - ltc2979-i2c-25-5e/MB_A1V2/in6_crit + + - - ltc2979-i2c-25-5e/MB_P3V3/in7_input + - ltc2979-i2c-25-5e/MB_P3V3/in7_max + - - ltc2979-i2c-25-5e/MB_P3V3/in7_input + - ltc2979-i2c-25-5e/MB_P3V3/in7_crit + + current: + - - tps53679-i2c-20-58/CPU_U17_PVCCIN_IOUT/curr1_input + - tps53679-i2c-20-58/CPU_U17_PVCCIN_IOUT/curr1_max + - - tps53679-i2c-20-58/CPU_U17_PVCCIN_IOUT/curr1_input + - tps53679-i2c-20-58/CPU_U17_PVCCIN_IOUT/curr1_crit + + - - tps53679-i2c-20-58/CPU_U17_P1P05V_IOUT/curr2_input + - tps53679-i2c-20-58/CPU_U17_P1P05V_IOUT/curr2_max + - - tps53679-i2c-20-58/CPU_U17_P1P05V_IOUT/curr2_input + - tps53679-i2c-20-58/CPU_U17_P1P05V_IOUT/curr2_crit + + - - tps53679-i2c-20-59/CPU_U117_P1P2V_IOUT/curr1_input + - tps53679-i2c-20-59/CPU_U117_P1P2V_IOUT/curr1_max + - - tps53679-i2c-20-59/CPU_U117_P1P2V_IOUT/curr1_input + - tps53679-i2c-20-59/CPU_U117_P1P2V_IOUT/curr1_crit + + - - 
tps53679-i2c-20-59/CPU_U117_P1P05V_IOUT/curr2_input + - tps53679-i2c-20-59/CPU_U117_P1P05V_IOUT/curr2_max + - - tps53679-i2c-20-59/CPU_U117_P1P05V_IOUT/curr2_input + - tps53679-i2c-20-59/CPU_U117_P1P05V_IOUT/curr2_crit + + - - tps53679-i2c-24-60/MB_GB_VDDS_L1_IOUT/curr1_input + - tps53679-i2c-24-60/MB_GB_VDDS_L1_IOUT/curr1_max + - - tps53679-i2c-24-60/MB_GB_VDDS_L1_IOUT/curr1_input + - tps53679-i2c-24-60/MB_GB_VDDS_L1_IOUT/curr1_crit + + - - tps53679-i2c-24-60/MB_GB_VDDA_L2_IOUT/curr2_input + - tps53679-i2c-24-60/MB_GB_VDDA_L2_IOUT/curr2_max + - - tps53679-i2c-24-60/MB_GB_VDDA_L2_IOUT/curr2_input + - tps53679-i2c-24-60/MB_GB_VDDA_L2_IOUT/curr2_crit + + - - pmbus-i2c-24-62/MB_GB_CORE_IIN_L1/curr1_input + - pmbus-i2c-24-62/MB_GB_CORE_IIN_L1/curr1_max + - - pmbus-i2c-24-62/MB_GB_CORE_IIN_L1/curr1_input + - pmbus-i2c-24-62/MB_GB_CORE_IIN_L1/curr1_crit + + - - pmbus-i2c-24-62/MB_GB_CORE_IOUT_L1/curr2_input + - pmbus-i2c-24-62/MB_GB_CORE_IOUT_L1/curr2_max + - - pmbus-i2c-24-62/MB_GB_CORE_IOUT_L1/curr2_input + - pmbus-i2c-24-62/MB_GB_CORE_IOUT_L1/curr2_crit + + - - tps53679-i2c-24-65/MB_3_3V_R_L1_IOUT/curr1_input + - tps53679-i2c-24-65/MB_3_3V_R_L1_IOUT/curr1_max + - - tps53679-i2c-24-65/MB_3_3V_R_L1_IOUT/curr1_input + - tps53679-i2c-24-65/MB_3_3V_R_L1_IOUT/curr1_crit + + - - tps53679-i2c-24-65/MB_GB_VDDCK_L2_IOUT/curr2_input + - tps53679-i2c-24-65/MB_GB_VDDCK_L2_IOUT/curr2_max + - - tps53679-i2c-24-65/MB_GB_VDDCK_L2_IOUT/curr2_input + - tps53679-i2c-24-65/MB_GB_VDDCK_L2_IOUT/curr2_crit + + - - tps53679-i2c-24-64/MB_3_3V_L_L1_IOUT/curr1_input + - tps53679-i2c-24-64/MB_3_3V_L_L1_IOUT/curr1_max + - - tps53679-i2c-24-64/MB_3_3V_L_L1_IOUT/curr1_input + - tps53679-i2c-24-64/MB_3_3V_L_L1_IOUT/curr1_crit + + power: + - - pmbus-i2c-24-62/pin/power1_input + - pmbus-i2c-24-62/pin/power1_max + non_zero: + fan: [] + power: [] + temp: [] + psu_skips: {} + sensor_skip_per_version: {} diff --git a/ansible/lab b/ansible/lab index aded3d6508..6dd2631131 100644 --- a/ansible/lab +++ b/ansible/lab @@ -12,6 +12,8 @@ all: sonic_a7260: sonic_multi_asic: sonic_multi_asic_2: + sonic_msft_sup: + sonic_msft_lc_100G: fanout: hosts: str-7260-10: @@ -148,3 +150,39 @@ sonic_multi_asic_2: vlab-08: ansible_host: 10.250.0.112 ansible_hostv6: fec0::ffff:afa:c + +sonic_msft_sup: + vars: + HwSku: msft-RP-O + slot_num: slot0 + card_type: supervisor + hosts: + lab-msft-sup-1: + ansible_host: 2.2.2.2 + hwsku: msft-RP-O + num_asics: 2 + +sonic_msft_lc_100G: + vars: + switch_type: chassis-packet + num_asics: 2 + frontend_asics: [0,1] + hosts: + lab-msft-lc0-1: + hwsku: msft-LC-48H-O + slot_num: slot1 + loopback4096_ip: [3.3.3.3/32,3.3.3.4/32] + loopback4096_ipv6: [2603:10e2:400::3/128,2603:10e2:400::4/128] + ansible_host: 2.2.2.3 + lab-msft-lc1-1: + hwsku: msft-LC-48H-O + slot_num: slot2 + loopback4096_ip: [3.3.3.5/32,3.3.3.6/32] + loopback4096_ipv6: [2603:10e2:400::5/128,2603:10e2:400::6/128] + ansible_host: 2.2.2.4 + lab-msft-lc2-1: + hwsku: msft-LC-48H-O + slot_num: slot3 + loopback4096_ip: [3.3.3.7/32,3.3.3.8/32] + loopback4096_ipv6: [2603:10e2:400::7/128,2603:10e2:400::8/128] + ansible_host: 2.2.2.5 diff --git a/ansible/library/announce_routes.py b/ansible/library/announce_routes.py index 2c1c68b635..0dd0f9091d 100644 --- a/ansible/library/announce_routes.py +++ b/ansible/library/announce_routes.py @@ -45,6 +45,7 @@ NHIPV4 = '10.10.246.254' NHIPV6 = 'fc0a::ff' SPINE_ASN = 65534 +CORE_RA_ASN = 65900 LEAF_ASN_START = 64600 TOR_ASN_START = 65500 IPV4_BASE_PORT = 5000 @@ -126,10 +127,10 @@ def 
get_uplink_router_as_path(uplink_router_type, spine_asn): def generate_routes(family, podset_number, tor_number, tor_subnet_number, - spine_asn, leaf_asn_start, tor_asn_start, - nexthop, nexthop_v6, - tor_subnet_size, max_tor_subnet_number, topo, - router_type = "leaf", tor_index=None, set_num=None, no_default_route=False): + spine_asn, leaf_asn_start, tor_asn_start, nexthop, + nexthop_v6, tor_subnet_size, max_tor_subnet_number, topo, + router_type = "leaf", tor_index=None, set_num=None, + no_default_route=False, core_ra_asn=CORE_RA_ASN): routes = [] if not no_default_route and router_type != "tor": default_route_as_path = get_uplink_router_as_path(router_type, spine_asn) @@ -206,7 +207,7 @@ def generate_routes(family, podset_number, tor_number, tor_subnet_number, aspath = None if router_type == "core": - aspath = "{} {}".format(leaf_asn, tor_asn) + aspath = "{} {}".format(leaf_asn, core_ra_asn) elif router_type == "spine": aspath = "{} {}".format(leaf_asn, tor_asn) elif router_type == "leaf": @@ -372,6 +373,7 @@ def generate_t2_routes(dut_vm_dict, topo, ptf_ip): nhipv6 = common_config.get("nhipv6", NHIPV6) leaf_asn_start = common_config.get("leaf_asn_start", LEAF_ASN_START) tor_asn_start = common_config.get("tor_asn_start", TOR_ASN_START) + core_ra_asn = common_config.get("core_ra_asn", CORE_RA_ASN) # generate routes for t1 vms for a_dut_index in dut_vm_dict: @@ -404,11 +406,13 @@ def generate_t2_routes(dut_vm_dict, topo, ptf_ip): routes_v4 = generate_routes("v4", podset_number, tor_number, tor_subnet_number, common_config['dut_asn'], leaf_asn_start, tor_asn_start, nhipv4, nhipv6, tor_subnet_size, max_tor_subnet_number, "t2", - router_type=router_type, tor_index=tor_index, set_num=set_num) + router_type=router_type, tor_index=tor_index, set_num=set_num, + core_ra_asn=core_ra_asn) routes_v6 = generate_routes("v6", podset_number, tor_number, tor_subnet_number, common_config['dut_asn'], leaf_asn_start, tor_asn_start, nhipv4, nhipv6, tor_subnet_size, max_tor_subnet_number, "t2", - router_type=router_type, tor_index=tor_index, set_num=set_num) + router_type=router_type, tor_index=tor_index, set_num=set_num, + core_ra_asn=core_ra_asn) announce_routes(ptf_ip, port, routes_v4) announce_routes(ptf_ip, port6, routes_v6) diff --git a/ansible/library/config_facts.py b/ansible/library/config_facts.py index 65b97c08f6..e4feb6f801 100644 --- a/ansible/library/config_facts.py +++ b/ansible/library/config_facts.py @@ -1,5 +1,6 @@ #!/usr/bin/env python import json +import traceback from collections import defaultdict from natsort import natsorted from ansible.module_utils.port_utils import get_port_indices_for_asic @@ -164,7 +165,8 @@ def main(): results = get_facts(config, namespace) module.exit_json(ansible_facts=results) except Exception as e: - module.fail_json(msg=e.message) + tb = traceback.format_exc() + module.fail_json(msg=str(e) + "\n" + tb) from ansible.module_utils.basic import AnsibleModule diff --git a/ansible/library/dut_basic_facts.py b/ansible/library/dut_basic_facts.py new file mode 100644 index 0000000000..b6261a8c5f --- /dev/null +++ b/ansible/library/dut_basic_facts.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# This ansible module is for gathering basic facts from DUT of specified testbed. +# +# Example output: + +from ansible.module_utils.basic import * + + +DOCUMENTATION = ''' +--- +module: dut_basic_facts +author: Xin Wang (xiwang5@microsoft.com) +short_description: Retrive basic facts from DUT. +description: + - Retrive basic facts from DUT. 
This module should only be applied to a SONiC device. +options: + N/A +''' + +EXAMPLES = ''' +# Gather DUT basic facts +- name: Gathering DUT basic facts + dut_basic_facts: +''' + +from sonic_py_common import device_info + + +def main(): + + module = AnsibleModule(argument_spec=dict(), supports_check_mode=False) + + results = {} + + try: + results['platform'], results['hwsku'] = device_info.get_platform_and_hwsku() + results['is_multi_asic'] = device_info.is_multi_npu() + results['num_asic'] = device_info.get_num_npus() + results.update(device_info.get_sonic_version_info()) + + # In case a image does not have /etc/sonic/sonic_release, guess release from 'build_version' + if 'release' not in results or not results['release'] or results['release'] == 'none': + if 'build_version' in results: + if '201811' in results['build_version']: + results['release'] = '201811' + elif '201911' in results['build_version']: + results['release'] = '201911' + elif 'master' in results['build_version']: + results['release'] = 'master' + else: + results['release'] = 'unknown' + + module.exit_json(ansible_facts={'dut_basic_facts': results}) + except Exception as e: + module.fail_json(msg='Gather DUT facts failed, exception: {}'.format(repr(e))) + +if __name__ == '__main__': + main() diff --git a/ansible/library/extract_log.py b/ansible/library/extract_log.py index b8c2d9b3c7..e0bdafc87b 100644 --- a/ansible/library/extract_log.py +++ b/ansible/library/extract_log.py @@ -80,6 +80,7 @@ import hashlib import logging import logging.handlers +import traceback from datetime import datetime from functools import cmp_to_key from ansible.module_utils.basic import * @@ -91,7 +92,7 @@ def extract_lines(directory, filename, target_string): path = os.path.join(directory, filename) file = None if 'gz' in path: - file = gzip.GzipFile(path) + file = gzip.open(path, mode='rt') else: file = open(path) result = None @@ -258,7 +259,7 @@ def extract_log(directory, prefixname, target_string, target_filename): logger.debug("extract_log from files {}".format(filenames)) file_with_latest_line, file_create_time, latest_line, file_size = extract_latest_line_with_string(directory, filenames, target_string) m = hashlib.md5() - m.update(latest_line) + m.update(latest_line.encode('utf-8')) logger.debug("extract_log start file {} size {}, ctime {}, latest line md5sum {}".format(file_with_latest_line, file_size, file_create_time, m.hexdigest())) files_to_copy = calculate_files_to_copy(filenames, file_with_latest_line) logger.debug("extract_log subsequent files {}".format(files_to_copy)) @@ -286,8 +287,8 @@ def main(): try: extract_log(p['directory'], p['file_prefix'], p['start_string'], p['target_filename']) except: - err = str(sys.exc_info()) - module.fail_json(msg="Error: %s" % err) + tb = traceback.format_exc() + module.fail_json(msg=tb) module.exit_json() diff --git a/ansible/library/fabric_info.py b/ansible/library/fabric_info.py index 7b7442c203..9385231c1e 100644 --- a/ansible/library/fabric_info.py +++ b/ansible/library/fabric_info.py @@ -23,8 +23,8 @@ RETURN = ''' ansible_facts{ - fabric_info: [{'asicname': 'ASIC0', 'ip_prefix': '10.1.0.1/32', 'ip6_prefix': 'FC00:1::1/128'}, - {'asicname': 'ASIC1', 'ip_prefix': '10.1.0.2/32', 'ip6_prefix': 'FC00:1::2/128'}] + fabric_info: [{'asicname': 'ASIC0', 'asic_id': 0, 'ip_prefix': '10.1.0.1/32', 'ip6_prefix': 'FC00:1::1/128'}, + {'asicname': 'ASIC1', 'asic_id': 1, 'ip_prefix': '10.1.0.2/32', 'ip6_prefix': 'FC00:1::2/128'}] } ''' @@ -55,6 +55,7 @@ def main(): next_v4addr = str( 
ipaddress.IPv4Address(v4base + asic_id) ) next_v6addr = str( ipaddress.IPv6Address(v6base + asic_id) ) data = { 'asicname': key, + 'asic_id' : asic_id, 'ip_prefix': next_v4addr + "/" + v4pfx[-1], 'ip6_prefix': next_v6addr + "/" + v6pfx[-1] } fabric_info.append( data ) diff --git a/ansible/library/minigraph_facts.py b/ansible/library/minigraph_facts.py index c27ca2e1db..e3d73233ce 100644 --- a/ansible/library/minigraph_facts.py +++ b/ansible/library/minigraph_facts.py @@ -325,9 +325,9 @@ def _parse_intf(intfname, ipprefix): intfs = [] for ipintf in ipintfs.findall(str(QName(ns, "IPInterface"))): intfalias = ipintf.find(str(QName(ns, "AttachTo"))).text - if port_alias_to_name_map.has_key(intfalias): + if intfalias in port_alias_to_name_map: intfname = port_alias_to_name_map[intfalias] - elif port_alias_asic_map.has_key(intfalias): + elif intfalias in port_alias_asic_map: intfname = port_alias_asic_map[intfalias] else: intfname = intfalias @@ -831,7 +831,7 @@ def main(): except Exception as e: tb = traceback.format_exc() # all attempts to find a minigraph failed. - module.fail_json(msg=e.message + "\n" + tb) + module.fail_json(msg=str(e) + "\n" + tb) def print_parse_xml(hostname): diff --git a/ansible/library/port_alias.py b/ansible/library/port_alias.py index 13fd30d610..f469670b43 100755 --- a/ansible/library/port_alias.py +++ b/ansible/library/port_alias.py @@ -78,20 +78,22 @@ def get_platform_type(self): return value return None - def get_portconfig_path(self, asic_id=None): + def get_portconfig_path(self, slotid=None, asic_id=None): platform = self.get_platform_type() if platform is None: return None - if asic_id is None: + if asic_id is None or asic_id == '': portconfig = os.path.join(FILE_PATH, platform, self.hwsku, PORTMAP_FILE) - else: + elif slotid is None or slotid == '': portconfig = os.path.join(FILE_PATH, platform, self.hwsku, str(asic_id), PORTMAP_FILE) + else: + portconfig = os.path.join(FILE_PATH, platform, self.hwsku, str(slotid), str(asic_id), PORTMAP_FILE) if os.path.exists(portconfig): return portconfig return None def get_portmap(self, asic_id=None, include_internal=False, - hostname=None, switchid=None): + hostname=None, switchid=None, slotid=None): aliases = [] portmap = {} aliasmap = {} @@ -104,10 +106,10 @@ def get_portmap(self, asic_id=None, include_internal=False, port_coreid_index = -1 port_core_portid_index = -1 num_voq_index = -1 - # default to ASIC0 as minigraph.py parsing code has that assumption. - asic_name = "ASIC0" if asic_id is None else "ASIC" + str(asic_id) + # default to Asic0 as minigraph.py parsing code has that assumption. 
+ asic_name = "Asic0" if asic_id is None else "asic" + str(asic_id) - filename = self.get_portconfig_path(asic_id) + filename = self.get_portconfig_path(slotid, asic_id) if filename is None: raise Exception("Something wrong when trying to find the portmap file, either the hwsku is not available or file location is not correct") with open(filename) as f: @@ -209,7 +211,8 @@ def main(): include_internal=dict(required=False, type='bool', default=False), card_type=dict(type='str', required=False), hostname=dict(type='str', required=False), - start_switchid=dict(type='int', required=False) + switchids=dict(type='list', required=False), + slotid=dict(type='str', required=False) ), supports_check_mode=True ) @@ -235,10 +238,14 @@ def main(): 'sysports': sysports}) return allmap = SonicPortAliasMap(m_args['hwsku']) - start_switchid = 0 - if 'start_switchid' in m_args and m_args['start_switchid'] != None: - start_switchid = int(m_args['start_switchid']) - # When this script is invoked on sonic-mgmt docker, num_asic + switchids = None + slotid = None + if 'switchids' in m_args and m_args['switchids'] != None and len(m_args['switchids']): + switchids = m_args['switchids'] + + if 'slotid' in m_args and m_args['slotid'] != None: + slotid = m_args['slotid'] + # When this script is invoked on sonic-mgmt docker, num_asic # parameter is passed. if m_args['num_asic'] is not None: num_asic = m_args['num_asic'] @@ -264,12 +271,12 @@ def main(): if 'hostname' in m_args: hostname = m_args['hostname'] for asic_id in range(num_asic): - if asic_id is not None: - switchid = start_switchid + asic_id + if switchids and asic_id is not None: + switchid = switchids[asic_id] if num_asic == 1: asic_id = None (aliases_asic, portmap_asic, aliasmap_asic, portspeed_asic, front_panel_asic, asicifnames_asic, - sysport_asic) = allmap.get_portmap(asic_id, include_internal, hostname, switchid) + sysport_asic) = allmap.get_portmap(asic_id, include_internal, hostname, switchid, slotid) if aliases_asic is not None: aliases.extend(aliases_asic) if portmap_asic is not None: diff --git a/ansible/library/snmp_facts.py b/ansible/library/snmp_facts.py index 2f93ebc847..a94608a7c7 100644 --- a/ansible/library/snmp_facts.py +++ b/ansible/library/snmp_facts.py @@ -226,6 +226,9 @@ def __init__(self,dotprefix=False): self.ipCidrRouteEntry = dp + "1.3.6.1.2.1.4.24.4.1.1.0.0.0.0.0.0.0.0.0" # + .next hop IP self.ipCidrRouteStatus = dp + "1.3.6.1.2.1.4.24.4.1.16.0.0.0.0.0.0.0.0.0" # + .next hop IP + # Dot1q MIB + self.dot1qTpFdbEntry = dp + "1.3.6.1.2.1.17.7.1.2.2.1.2" # + .VLAN.MAC + def decode_hex(hexstring): if len(hexstring) < 3: @@ -923,7 +926,6 @@ def main(): for oid, val in varBinds: current_oid = oid.prettyPrint() - current_val = val.prettyPrint() if current_oid == v.sysTotalMemery: results['ansible_sysTotalMemery'] = decode_type(module, current_oid, val) elif current_oid == v.sysTotalFreeMemery: @@ -935,6 +937,31 @@ def main(): elif current_oid == v.sysCachedMemory: results['ansible_sysCachedMemory'] = decode_type(module, current_oid, val) + errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( + snmp_auth, + cmdgen.UdpTransportTarget((m_args['host'], 161)), + cmdgen.MibVariable(p.dot1qTpFdbEntry,), + ) + + if errorIndication: + module.fail_json(msg=str(errorIndication) + ' querying FdbTable') + + for varBinds in varTable: + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if v.dot1qTpFdbEntry in current_oid: + # extract fdb info from oid + items = 
current_oid.split(v.dot1qTpFdbEntry + ".")[1].split(".") + # VLAN + MAC(6) + if len(items) != 7: + continue + mac_str = "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}".format( + int(items[1]), int(items[2]), int(items[3]), int(items[4]), int(items[5]), int(items[6])) + # key must be string + key = items[0] + '.' + mac_str + results['snmp_fdb'][key] = current_val + module.exit_json(ansible_facts=results) main() diff --git a/ansible/library/topo_facts.py b/ansible/library/topo_facts.py index e2b166fb9b..00462542b6 100644 --- a/ansible/library/topo_facts.py +++ b/ansible/library/topo_facts.py @@ -83,7 +83,6 @@ def __init__(self): self.asic_topo_config = {} def parse_topo_defintion(self, topo_definition, po_map, dut_num, neigh_type='VMs'): - dut_asn = topo_definition['configuration_properties']['common']['dut_asn'] vmconfig = dict() for vm in topo_definition['topology'][neigh_type]: vmconfig[vm] = dict() @@ -106,17 +105,20 @@ def parse_topo_defintion(self, topo_definition, po_map, dut_num, neigh_type='VMs for asic_intf in topo_definition['topology'][neigh_type][vm]['asic_intfs']: vmconfig[vm]['asic_intfs'][dut_index].append(asic_intf) + # physical interface - for intf in topo_definition['configuration'][vm]['interfaces']: - if (neigh_type == 'VMs' and 'Ethernet' in intf) or \ - (neigh_type == 'NEIGH_ASIC' and re.match("Eth(\d+)-", intf)): - dut_index = 0 - if 'dut_index' in topo_definition['configuration'][vm]['interfaces'][intf]: - dut_index = topo_definition['configuration'][vm]['interfaces'][intf]['dut_index'] - if 'lacp' in topo_definition['configuration'][vm]['interfaces'][intf]: - po_map[topo_definition['configuration'][vm]['interfaces'][intf]['lacp']] = dut_index - - vmconfig[vm]['intfs'][dut_index].append(intf) + if 'configuration' in topo_definition: + if 'interfaces' in topo_definition['configuration'][vm]: + for intf in topo_definition['configuration'][vm]['interfaces']: + dut_index = 0 + if neigh_type == 'NEIGH_ASIC' and re.match("Eth(\d+)-", intf): + vmconfig[vm]['intfs'][dut_index].append(intf) + elif 'Ethernet' in intf: + if 'dut_index' in topo_definition['configuration'][vm]['interfaces'][intf]: + dut_index = topo_definition['configuration'][vm]['interfaces'][intf]['dut_index'] + if 'lacp' in topo_definition['configuration'][vm]['interfaces'][intf]: + po_map[topo_definition['configuration'][vm]['interfaces'][intf]['lacp']] = dut_index + vmconfig[vm]['intfs'][dut_index].append(intf) # ip interface vmconfig[vm]['ip_intf'] = [None] * dut_num @@ -124,59 +126,72 @@ def parse_topo_defintion(self, topo_definition, po_map, dut_num, neigh_type='VMs vmconfig[vm]['ipv4mask'] = [None] * dut_num vmconfig[vm]['peer_ipv6'] = [None] * dut_num vmconfig[vm]['ipv6mask'] = [None] * dut_num - - - for intf in topo_definition['configuration'][vm]['interfaces']: - dut_index = 0 - if (neigh_type == 'VMs' and 'Ethernet' in intf) or \ - (neigh_type == 'NEIGH_ASIC' and re.match("Eth(\d+)-", intf)): - if 'dut_index' in topo_definition['configuration'][vm]['interfaces'][intf]: - dut_index = topo_definition['configuration'][vm]['interfaces'][intf]['dut_index'] - elif 'Port-Channel' in intf: - m = re.search("(\d+)", intf) - dut_index = po_map[int(m.group(1))] - - if 'ipv4' in topo_definition['configuration'][vm]['interfaces'][intf] and ('loopback' not in intf.lower()): - (peer_ipv4, ipv4_mask) = topo_definition['configuration'][vm]['interfaces'][intf]['ipv4'].split('/') - vmconfig[vm]['peer_ipv4'][dut_index] = peer_ipv4 - vmconfig[vm]['ipv4mask'][dut_index] = ipv4_mask - vmconfig[vm]['ip_intf'][dut_index] = intf 
- if 'ipv6' in topo_definition['configuration'][vm]['interfaces'][intf] and ('loopback' not in intf.lower()): - (ipv6_addr, ipv6_mask) = topo_definition['configuration'][vm]['interfaces'][intf]['ipv6'].split('/') - vmconfig[vm]['peer_ipv6'][dut_index] = ipv6_addr.upper() - vmconfig[vm]['ipv6mask'][dut_index] = ipv6_mask - vmconfig[vm]['ip_intf'][dut_index] = intf - - # bgp vmconfig[vm]['bgp_ipv4'] = [None] * dut_num vmconfig[vm]['bgp_ipv6'] = [None] * dut_num - vmconfig[vm]['bgp_asn'] = topo_definition['configuration'][vm]['bgp']['asn'] - for ipstr in topo_definition['configuration'][vm]['bgp']['peers'][dut_asn]: - if sys.version_info < (3, 0): - ip = ipaddress.ip_address(ipstr.decode('utf8')) - else: - ip = ipaddress.ip_address(ipstr) - for dut_index in range(0, dut_num): - if ip.version == 4: - # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index - if vmconfig[vm]['peer_ipv4'][dut_index]: - ipsubnet_str = vmconfig[vm]['peer_ipv4'][dut_index]+'/'+vmconfig[vm]['ipv4mask'][dut_index] - if sys.version_info < (3, 0): - ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8')) - else: - ipsubnet = ipaddress.ip_interface(ipsubnet_str) - if ip in ipsubnet.network: + vmconfig[vm]['bgp_asn'] = None + + + if 'configuration' in topo_definition: + if 'interfaces' in topo_definition['configuration'][vm]: + for intf in topo_definition['configuration'][vm]['interfaces']: + dut_index = 0 + if neigh_type == 'NEIGH_ASIC': + pass + elif 'Ethernet' in intf: + if 'dut_index' in topo_definition['configuration'][vm]['interfaces'][intf]: + dut_index = topo_definition['configuration'][vm]['interfaces'][intf]['dut_index'] + elif 'Port-Channel' in intf: + m = re.search("(\d+)", intf) + dut_index = po_map[int(m.group(1))] + + if isinstance(topo_definition['configuration'][vm]['interfaces'],dict) and 'ipv4' in topo_definition['configuration'][vm]['interfaces'][intf] and ('loopback' not in intf.lower()): + (peer_ipv4, ipv4_mask) = topo_definition['configuration'][vm]['interfaces'][intf]['ipv4'].split('/') + vmconfig[vm]['peer_ipv4'][dut_index] = peer_ipv4 + vmconfig[vm]['ipv4mask'][dut_index] = ipv4_mask + vmconfig[vm]['ip_intf'][dut_index] = intf + if isinstance(topo_definition['configuration'][vm]['interfaces'],dict) and 'ipv6' in topo_definition['configuration'][vm]['interfaces'][intf] and ('loopback' not in intf.lower()): + (ipv6_addr, ipv6_mask) = topo_definition['configuration'][vm]['interfaces'][intf]['ipv6'].split('/') + vmconfig[vm]['peer_ipv6'][dut_index] = ipv6_addr.upper() + vmconfig[vm]['ipv6mask'][dut_index] = ipv6_mask + vmconfig[vm]['ip_intf'][dut_index] = intf + # bgp + vmconfig[vm]['bgp_asn'] = topo_definition['configuration'][vm]['bgp']['asn'] + dut_asn = topo_definition['configuration_properties']['common']['dut_asn'] + for ipstr in topo_definition['configuration'][vm]['bgp']['peers'][dut_asn]: + ip_mask = None + if '/' in ipstr: + (ipstr, ip_mask) = ipstr.split('/') + if sys.version_info < (3, 0): + ip = ipaddress.ip_address(ipstr.decode('utf8')) + else: + ip = ipaddress.ip_address(ipstr) + for dut_index in range(0, dut_num): + if ip.version == 4: + # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index + if vmconfig[vm]['peer_ipv4'][dut_index]: + ipsubnet_str = vmconfig[vm]['peer_ipv4'][dut_index]+'/'+vmconfig[vm]['ipv4mask'][dut_index] + if sys.version_info < (3, 0): + ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8')) + else: + ipsubnet = ipaddress.ip_interface(ipsubnet_str) + if ip in 
ipsubnet.network: + vmconfig[vm]['bgp_ipv4'][dut_index] = ipstr.upper() + elif neigh_type == "NEIGH_ASIC": vmconfig[vm]['bgp_ipv4'][dut_index] = ipstr.upper() - elif ip.version == 6: - # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index - if vmconfig[vm]['peer_ipv6'][dut_index]: - ipsubnet_str = vmconfig[vm]['peer_ipv6'][dut_index]+'/'+vmconfig[vm]['ipv6mask'][dut_index] - if sys.version_info < (3, 0): - ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8')) - else: - ipsubnet = ipaddress.ip_interface(ipsubnet_str) - if ip in ipsubnet.network: + vmconfig[vm]['ipv4mask'][dut_index] = ip_mask if ip_mask else '32' + elif ip.version == 6: + # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index + if vmconfig[vm]['peer_ipv6'][dut_index]: + ipsubnet_str = vmconfig[vm]['peer_ipv6'][dut_index]+'/'+vmconfig[vm]['ipv6mask'][dut_index] + if sys.version_info < (3, 0): + ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8')) + else: + ipsubnet = ipaddress.ip_interface(ipsubnet_str) + if ip in ipsubnet.network: + vmconfig[vm]['bgp_ipv6'][dut_index] = ipstr.upper() + elif neigh_type == "NEIGH_ASIC": vmconfig[vm]['bgp_ipv6'][dut_index] = ipstr.upper() + vmconfig[vm]['ipv6mask'][dut_index] = ip_mask if ip_mask else '128' return vmconfig def get_topo_config(self, topo_name, hwsku): @@ -201,10 +216,10 @@ def get_topo_config(self, topo_name, hwsku): topo_definition = yaml.load(f) if not os.path.isfile(asic_topo_filename): - asic_definition = {} + slot_definition = {} else: with open(asic_topo_filename) as f: - asic_definition = yaml.load(f) + slot_definition = yaml.load(f) ### parse topo file specified in vars/ to reverse as dut config dut_num = 1 @@ -220,16 +235,18 @@ def get_topo_config(self, topo_name, hwsku): vm_topo_config['dut_cluster'] = topo_definition['configuration_properties']['common']['dut_cluster'] vm_topo_config['vm'] = self.parse_topo_defintion(topo_definition, po_map, dut_num, 'VMs') - for asic in asic_definition: - po_map_asic = [None] * 16 # maximum 16 port channel interfaces - asic_topo_config[asic] = dict() - asic_topo_config[asic]['dut_asn'] = asic_definition[asic]['configuration_properties']['common']['dut_asn'] - asic_topo_config[asic]['asic_type'] = asic_definition[asic]['configuration_properties']['common']['asic_type'] - asic_topo_config[asic]['Loopback4096'] = [] - for lo4096 in asic_definition[asic]['configuration_properties']['common']['Loopback4096']: - asic_topo_config[asic]['Loopback4096'].append(lo4096) + if 'cable' in topo_name: + dut_asn = topo_definition['configuration_properties']['common']['dut_asn'] + vm_topo_config['dut_type'] = topo_definition['configuration_properties']['common']['dut_type'] + vm_topo_config['dut_asn'] = dut_asn - asic_topo_config[asic]['neigh_asic'] = self.parse_topo_defintion(asic_definition[asic], po_map_asic, 1, 'NEIGH_ASIC') + for slot,asic_definition in slot_definition.items(): + asic_topo_config[slot] = dict() + for asic in asic_definition: + po_map_asic = [None] * 16 # maximum 16 port channel interfaces + asic_topo_config[slot][asic] = dict() + asic_topo_config[slot][asic]['asic_type'] = asic_definition[asic]['configuration_properties']['common']['asic_type'] + asic_topo_config[slot][asic]['neigh_asic'] = self.parse_topo_defintion(asic_definition[asic], po_map_asic, 1, 'NEIGH_ASIC') vm_topo_config['host_interfaces_by_dut'] = [[] for i in range(dut_num)] if 'host_interfaces' in topo_definition['topology']: diff --git 
a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py index 1fef141c3a..6218ce8fa8 100644 --- a/ansible/module_utils/port_utils.py +++ b/ansible/module_utils/port_utils.py @@ -79,7 +79,8 @@ def get_port_alias_to_name_map(hwsku, asic_id=None): elif hwsku == "Arista-7260CX3-C64" or hwsku == "Arista-7170-64C": for i in range(1, 65): port_alias_to_name_map["Ethernet%d/1" % i] = "Ethernet%d" % ((i - 1) * 4) - elif hwsku == "Arista-7060CX-32S-C32" or hwsku == "Arista-7060CX-32S-Q32" or hwsku == "Arista-7060CX-32S-C32-T1" or hwsku == "Arista-7170-32CD-C32": + elif hwsku == "Arista-7060CX-32S-C32" or hwsku == "Arista-7060CX-32S-Q32" or hwsku == "Arista-7060CX-32S-C32-T1" or hwsku == "Arista-7170-32CD-C32" \ + or hwsku == "Arista-7050CX3-32S-C32": for i in range(1, 33): port_alias_to_name_map["Ethernet%d/1" % i] = "Ethernet%d" % ((i - 1) * 4) elif hwsku == "Mellanox-SN2700-D40C8S8": @@ -163,15 +164,24 @@ def get_port_alias_to_name_map(hwsku, asic_id=None): elif hwsku == "Seastone-DX010": for i in range(1, 33): port_alias_to_name_map["Eth%d" % i] = "Ethernet%d" % ((i - 1) * 4) - elif hwsku == "Celestica-E1031-T48S4": + elif hwsku in ["Celestica-E1031-T48S4", "Nokia-7215", "Nokia-M0-7215"]: for i in range(1, 53): port_alias_to_name_map["etp%d" % i] = "Ethernet%d" % ((i - 1)) - elif hwsku == "et6448m" or hwsku == "Nokia-7215": + elif hwsku == "et6448m": for i in range(0, 52): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i elif hwsku == "newport": for i in range(0, 256, 8): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i + elif hwsku == "32x100Gb": + for i in range(0, 32): + port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i + elif hwsku == "36x100Gb": + for i in range(0, 36): + port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i + elif hwsku == "64x100Gb": + for i in range(0, 64): + port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i elif hwsku == "msft_multi_asic_vs": if asic_id is not None and asic_id in namespace_list['front_ns']: asic_offset = int(asic_id) * 16 diff --git a/ansible/roles/fanout/templates/arista_7060_deploy.j2 b/ansible/roles/fanout/templates/arista_7060_deploy.j2 index 46381c71dc..be50be7a9f 100644 --- a/ansible/roles/fanout/templates/arista_7060_deploy.j2 +++ b/ansible/roles/fanout/templates/arista_7060_deploy.j2 @@ -29,8 +29,11 @@ vlan {{ device_vlan_range[inventory_hostname] | list | join(',') }} vrf definition management rd 1:1 ! -{% for i in range(1,33) %} +{% for i in range(1, device_port_vlans[inventory_hostname]|length + 1) %} {% set intf = 'Ethernet' + i|string + '/1' %} +{% if intf not in device_port_vlans[inventory_hostname] %} +{% set intf = 'Ethernet' + i|string %} +{% endif %} interface {{ intf }} {% if intf in device_port_vlans[inventory_hostname] and device_port_vlans[inventory_hostname][intf]['mode'] != "Trunk" %} {% if device_conn[inventory_hostname][intf]['speed'] == "100000" %} @@ -111,7 +114,7 @@ interface {{ intf }} ! interface Management 1 description TO LAB MGMT SWITCH - ip address {{ device_info[inventory_hostname]["ManagementIp"] }} + ip address {{ device_info[inventory_hostname]["ManagementIp"] }}/{{ hostvars[inventory_hostname].mgmt_subnet_mask_length }} no shutdown ! 
# LACP packets pass through diff --git a/ansible/roles/test/files/ptftests/IP_decap_test.py b/ansible/roles/test/files/ptftests/IP_decap_test.py index ebcc14c21a..2139a9da89 100644 --- a/ansible/roles/test/files/ptftests/IP_decap_test.py +++ b/ansible/roles/test/files/ptftests/IP_decap_test.py @@ -358,10 +358,15 @@ def send_and_verify(self, dst_ip, expected_ports, src_port, dut_index, outer_pkt inner_ttl_info = pkt['IPv6'].payload.hlim inner_tos = pkt['IPv6'].payload.tc + exp_ttl = 'any' if inner_pkt_type == 'ipv4': exp_tos = exp_pkt.tos + if not self.ignore_ttl: + exp_ttl = exp_pkt.ttl else: exp_tos = exp_pkt.tc + if not self.ignore_ttl: + exp_ttl = exp_pkt.hlim #send and verify the return packets send_packet(self, src_port, pkt) @@ -384,7 +389,7 @@ def send_and_verify(self, dst_ip, expected_ports, src_port, dut_index, outer_pkt inner_src_ip, dst_ip, exp_tos, - 'any', + exp_ttl, str(expected_ports))) matched, received = verify_packet_any_port(self, masked_exp_pkt, expected_ports) diff --git a/ansible/roles/test/files/ptftests/advanced-reboot.py b/ansible/roles/test/files/ptftests/advanced-reboot.py index f761af15c5..9f54e5270a 100644 --- a/ansible/roles/test/files/ptftests/advanced-reboot.py +++ b/ansible/roles/test/files/ptftests/advanced-reboot.py @@ -151,11 +151,12 @@ def __init__(self): self.check_param('inboot_oper', None, required=False) # sad path to inject during warm-reboot self.check_param('nexthop_ips', [], required=False) # nexthops for the routes that will be added during warm-reboot self.check_param('allow_vlan_flooding', False, required=False) - self.check_param('sniff_time_incr', 60, required=False) + self.check_param('sniff_time_incr', 300, required=False) self.check_param('vnet', False, required=False) self.check_param('vnet_pkts', None, required=False) self.check_param('target_version', '', required=False) self.check_param('bgp_v4_v6_time_diff', 40, required=False) + self.check_param('asic_type', '', required=False) self.check_param('logfile_suffix', None, required=False) if not self.test_params['preboot_oper'] or self.test_params['preboot_oper'] == 'None': self.test_params['preboot_oper'] = None @@ -203,7 +204,7 @@ def __init__(self): # But ptf is not fast enough + swss is slow for FDB and ARP entries insertions self.timeout_thr = None - self.time_to_listen = 180.0 # Listen for more then 180 seconds, to be used in sniff_in_background method. + self.time_to_listen = 240.0 # Listen for more then 240 seconds, to be used in sniff_in_background method. # Inter-packet interval, to be used in send_in_background method. # Improve this interval to gain more precision of disruptions. 
self.send_interval = 0.0035 @@ -333,6 +334,7 @@ def timeout(self, func, seconds, message): def generate_vlan_servers(self): vlan_host_map = defaultdict(dict) + self.vlan_host_ping_map = defaultdict(dict) self.nr_vl_pkts = 0 # Number of packets from upper layer for vlan, prefix in self.vlan_ip_range.items(): if not self.ports_per_vlan[vlan]: @@ -347,6 +349,13 @@ def generate_vlan_servers(self): vlan_host_map[port][addr] = mac + for counter, i in enumerate( + xrange(n_hosts+2, n_hosts+2+len(self.ports_per_vlan[vlan])), start=n_hosts): + mac = self.VLAN_BASE_MAC_PATTERN.format(counter) + port = self.ports_per_vlan[vlan][i % len(self.ports_per_vlan[vlan])] + addr = self.host_ip(prefix, i) + self.vlan_host_ping_map[port][addr] = mac + self.nr_vl_pkts += n_hosts return vlan_host_map @@ -354,7 +363,9 @@ def generate_vlan_servers(self): def generate_arp_responder_conf(self, vlan_host_map): arp_responder_conf = {} for port in vlan_host_map: - arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port] + arp_responder_conf['eth{}'.format(port)] = {} + arp_responder_conf['eth{}'.format(port)].update(vlan_host_map[port]) + arp_responder_conf['eth{}'.format(port)].update(self.vlan_host_ping_map[port]) return arp_responder_conf @@ -532,7 +543,7 @@ def setUp(self): self.limit = datetime.timedelta(seconds=self.test_params['reboot_limit_in_seconds']) self.reboot_type = self.test_params['reboot_type'] - if self.reboot_type not in ['fast-reboot', 'warm-reboot', 'warm-reboot -f']: + if self.reboot_type in ['soft-reboot', 'reboot']: raise ValueError('Not supported reboot_type %s' % self.reboot_type) self.dut_mac = self.test_params['dut_mac'] @@ -716,9 +727,9 @@ def generate_from_vlan(self): def generate_ping_dut_lo(self): self.ping_dut_packets = [] dut_lo_ipv4 = self.test_params['lo_prefix'].split('/')[0] - for src_port in self.vlan_host_map: - src_addr = random.choice(self.vlan_host_map[src_port].keys()) - src_mac = self.hex_to_mac(self.vlan_host_map[src_port][src_addr]) + for src_port in self.vlan_host_ping_map: + src_addr = random.choice(self.vlan_host_ping_map[src_port].keys()) + src_mac = self.hex_to_mac(self.vlan_host_ping_map[src_port][src_addr]) packet = simple_icmp_packet(eth_src=src_mac, eth_dst=self.dut_mac, ip_src=src_addr, @@ -773,19 +784,24 @@ def generate_bidirectional(self): self.send_interval = self.time_to_listen / self.packets_to_send self.packets_list = [] from_t1_iter = itertools.cycle(self.from_t1) - + sent_count_vlan_to_t1 = 0 + sent_count_t1_to_vlan = 0 for i in xrange(self.packets_to_send): payload = '0' * 60 + str(i) if (i % 5) == 0 : # From vlan to T1. packet = scapyall.Ether(self.from_vlan_packet) packet.load = payload from_port = self.from_server_src_port + sent_count_vlan_to_t1 += 1 else: # From T1 to vlan. src_port, packet = next(from_t1_iter) packet = scapyall.Ether(packet) packet.load = payload from_port = src_port + sent_count_t1_to_vlan += 1 self.packets_list.append((from_port, str(packet))) + self.log("Sent prep count vlan to t1: {}".format(sent_count_vlan_to_t1)) + self.log("Sent prep count t1 to vlan: {}".format(sent_count_t1_to_vlan)) def put_nowait(self, queue, data): try: @@ -901,35 +917,37 @@ def handle_fast_reboot_health_check(self): self.check_alive() self.fails['dut'].clear() - self.send_and_sniff() + self.sniff_thr.join() + self.sender_thr.join() # Stop watching DUT self.watching = False self.log("Stopping reachability state watch thread.") self.watcher_is_stopped.wait(timeout = 10) # Wait for the Watcher stopped. 
- self.save_sniffed_packets() - examine_start = datetime.datetime.now() self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start)) self.examine_flow() self.log("Packet flow examine finished after %s" % str(datetime.datetime.now() - examine_start)) - self.no_routing_stop, self.no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start) - self.log("Dataplane disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id)) - self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \ - (self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets)) + if self.lost_packets: + self.no_routing_stop, self.no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start) + self.log("Dataplane disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id)) + self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \ + (self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets)) + else: + self.no_routing_start = self.reboot_start + self.no_routing_stop = self.reboot_start def handle_warm_reboot_health_check(self): - self.send_and_sniff() + self.sniff_thr.join() + self.sender_thr.join() # Stop watching DUT self.watching = False self.log("Stopping reachability state watch thread.") self.watcher_is_stopped.wait(timeout = 10) # Wait for the Watcher stopped. - self.save_sniffed_packets() - examine_start = datetime.datetime.now() self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start)) self.examine_flow() @@ -1154,6 +1172,7 @@ def runTest(self): self.wait_dut_to_warm_up() self.fails['dut'].clear() + self.clear_dut_counters() self.log("Schedule to reboot the remote switch in %s sec" % self.reboot_delay) thr = threading.Thread(target=self.reboot_dut) thr.setDaemon(True) @@ -1240,6 +1259,14 @@ def extract_no_cpu_replies(self, arr): def reboot_dut(self): time.sleep(self.reboot_delay) + if not self.kvm_test and\ + (self.reboot_type == 'fast-reboot' or 'warm-reboot' in self.reboot_type): + self.sender_thr = threading.Thread(target = self.send_in_background) + self.sniff_thr = threading.Thread(target = self.sniff_in_background) + self.sniffer_started = threading.Event() # Event for the sniff_in_background status. + self.sniff_thr.start() + self.sender_thr.start() + self.log("Rebooting remote side") stdout, stderr, return_code = self.dut_connection.execCommand("sudo " + self.reboot_type, timeout=30) if stdout != []: @@ -1304,6 +1331,7 @@ def send_in_background(self, packets_list = None, interval = None): packets_list = self.packets_list self.sniffer_started.wait(timeout=10) with self.dataplane_io_lock: + sent_packet_count = 0 # While running fast data plane sender thread there are two reasons for filter to be applied # 1. filter out data plane traffic which is tcp to free up the load on PTF socket (sniffer thread is using a different one) # 2. 
during warm neighbor restoration DUT will send a lot of ARP requests which we are not interested in @@ -1317,8 +1345,20 @@ def send_in_background(self, packets_list = None, interval = None): testutils.send_packet(self, entry[0], entry[1].decode("base64")) else: testutils.send_packet(self, *entry) + sent_packet_count += 1 self.log("Sender has been running for %s" % str(datetime.datetime.now() - sender_start)) - # Remove filter + self.log("Total sent packets by sender: {}".format(sent_packet_count)) + + # Signal sniffer thread to allow early finish. + # Without this signalling mechanism, the sniffer thread can continue for a hardcoded max time. + # Sometimes this max time is too long and sniffer keeps running too long after sender finishes. + # Other times, sniffer finishes too early (when max time is less) while the sender is still sending packets. + # So now: + # 1. sniffer max timeout is increased (to prevent sniffer finish before sender) + # 2. and sender can signal sniffer to end after all packets are sent. + time.sleep(1) + kill_sniffer_cmd = "pkill -SIGINT -f {}".format(self.ptf_sniffer) + subprocess.Popen(kill_sniffer_cmd.split()) self.apply_filter_all_ports('') def sniff_in_background(self, wait = None): @@ -1333,7 +1373,8 @@ def sniff_in_background(self, wait = None): sniffer_start = datetime.datetime.now() self.log("Sniffer started at %s" % str(sniffer_start)) sniff_filter = "tcp and tcp dst port 5000 and tcp src port 1234 and not icmp" - scapy_sniffer = threading.Thread(target=self.scapy_sniff, kwargs={'wait': wait, 'sniff_filter': sniff_filter}) + scapy_sniffer = threading.Thread(target=self.scapy_sniff, + kwargs={'wait': wait, 'sniff_filter': sniff_filter}) scapy_sniffer.start() time.sleep(2) # Let the scapy sniff initialize completely. self.sniffer_started.set() # Unblock waiter for the send_in_background. @@ -1341,19 +1382,22 @@ def sniff_in_background(self, wait = None): self.log("Sniffer has been running for %s" % str(datetime.datetime.now() - sniffer_start)) self.sniffer_started.clear() - def save_sniffed_packets(self): - filename = "/tmp/capture_%s.pcap" % self.logfile_suffix if self.logfile_suffix is not None else "/tmp/capture.pcap" - if self.packets: - scapyall.wrpcap(filename, self.packets) - self.log("Pcap file dumped to %s" % filename) - else: - self.log("Pcap file is empty.") - - def scapy_sniff(self, wait = 180, sniff_filter = ''): + def scapy_sniff(self, wait=300, sniff_filter=''): """ - This method exploits native scapy sniff() method. + @summary: PTF runner - runs a sniffer in PTF container. 
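The rewritten sniffing path above no longer calls scapy's sniff() in-process: scapy_sniff shells out to a dedicated sniffer script (self.ptf_sniffer), waits for it to exit, and loads the resulting pcap with rdpcap, while send_in_background terminates it early with pkill -SIGINT once the last packet has gone out. The actual helper is /root/ptftests/advanced_reboot_sniffer.py, which is not part of this hunk; the following is only a hedged sketch of what such a script could look like, matching the -f/-p/-l/-t invocation used above:

    import argparse
    import logging
    from scapy.all import sniff, wrpcap

    def main():
        # Flags mirror the sniffer_command built in scapy_sniff(); defaults here are illustrative.
        parser = argparse.ArgumentParser(description="Standalone sniffer for the reboot test")
        parser.add_argument("-f", "--filter", default="", help="BPF filter handed to scapy sniff()")
        parser.add_argument("-p", "--pcap", required=True, help="output pcap path")
        parser.add_argument("-l", "--log", required=True, help="log file path")
        parser.add_argument("-t", "--timeout", type=int, default=300, help="max sniff duration, seconds")
        args = parser.parse_args()

        logging.basicConfig(filename=args.log, level=logging.DEBUG)
        logging.info("sniffing for up to %s seconds, filter=%r", args.timeout, args.filter)
        # The caller wraps the filter in literal quotes; strip them before handing it to libpcap.
        # scapy's sniff() returns whatever it has captured so far when SIGINT arrives, which is
        # how the sender's "pkill -SIGINT" ends the capture ahead of the timeout.
        packets = sniff(filter=args.filter.strip("'"), timeout=args.timeout)
        wrpcap(args.pcap, packets)
        logging.info("captured %d packets", len(packets))

    if __name__ == "__main__":
        main()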
+ Args: + wait (int): Duration in seconds to sniff the traffic + sniff_filter (str): Filter that Scapy will use to collect only relevant packets """ - self.packets = scapyall.sniff(timeout = wait, filter = sniff_filter) + capture_pcap = "/tmp/capture_%s.pcap" % self.logfile_suffix if self.logfile_suffix is not None else "/tmp/capture.pcap" + capture_log = "/tmp/capture.log" + self.ptf_sniffer = "/root/ptftests/advanced_reboot_sniffer.py" + sniffer_command = ["python", self.ptf_sniffer, "-f", "'{}'".format(sniff_filter), "-p",\ + capture_pcap, "-l", capture_log, "-t" , str(wait)] + subprocess.call(["rm", "-rf", capture_pcap]) # remove old capture + subprocess.call(sniffer_command) + self.packets = scapyall.rdpcap(capture_pcap) + self.log("Number of all packets captured: {}".format(len(self.packets))) def send_and_sniff(self): """ @@ -1447,17 +1491,27 @@ def examine_flow(self, filename = None): prev_payload, prev_time = 0, 0 sent_payload = 0 received_counter = 0 # Counts packets from dut. + sent_counter = 0 + received_t1_to_vlan = 0 + received_vlan_to_t1 = 0 + missed_vlan_to_t1 = 0 + missed_t1_to_vlan = 0 self.disruption_start, self.disruption_stop = None, None for packet in packets: if packet[scapyall.Ether].dst == self.dut_mac: # This is a sent packet - keep track of it as payload_id:timestamp. sent_payload = int(str(packet[scapyall.TCP].payload)) sent_packets[sent_payload] = packet.time + sent_counter += 1 continue if packet[scapyall.Ether].src == self.dut_mac: # This is a received packet. received_time = packet.time received_payload = int(str(packet[scapyall.TCP].payload)) + if (received_payload % 5) == 0 : # From vlan to T1. + received_vlan_to_t1 += 1 + else: + received_t1_to_vlan += 1 received_counter += 1 if not (received_payload and received_time): # This is the first valid received packet. @@ -1471,11 +1525,24 @@ def examine_flow(self, filename = None): # Add disrupt to the dict: self.lost_packets[prev_payload] = (lost_id, disrupt, received_time - disrupt, received_time) self.log("Disruption between packet ID %d and %d. For %.4f " % (prev_payload, received_payload, disrupt)) + for lost_index in range(prev_payload + 1, received_payload): + if (lost_index % 5) == 0 : # lost received for packet sent from vlan to T1. 
+ missed_vlan_to_t1 += 1 + else: + missed_t1_to_vlan += 1 + self.log("") if not self.disruption_start: self.disruption_start = datetime.datetime.fromtimestamp(prev_time) self.disruption_stop = datetime.datetime.fromtimestamp(received_time) prev_payload = received_payload prev_time = received_time + self.log("**************** Packet received summary: ********************") + self.log("*********** Sent packets captured - {}".format(sent_counter)) + self.log("*********** received packets captured - t1-to-vlan - {}".format(received_t1_to_vlan)) + self.log("*********** received packets captured - vlan-to-t1 - {}".format(received_vlan_to_t1)) + self.log("*********** Missed received packets - t1-to-vlan - {}".format(missed_t1_to_vlan)) + self.log("*********** Missed received packets - vlan-to-t1 - {}".format(missed_vlan_to_t1)) + self.log("**************************************************************") self.fails['dut'].add("Sniffer failed to filter any traffic from DUT") self.assertTrue(received_counter, "Sniffer failed to filter any traffic from DUT") self.fails['dut'].clear() @@ -1600,6 +1667,21 @@ def wait_dut_to_warm_up(self): # Everything is good + def clear_dut_counters(self): + # Clear the counters after the WARM UP is complete + # this is done so that drops can be accurately calculated + # after reboot test is finished + clear_counter_cmds = [ "sonic-clear counters", + "sonic-clear queuecounters", + "sonic-clear dropcounters", + "sonic-clear rifcounters", + "sonic-clear pfccounters" + ] + if 'broadcom' in self.test_params['asic_type']: + clear_counter_cmds.append("bcmcmd 'clear counters'") + for cmd in clear_counter_cmds: + self.dut_connection.execCommand(cmd) + def check_alive(self): # This function checks that DUT routes the packets in the both directions. # diff --git a/ansible/roles/test/files/ptftests/arista.py b/ansible/roles/test/files/ptftests/arista.py index 44a0a22715..ddd2432cc5 100644 --- a/ansible/roles/test/files/ptftests/arista.py +++ b/ansible/roles/test/files/ptftests/arista.py @@ -81,6 +81,9 @@ def connect(self): version_output = self.do_cmd('show version') self.veos_version = self.parse_version(version_output) + self.show_lacp_command = self.parse_supported_show_lacp_command() + self.show_ip_bgp_command = self.parse_supported_bgp_neighbor_command() + self.show_ipv6_bgp_command = self.parse_supported_bgp_neighbor_command(v4=False) return self.shell def get_arista_prompt(self, first_prompt): @@ -170,15 +173,9 @@ def run(self): cur_time = time.time() info = {} debug_info = {} - lacp_help = self.do_cmd('show lacp ?') - show_lacp_command = self.parse_supported_show_lacp_command(lacp_help) - self.log("show lacp command is %s"%(show_lacp_command)) - # sent 'show lacp ?' 
in previous step already, so there are 'show lacp' in cmd ssh pipe - # only need to send 'peer' or 'neighbor' to complete the command - # don't send whole command('show lacp peer/neighbor') instead, it will mess the output pipe up - lacp_output = self.do_cmd(show_lacp_command) + lacp_output = self.do_cmd(self.show_lacp_command) info['lacp'] = self.parse_lacp(lacp_output) - bgp_neig_output = self.do_cmd('show ip bgp neighbors') + bgp_neig_output = self.do_cmd(self.show_ip_bgp_command) info['bgp_neig'] = self.parse_bgp_neighbor(bgp_neig_output) v4_routing, bgp_route_v4_output = self.check_bgp_route(self.v4_routes) @@ -429,7 +426,7 @@ def parse_bgp_route(self, output, expects): return set(expects) == prefixes - def parse_supported_show_lacp_command(self, lacp_help): + def parse_supported_show_lacp_command(self): """ 'show lacp neighbor' is deprecated by 'show lacp peer' in high EOS versions, so if 'show lacp neighbor' is supported, use 'show lacp neighbor' @@ -447,11 +444,47 @@ def parse_supported_show_lacp_command(self, lacp_help): Returns: str: rest command of 'show lacp ', neighbor or peer """ - + lacp_help = self.do_cmd('show lacp ?') for line in lacp_help.split('\n'): if re.match('neighbor *Display.*', line.strip()): - return 'neighbor' - return 'peer' + suffix = 'neighbor' + break + else: + suffix = 'peer' + # sent 'show lacp ?' in previous step already, so there are 'show lacp' in cmd ssh pipe + # only need to send 'peer' or 'neighbor' to complete the command + # don't send whole command('show lacp peer/neighbor') instead, it will mess the output pipe up + # Run the command just to complete the waiting prompt, and do nothing with the output. + self.do_cmd(suffix) + show_lacp_command = "show lacp {}".format(suffix) + self.log("show lacp command is '{}'".format(show_lacp_command)) + return show_lacp_command + + + def parse_supported_bgp_neighbor_command(self, v4=True): + help_cmd = "show ip bgp ?" if v4 else "show ipv6 bgp ?" + ip_bgp_help = self.do_cmd(help_cmd) + for line in ip_bgp_help.split('\n'): + if re.match('neighbors *BGP Neighbor information', line.strip()): + # if help regex contains: + # "neighbors BGP Neighbor information" + suffix = 'neighbors' + break + else: + # if help regex contains: + # "peers BGP neighbor information" + suffix = 'peers' + # Run the command just to complete the waiting prompt, and do nothing with the output. 
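The reworked parse_supported_show_lacp_command above boils down to a for/else scan of the 'show lacp ?' help text: pick 'neighbor' when the help output advertises it, otherwise fall back to 'peer', then send just that keyword to complete the command already sitting in the ssh pipe. A standalone illustration of the detection step (the sample help text is fabricated, and the real method replays the keyword through do_cmd() rather than returning immediately):

    import re

    def pick_lacp_suffix(lacp_help):
        """Return 'neighbor' if the EOS help output lists it, else fall back to 'peer'."""
        for line in lacp_help.split('\n'):
            if re.match(r'neighbor *Display.*', line.strip()):
                return 'neighbor'
        # equivalent to the for/else in the hunk above: reached only when no line matched
        return 'peer'

    sample_help = """
      counters   Display LACP counters
      neighbor   Display LACP neighbor information
      sys-id     Display LACP system identifier
    """
    assert pick_lacp_suffix(sample_help) == 'neighbor'
    assert pick_lacp_suffix("  peer   Display LACP peer information") == 'peer'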
+ self.do_cmd(suffix) + if v4: + show_bgp_neighbors_cmd = "show ip bgp {}".format(suffix) + self.log("show ip bgp neighbor command is '{}'".format(show_bgp_neighbors_cmd)) + else: + show_bgp_neighbors_cmd = "show ipv6 bgp {}".format(suffix) + self.log("show ipv6 bgp neighbor command is '{}'".format(show_bgp_neighbors_cmd)) + + return show_bgp_neighbors_cmd + def check_bgp_route(self, expects, ipv6=False): cmd = 'show ip route {} | json' @@ -469,7 +502,7 @@ def get_bgp_info(self): # Retreive BGP info (peer addr, AS) for the dut and neighbor neigh_bgp = {} dut_bgp = {} - for cmd, ver in [('show ip bgp neighbors', 'v4'), ('show ipv6 bgp neighbors', 'v6')]: + for cmd, ver in [(self.show_ip_bgp_command, 'v4'), (self.show_ipv6_bgp_command, 'v6')]: output = self.do_cmd(cmd) if ver == 'v6': neigh_bgp[ver], dut_bgp[ver], neigh_bgp['asn'] = self.parse_bgp_info(output) diff --git a/ansible/roles/test/files/ptftests/dhcpv6_relay_test.py b/ansible/roles/test/files/ptftests/dhcpv6_relay_test.py index 511a300dff..25e2be80d9 100644 --- a/ansible/roles/test/files/ptftests/dhcpv6_relay_test.py +++ b/ansible/roles/test/files/ptftests/dhcpv6_relay_test.py @@ -11,9 +11,6 @@ IPv6 = scapy.layers.inet6.IPv6 -isc_solicit_count = 0 -isc_request_count = 0 - class DataplaneBaseTest(BaseTest): def __init__(self): BaseTest.__init__(self) @@ -168,17 +165,6 @@ def create_dhcp_solicit_relay_forward_packet(self): return solicit_relay_forward_packet - def isc_create_dhcp_solicit_relay_forward_packet(self): - - solicit_relay_forward_packet = Ether(src=self.relay_iface_mac) - solicit_relay_forward_packet /= IPv6() - solicit_relay_forward_packet /= UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_SERVER_PORT) - solicit_relay_forward_packet /= DHCP6_RelayForward(msgtype=12, linkaddr=self.vlan_ip, peeraddr=self.client_link_local) - solicit_relay_forward_packet /= DHCP6OptRelayMsg() - solicit_relay_forward_packet /= DHCP6_Solicit(trid=12345) - - return solicit_relay_forward_packet - def create_dhcp_advertise_packet(self): advertise_packet = Ether(src=self.relay_iface_mac, dst=self.client_mac) @@ -208,17 +194,6 @@ def create_dhcp_request_packet(self): return request_packet - def isc_create_dhcp_request_relay_forward_packet(self): - - request_relay_forward_packet = Ether(src=self.relay_iface_mac) - request_relay_forward_packet /= IPv6() - request_relay_forward_packet /= UDP(sport=self.DHCP_SERVER_PORT, dport=self.DHCP_SERVER_PORT) - request_relay_forward_packet /= DHCP6_RelayForward(msgtype=12, linkaddr=self.vlan_ip, peeraddr=self.client_link_local) - request_relay_forward_packet /= DHCP6OptRelayMsg() - request_relay_forward_packet /= DHCP6_Request(trid=12345) - - return request_relay_forward_packet - def create_dhcp_request_relay_forward_packet(self): request_relay_forward_packet = Ether(src=self.relay_iface_mac) @@ -263,25 +238,6 @@ def client_send_solicit(self): solicit_packet = self.create_dhcp_solicit_packet() testutils.send_packet(self, self.client_port_index, solicit_packet) - def verify_isc_relayed_solicit_relay_forward(self): - global isc_solicit_count - isc_solicit_relay_forward_packet = self.isc_create_dhcp_solicit_relay_forward_packet() - - # Temporary for isc-dhcp - isc_masked_packet = Mask(isc_solicit_relay_forward_packet) - isc_masked_packet.set_do_not_care_scapy(packet.Ether, "dst") - isc_masked_packet.set_do_not_care_scapy(IPv6, "src") - isc_masked_packet.set_do_not_care_scapy(IPv6, "dst") - isc_masked_packet.set_do_not_care_scapy(IPv6, "fl") - isc_masked_packet.set_do_not_care_scapy(IPv6, "tc") - 
isc_masked_packet.set_do_not_care_scapy(IPv6, "plen") - isc_masked_packet.set_do_not_care_scapy(IPv6, "nh") - isc_masked_packet.set_do_not_care_scapy(packet.UDP, "chksum") - isc_masked_packet.set_do_not_care_scapy(packet.UDP, "len") - - # Count number of packets relayed by isc-dhcp - isc_solicit_count = testutils.count_matched_packets_all_ports(self, isc_masked_packet, self.server_port_indices) - # Verify that the DHCP relay actually received and relayed the DHCPv6 SOLICIT message to all of # its known DHCP servers. def verify_relayed_solicit_relay_forward(self): @@ -304,8 +260,8 @@ def verify_relayed_solicit_relay_forward(self): # Count the number of these packets received on the ports connected to our leaves solicit_count = testutils.count_matched_packets_all_ports(self, masked_packet, self.server_port_indices) - self.assertTrue((solicit_count + isc_solicit_count) >= 1, - "Failed: Solicit count of %d" % (solicit_count + isc_solicit_count)) + self.assertTrue(solicit_count >= 1, + "Failed: Solicit count of %d" % solicit_count) # Simulate a DHCP server sending a DHCPv6 RELAY-REPLY encapsulating ADVERTISE packet message to client. # We do this by injecting a RELAY-REPLY encapsulating ADVERTISE message on the link connected to one @@ -336,25 +292,6 @@ def client_send_request(self): request_packet = self.create_dhcp_request_packet() testutils.send_packet(self, self.client_port_index, request_packet) - def verify_isc_relayed_request_relay_forward(self): - global isc_request_count - isc_request_relay_forward_packet = self.isc_create_dhcp_request_relay_forward_packet() - - # Temporary for isc-dhcp - isc_masked_packet = Mask(isc_request_relay_forward_packet) - isc_masked_packet.set_do_not_care_scapy(packet.Ether, "dst") - isc_masked_packet.set_do_not_care_scapy(IPv6, "src") - isc_masked_packet.set_do_not_care_scapy(IPv6, "dst") - isc_masked_packet.set_do_not_care_scapy(IPv6, "fl") - isc_masked_packet.set_do_not_care_scapy(IPv6, "tc") - isc_masked_packet.set_do_not_care_scapy(IPv6, "plen") - isc_masked_packet.set_do_not_care_scapy(IPv6, "nh") - isc_masked_packet.set_do_not_care_scapy(packet.UDP, "chksum") - isc_masked_packet.set_do_not_care_scapy(packet.UDP, "len") - - # Count number of packets relayed by isc-dhcp - isc_request_count = testutils.count_matched_packets_all_ports(self, isc_masked_packet, self.server_port_indices) - # Verify that the DHCP relay actually received and relayed the DHCPv6 REQUEST message to all of # its known DHCP servers. def verify_relayed_request_relay_forward(self): @@ -377,8 +314,8 @@ def verify_relayed_request_relay_forward(self): # Count the number of these packets received on the ports connected to our leaves request_count = testutils.count_matched_packets_all_ports(self, masked_packet, self.server_port_indices) - self.assertTrue((request_count + isc_request_count) >= 1, - "Failed: Request count of %d" % (request_count)) + self.assertTrue(request_count >= 1, + "Failed: Request count of %d" % request_count) # Simulate a DHCP server sending a DHCPv6 RELAY-REPLY encapsulating REPLY packet message to client. 
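The relayed-packet checks kept above (verify_relayed_solicit_relay_forward / verify_relayed_request_relay_forward) all follow the same PTF pattern: build the expected RELAY-FORWARD packet, wrap it in a Mask, mark every field the relay legitimately rewrites as don't-care, and count matches across all server-facing ports. A condensed sketch of that pattern (meant to run inside a PTF test case; the field list is abbreviated relative to the tests above):

    from ptf import testutils
    from ptf.mask import Mask
    import ptf.packet as packet
    from scapy.layers.inet6 import IPv6

    def count_relayed(test, expected_pkt, server_ports):
        """Count copies of expected_pkt on server_ports, ignoring fields the relay rewrites."""
        masked = Mask(expected_pkt)
        masked.set_do_not_care_scapy(packet.Ether, "dst")   # next-hop MAC is chosen by the relay
        masked.set_do_not_care_scapy(IPv6, "src")           # relay sources from its own address
        masked.set_do_not_care_scapy(IPv6, "dst")           # one copy per configured DHCP server
        masked.set_do_not_care_scapy(packet.UDP, "chksum")  # recomputed after the rewrite
        return testutils.count_matched_packets_all_ports(test, masked, server_ports)

    # Typical assertion, as in the hunks above:
    #     self.assertTrue(count_relayed(self, pkt, self.server_port_indices) >= 1, "no relayed copy seen")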
def server_send_reply_relay_reply(self): @@ -402,15 +339,11 @@ def verify_relayed_reply(self): testutils.verify_packet(self, masked_packet, self.client_port_index) def runTest(self): - self.client_send_solicit() - self.verify_isc_relayed_solicit_relay_forward() self.client_send_solicit() self.verify_relayed_solicit_relay_forward() self.server_send_advertise_relay_reply() self.verify_relayed_advertise() self.client_send_request() - self.verify_isc_relayed_request_relay_forward() - self.client_send_request() self.verify_relayed_request_relay_forward() self.server_send_reply_relay_reply() self.verify_relayed_reply() diff --git a/ansible/roles/test/files/ptftests/dir_bcast_test.py b/ansible/roles/test/files/ptftests/dir_bcast_test.py index 66e40909c8..cd460d4a3f 100644 --- a/ansible/roles/test/files/ptftests/dir_bcast_test.py +++ b/ansible/roles/test/files/ptftests/dir_bcast_test.py @@ -128,7 +128,7 @@ def check_ip_dir_bcast(self, dst_bcast_ip, dst_port_list): Check if broadcast packet is received on all member ports of vlan ''' logging.info("Received " + str(pkt_count) + " broadcast packets, expecting " + str(len(dst_port_list))) - assert (pkt_count == len(dst_port_list)) + assert (pkt_count == len(dst_port_list)), "received {} expected {}".format(pkt_count, len(dst_port_list)) return @@ -171,7 +171,7 @@ def check_bootp_dir_bcast(self, dst_bcast_ip, dst_port_list): Check if broadcast BOOTP packet is received on all member ports of vlan ''' logging.info("Received " + str(pkt_count) + " broadcast BOOTP packets, expecting " + str(len(dst_port_list))) - assert (pkt_count == len(dst_port_list)) + assert (pkt_count == len(dst_port_list)), "received {} expected {}".format(pkt_count, len(dst_port_list)) return diff --git a/ansible/roles/test/files/ptftests/fg_ecmp_test.py b/ansible/roles/test/files/ptftests/fg_ecmp_test.py index c121d48fea..d4469cb314 100644 --- a/ansible/roles/test/files/ptftests/fg_ecmp_test.py +++ b/ansible/roles/test/files/ptftests/fg_ecmp_test.py @@ -27,6 +27,11 @@ import ptf.testutils as testutils from ptf.testutils import * +import lpm + +IPV4_SRC_IP_RANGE = ['8.0.0.0', '8.255.255.255'] +IPV6_SRC_IP_RANGE = ['20D0:A800:0:00::', '20D0:FFFF:0:00::FFFF'] + PERSIST_MAP = '/tmp/fg_ecmp_persist_map.json' class FgEcmpTest(BaseTest): @@ -99,7 +104,8 @@ def setUp(self): self.router_mac = graph['dut_mac'] self.num_flows = graph['num_flows'] self.inner_hashing = graph['inner_hashing'] - + self.src_ipv4_interval = lpm.LpmDict.IpInterval(ipaddress.ip_address(unicode(IPV4_SRC_IP_RANGE[0])), ipaddress.ip_address(unicode(IPV4_SRC_IP_RANGE[1]))) + self.src_ipv6_interval = lpm.LpmDict.IpInterval(ipaddress.ip_address(unicode(IPV6_SRC_IP_RANGE[0])), ipaddress.ip_address(unicode(IPV6_SRC_IP_RANGE[1]))) self.log(self.net_ports) self.log(self.serv_ports) self.log(self.exp_port_set_one) @@ -134,15 +140,6 @@ def test_balancing(self, hit_count_map): def fg_ecmp(self): ipv4 = isinstance(ipaddress.ip_address(self.dst_ip.decode('utf8')), ipaddress.IPv4Address) - - if self.inner_hashing: - base_ip = ipaddress.ip_address(u'8.0.0.0') - else: - if isinstance(ipaddress.ip_address(self.dst_ip.decode('utf8')), ipaddress.IPv4Address): - base_ip = ipaddress.ip_address(u'8.0.0.0') - else: - base_ip = ipaddress.ip_address(u'20D0:A800:0:00::') - # initialize all parameters if self.inner_hashing: dst_ip = '5.5.5.5' @@ -168,7 +165,10 @@ def fg_ecmp(self): # and generate a flow to port map self.log("Creating flow to port map ...") for i in range(0, self.num_flows): - src_ip = str(base_ip + i) + if ipv4: + src_ip = 
self.src_ipv4_interval.get_random_ip() + else: + src_ip = self.src_ipv6_interval.get_random_ip() if self.inner_hashing: in_port = random.choice(self.net_ports) else: diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py b/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py index e5b5bd884e..7e7990fd13 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py @@ -146,7 +146,7 @@ def flush_rsyslogd(self): 17516 /usr/sbin/rsyslogd -n ''' pid = None - out = subprocess.check_output("systemctl status rsyslog", shell=True) + out = str(subprocess.check_output("systemctl status rsyslog", shell=True)) for l in out.split('\n'): m = re.search(re_rsyslog_pid, l) if m: diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt index 358712120c..a836150a75 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt @@ -8,4 +8,6 @@ r, ".* NOTICE kernel:.*profile=""/usr/sbin/ntpd"" name=""sbin"" pid=.* comm=""nt r, ".* ERR snmp#snmp-subagent.*" r, ".* ERR route_check.py.*" r, ".* INFO mgmt-framework#supervisord: rest-server.*" -r, ".* ERR radv#radvd.* Exiting, privsep_read_loop.*" \ No newline at end of file +r, ".* ERR radv#radvd.* Exiting, privsep_read_loop.*" +r, ".* ERR ntpd.*bind.*AF_INET6.*" +r, ".* ERR ntpd.*unable to create socket on.*" \ No newline at end of file diff --git a/ansible/roles/vm_set/library/ceos_network.py b/ansible/roles/vm_set/library/ceos_network.py index 9f8078cbde..340348279d 100644 --- a/ansible/roles/vm_set/library/ceos_network.py +++ b/ansible/roles/vm_set/library/ceos_network.py @@ -1,6 +1,5 @@ #!/usr/bin/python -import datetime import json import logging import subprocess @@ -9,6 +8,7 @@ import docker +from ansible.module_utils.debug_utils import config_module_logging from ansible.module_utils.basic import * DOCUMENTATION = ''' @@ -52,13 +52,7 @@ MGMT_TAP_TEMPLATE = '%s-m' INT_TAP_TEMPLATE = 'eth%d' - -def config_logging(): - curtime = datetime.datetime.now().isoformat() - logging.basicConfig( - filename=CMD_DEBUG_FNAME % curtime, - format='%(asctime)s %(levelname)s #%(lineno)d: %(message)s', - level=logging.DEBUG) +config_module_logging('ceos_network') class CeosNetwork(object): @@ -364,8 +358,6 @@ def main(): fp_mtu = module.params['fp_mtu'] max_fp_num = module.params['max_fp_num'] - config_logging() - try: cnet = CeosNetwork(name, vm_name, mgmt_bridge, fp_mtu, max_fp_num) diff --git a/ansible/roles/vm_set/library/kickstart.py b/ansible/roles/vm_set/library/kickstart.py index b884ccb6d3..b2e17aa7e3 100644 --- a/ansible/roles/vm_set/library/kickstart.py +++ b/ansible/roles/vm_set/library/kickstart.py @@ -1,7 +1,8 @@ #!/usr/bin/python -import datetime from telnetlib import Telnet +import logging +from ansible.module_utils.debug_utils import config_module_logging def encode(arg): if (sys.version_info.major == 3 and sys.version_info.minor >= 5): @@ -9,35 +10,7 @@ def encode(arg): else: return arg - -class MyDebug(object): - def __init__(self, filename, enabled=True): - if enabled: - self.fp = open(filename, 'w') - else: - self.fp = None - - return - - def cleanup(self): - if self.fp: - self.fp.close() - self.fp = None - - return - - def __del__(self): - self.cleanup() - - return - - def debug(self, msg): - if self.fp: - self.fp.write('%s\n' % msg) - self.fp.flush() - - 
return - +config_module_logging('kickstart') class EMatchNotFound(Exception): pass @@ -56,10 +29,9 @@ class ENotInEnabled(Exception): class SerialSession(object): - def __init__(self, port, debug): + def __init__(self, port): self.enabled = False - self.d = debug - self.d.debug('Starting') + logging.debug('Starting') self.tn = Telnet('127.0.0.1', port) self.tn.write(encode('\r\n')) @@ -74,17 +46,15 @@ def cleanup(self): if self.tn: self.tn.close() self.tn = None - self.d.cleanup() - return def pair(self, action, wait_for, timeout): - self.d.debug('output: %s' % action) - self.d.debug('match: %s' % ",".join(wait_for)) + logging.debug('output: %s' % action) #lgtm [py/clear-text-logging-sensitive-data] + logging.debug('match: %s' % ",".join(wait_for)) self.tn.write(encode("%s\n" % action)) if wait_for is not None: index, match, text = self.tn.expect([encode(i) for i in wait_for], timeout) - self.d.debug('Result of matching: %d %s %s' % (index, str(match), text)) + logging.debug('Result of matching: %d %s %s' % (index, str(match), text)) if index == -1: raise EMatchNotFound else: @@ -94,20 +64,20 @@ def pair(self, action, wait_for, timeout): def login(self, user, password): try: - self.d.debug('## Getting the login prompt') + logging.debug('## Getting the login prompt') self.pair('\r', [r'login:'], 240) except EMatchNotFound: - self.d.debug('No login prompt is found') + logging.debug('No login prompt is found') raise ELoginPromptNotFound - self.d.debug('## Getting the password prompt') + logging.debug('## Getting the password prompt') index_password = self.pair(user, [r'assword:', r'>'], 20) if index_password == 0: try: - self.d.debug('## Inputing password') + logging.debug('## Inputing password') self.pair(password, [r'>'], 10) except EMatchNotFound: - self.d.debug('The original password "%s" is not working' % password) + logging.debug('The original password "%s" is not working' % password) #lgtm [py/clear-text-logging-sensitive-data] raise EWrongDefaultPassword return @@ -170,9 +140,7 @@ def session(new_params): ('aaa root secret 0 %s' % str(new_params['new_root_password']), [r'\(config\)#']), ] - curtime = datetime.datetime.now().isoformat() - debug = MyDebug('/tmp/debug.%s.%s.txt' % (new_params['hostname'], curtime), enabled=True) - ss = SerialSession(new_params['telnet_port'], debug) + ss = SerialSession(new_params['telnet_port']) ss.login(new_params['login'], new_params['password']) ss.enable() ss.wait_for_warmup() diff --git a/ansible/roles/vm_set/library/sonic_kickstart.py b/ansible/roles/vm_set/library/sonic_kickstart.py index 8d268e98e5..2f5500b4fd 100644 --- a/ansible/roles/vm_set/library/sonic_kickstart.py +++ b/ansible/roles/vm_set/library/sonic_kickstart.py @@ -1,36 +1,10 @@ #!/usr/bin/python -import datetime from telnetlib import Telnet +import logging +from ansible.module_utils.debug_utils import config_module_logging - -class MyDebug(object): - def __init__(self, filename, enabled=True): - if enabled: - self.fp = open(filename, 'w') - else: - self.fp = None - - return - - def cleanup(self): - if self.fp: - self.fp.close() - self.fp = None - - return - - def __del__(self): - self.cleanup() - - return - - def debug(self, msg): - if self.fp: - self.fp.write('%s\n' % msg) - self.fp.flush() - - return +config_module_logging('sonic_kickstart') class EMatchNotFound(Exception): @@ -38,9 +12,8 @@ class EMatchNotFound(Exception): class SerialSession(object): - def __init__(self, port, debug): - self.d = debug - self.d.debug('Starting') + def __init__(self, port): + 
logging.debug('Starting') self.tn = Telnet('127.0.0.1', port) self.tn.write(b"\r\n") @@ -55,17 +28,16 @@ def cleanup(self): if self.tn: self.tn.close() self.tn = None - self.d.cleanup() return def pair(self, action, wait_for, timeout=60): - self.d.debug('output: %s' % action) - self.d.debug('match: %s' % ",".join(wait_for)) + logging.debug('output: %s' % action) #lgtm [py/clear-text-logging-sensitive-data] + logging.debug('match: %s' % ",".join(wait_for)) self.tn.write(b"%s\n" % action.encode('ascii')) if wait_for is not None: index, match, text = self.tn.expect([ x.encode('ascii') for x in wait_for ], timeout) - self.d.debug('Result of matching: %d %s %s' % (index, str(match), text)) + logging.debug('Result of matching: %d %s %s' % (index, str(match), text)) if index == -1: raise EMatchNotFound else: @@ -126,9 +98,7 @@ def session(new_params): if int(new_params['num_asic']) > 1: seq.pop(0) - curtime = datetime.datetime.now().isoformat() - debug = MyDebug('/tmp/debug.%s.%s.txt' % (new_params['hostname'], curtime), enabled=True) - ss = SerialSession(new_params['telnet_port'], debug) + ss = SerialSession(new_params['telnet_port']) ss.login(new_params['login'], new_params['passwords']) ss.configure(seq) ss.logout() diff --git a/ansible/roles/vm_set/library/vlan_port.py b/ansible/roles/vm_set/library/vlan_port.py index 4120b3f3c1..5182b25b34 100644 --- a/ansible/roles/vm_set/library/vlan_port.py +++ b/ansible/roles/vm_set/library/vlan_port.py @@ -47,6 +47,7 @@ def up_external_port(self): def create_vlan_port(self, port, vlan_id): vlan_port = "%s.%d" % (port, vlan_id) if vlan_port not in self.host_ifaces: + VlanPort.cmd('vconfig rem %s' % vlan_port, True) VlanPort.cmd('vconfig add %s %d' % (port, vlan_id)) VlanPort.iface_up(vlan_port) @@ -101,7 +102,7 @@ def iface_updown(iface_name, state, pid): return VlanPort.cmd('nsenter -t %s -n ip link set %s %s' % (pid, iface_name, state)) @staticmethod - def cmd(cmdline): + def cmd(cmdline, ignore_error=False): with open(CMD_DEBUG_FNAME, 'a') as fp: pprint("CMD: %s" % cmdline, fp) cmd = cmdline.split(' ') @@ -109,11 +110,14 @@ def cmd(cmdline): stdout, stderr = process.communicate() ret_code = process.returncode - if ret_code != 0: + if ret_code != 0 and not ignore_error: raise Exception("ret_code=%d, error message=%s. cmd=%s" % (ret_code, stderr, cmdline)) with open(CMD_DEBUG_FNAME, 'a') as fp: - pprint("OUTPUT: %s" % stdout, fp) + if ret_code == 0: + pprint("OUTPUT: %s" % stdout, fp) + else: + pprint("ERR: %s" % stderr, fp) return stdout.decode('utf-8') diff --git a/ansible/roles/vm_set/library/vm_topology.py b/ansible/roles/vm_set/library/vm_topology.py index 0c435689d1..ccf7610254 100644 --- a/ansible/roles/vm_set/library/vm_topology.py +++ b/ansible/roles/vm_set/library/vm_topology.py @@ -1,7 +1,5 @@ #!/usr/bin/python -import datetime -import logging import hashlib import json import re @@ -9,9 +7,10 @@ import shlex import time import traceback - +import logging import docker +from ansible.module_utils.debug_utils import config_module_logging from ansible.module_utils.basic import * DOCUMENTATION = ''' @@ -126,6 +125,8 @@ ROOT_BACK_BR_TEMPLATE = 'br-b-%s' PTF_FP_IFACE_TEMPLATE = 'eth%d' RETRIES = 10 +# name of interface must be less than or equal to 15 bytes. 
+MAX_INTF_LEN = 15 VS_CHASSIS_INBAND_BRIDGE_NAME = "br-T2Inband" VS_CHASSIS_MIDPLANE_BRIDGE_NAME = "br-T2Midplane" @@ -136,11 +137,7 @@ SUB_INTERFACE_VLAN_ID = '10' -def config_logging(): - curtime = datetime.datetime.now().isoformat() - logging.basicConfig(filename=CMD_DEBUG_FNAME % curtime, - format='%(asctime)s %(levelname)s %(name)s#%(lineno)d: %(message)s', - level=logging.DEBUG) +config_module_logging('vm_topology') def adaptive_name(template, host, index): @@ -260,6 +257,9 @@ def init(self, vm_set_name, vm_base, duts_fp_ports, duts_name, ptf_exists=True): raise Exception("Wrong vlans parameter for hostname %s, vm %s. Too many vlans. Maximum is %d" % (hostname, vmname, len(vm_bridges))) self._is_multi_duts = True if len(self.duts_name) > 1 else False + # For now distinguish a cable topology since it does not contain any vms and there are two ToR's + self._is_cable = True if len(self.duts_name) > 1 and 'VMs' not in self.topo else False + if 'host_interfaces' in self.topo: self.host_interfaces = self.topo['host_interfaces'] else: @@ -387,16 +387,9 @@ def add_mgmt_port_to_docker(self, mgmt_bridge, mgmt_ip, mgmt_gw, mgmt_ipv6_addr= self.pid = api_server_pid if VMTopology.intf_not_exists(MGMT_PORT_NAME, self.pid): if api_server_pid is None: - tmp_mgmt_if = hashlib.md5((PTF_NAME_TEMPLATE % self.vm_set_name).encode("utf-8")).hexdigest()[0:6] + MGMT_PORT_NAME - self.add_br_if_to_docker(mgmt_bridge, PTF_MGMT_IF_TEMPLATE % self.vm_set_name, tmp_mgmt_if) + self.add_br_if_to_docker(mgmt_bridge, PTF_MGMT_IF_TEMPLATE % self.vm_set_name, MGMT_PORT_NAME) else: - tmp_mgmt_if = hashlib.md5(('apiserver').encode("utf-8")).hexdigest()[0:6] + MGMT_PORT_NAME - self.add_br_if_to_docker(mgmt_bridge, 'apiserver', tmp_mgmt_if) - - VMTopology.iface_down(tmp_mgmt_if, self.pid) - VMTopology.cmd("nsenter -t %s -n ip link set dev %s name %s" % (self.pid, tmp_mgmt_if, MGMT_PORT_NAME)) - - VMTopology.iface_up(MGMT_PORT_NAME, self.pid) + self.add_br_if_to_docker(mgmt_bridge, 'apiserver', MGMT_PORT_NAME) self.add_ip_to_docker_if(MGMT_PORT_NAME, mgmt_ip, mgmt_ipv6_addr=mgmt_ipv6_addr, mgmt_gw=mgmt_gw, mgmt_gw_v6=mgmt_gw_v6, api_server_pid=api_server_pid) def add_bp_port_to_docker(self, mgmt_ip, mgmt_ipv6): @@ -405,9 +398,11 @@ def add_bp_port_to_docker(self, mgmt_ip, mgmt_ipv6): VMTopology.iface_disable_txoff(BP_PORT_NAME, self.pid) def add_br_if_to_docker(self, bridge, ext_if, int_if): - logging.info('=== For veth pair, add %s to bridge %s, set %s to PTF docker' % (ext_if, bridge, int_if)) + # add unique suffix to int_if to support multiple tasks run concurrently + tmp_int_if = int_if + VMTopology._generate_fingerprint(ext_if, MAX_INTF_LEN-len(int_if)) + logging.info('=== For veth pair, add %s to bridge %s, set %s to PTF docker, tmp intf %s' % (ext_if, bridge, int_if, tmp_int_if)) if VMTopology.intf_not_exists(ext_if): - VMTopology.cmd("ip link add %s type veth peer name %s" % (ext_if, int_if)) + VMTopology.cmd("ip link add %s type veth peer name %s" % (ext_if, tmp_int_if)) _, if_to_br = VMTopology.brctl_show(bridge) if ext_if not in if_to_br: @@ -415,8 +410,9 @@ def add_br_if_to_docker(self, bridge, ext_if, int_if): VMTopology.iface_up(ext_if) - if VMTopology.intf_exists(int_if) and VMTopology.intf_not_exists(int_if, self.pid): - VMTopology.cmd("ip link set netns %s dev %s" % (self.pid, int_if)) + if VMTopology.intf_exists(tmp_int_if) and VMTopology.intf_not_exists(tmp_int_if, self.pid): + VMTopology.cmd("ip link set netns %s dev %s" % (self.pid, tmp_int_if)) + VMTopology.cmd("nsenter -t %s -n ip link set dev %s name 
%s" % (self.pid, tmp_int_if, int_if)) VMTopology.iface_up(int_if, self.pid) @@ -772,7 +768,7 @@ def add_host_ports(self): for dual-tor topo, create ovs port and add to ptf docker. """ for i, intf in enumerate(self.host_interfaces): - if self._is_multi_duts: + if self._is_multi_duts and not self._is_cable: if isinstance(intf, list): # create veth link and inject one end into the ptf docker # If host interface index is explicitly specified by "@x" (len(intf[0]==3), use host interface @@ -793,6 +789,26 @@ def add_host_ports(self): fp_port = self.duts_fp_ports[self.duts_name[intf[0]]][str(intf[1])] ptf_if = PTF_FP_IFACE_TEMPLATE % host_ifindex self.add_dut_if_to_docker(ptf_if, fp_port) + elif self._is_multi_duts and self._is_cable: + # Since there could be multiple ToR's in cable topology, some Ports + # can be connected to muxcable and some to a DAC cable. But it could + # be possible that not all ports have cables connected. So for whichever + # port link is connected and has a vlan associated, inject them to container + # with the enumeration in topo file + # essentially mux ports will map to one port and DAC ports will map to different + # ports in a dualtor setup. Here implicit is taken that + # interface index is explicitly specified by "@x" format + host_ifindex = intf[0][2] + if self.duts_fp_ports[self.duts_name[intf[0][0]]].get(str(intf[0][1])) is not None: + fp_port = self.duts_fp_ports[self.duts_name[intf[0][0]]][str(intf[0][1])] + ptf_if = PTF_FP_IFACE_TEMPLATE % host_ifindex + self.add_dut_if_to_docker(ptf_if, fp_port) + + host_ifindex = intf[1][2] + if self.duts_fp_ports[self.duts_name[intf[1][0]]].get(str(intf[1][1])) is not None: + fp_port = self.duts_fp_ports[self.duts_name[intf[1][0]]][str(intf[1][1])] + ptf_if = PTF_FP_IFACE_TEMPLATE % host_ifindex + self.add_dut_if_to_docker(ptf_if, fp_port) else: fp_port = self.duts_fp_ports[self.duts_name[0]][str(intf)] ptf_if = PTF_FP_IFACE_TEMPLATE % intf @@ -827,6 +843,19 @@ def remove_host_ports(self): vlan_id = self.vlan_ids[str(intf)] self.remove_dut_vlan_subif_from_docker(ptf_if, vlan_separator, vlan_id) + @staticmethod + def _generate_fingerprint(name, digit=6): + """ + Generate fingerprint + Args: + name (str): name + digit (int): digit of fingerprint, e.g. 6 + + Returns: + str: fingerprint, e.g. 
a9d24d + """ + return hashlib.md5(name.encode("utf-8")).hexdigest()[0:digit] + @staticmethod def _intf_cmd(intf, pid=None): if pid: @@ -1188,8 +1217,6 @@ def main(): if cmd == 'bind_keysight_api_server_ip': vm_names = [] - config_logging() - try: topo = module.params['topo'] diff --git a/ansible/roles/vm_set/tasks/add_topo.yml b/ansible/roles/vm_set/tasks/add_topo.yml index c4eabaf0e8..23acb9aeae 100644 --- a/ansible/roles/vm_set/tasks/add_topo.yml +++ b/ansible/roles/vm_set/tasks/add_topo.yml @@ -236,6 +236,7 @@ when: - topo != 'fullmesh' - not 'ptf' in topo + - not 'cable' in topo - name: Start mux simulator include_tasks: control_mux_simulator.yml diff --git a/ansible/roles/vm_set/tasks/control_mux_simulator.yml b/ansible/roles/vm_set/tasks/control_mux_simulator.yml index f3b1ef7e46..f5a14b2703 100644 --- a/ansible/roles/vm_set/tasks/control_mux_simulator.yml +++ b/ansible/roles/vm_set/tasks/control_mux_simulator.yml @@ -46,6 +46,5 @@ state: stopped become: yes ignore_errors: yes - when: record_file_content.rc != 0 when: mux_simulator_action == "stop" diff --git a/ansible/roles/vm_set/tasks/main.yml b/ansible/roles/vm_set/tasks/main.yml index 7d5b4475da..73f123158d 100644 --- a/ansible/roles/vm_set/tasks/main.yml +++ b/ansible/roles/vm_set/tasks/main.yml @@ -206,6 +206,13 @@ - ptf_imagename is defined - ptf_imagename == "docker-ptf-anvl" +- name: VMs not needed for cable test + set_fact: + vm_required: false + VM_hosts: + when: + - topo == "cable-test" + - name: Retrieve a list of the defined VMs virt: command=list_vms uri=qemu:///system diff --git a/ansible/roles/vm_set/tasks/ptf_change_mac.yml b/ansible/roles/vm_set/tasks/ptf_change_mac.yml index 9666ad3d6e..2ecd6951dd 100644 --- a/ansible/roles/vm_set/tasks/ptf_change_mac.yml +++ b/ansible/roles/vm_set/tasks/ptf_change_mac.yml @@ -18,6 +18,15 @@ groups: - ptf_host +- name: wait until ptf is reachable + wait_for: + port: 22 + host: "{{ ptf_host_ip }}" + state: started + delay: 0 + timeout: 300 + delegate_to: "localhost" + - name: Change PTF interface MAC addresses script: change_mac.sh delegate_to: "{{ ptf_host }}" diff --git a/ansible/roles/vm_set/tasks/renumber_topo.yml b/ansible/roles/vm_set/tasks/renumber_topo.yml index b2a9e947d8..a954bf319c 100644 --- a/ansible/roles/vm_set/tasks/renumber_topo.yml +++ b/ansible/roles/vm_set/tasks/renumber_topo.yml @@ -20,9 +20,25 @@ vars: ptf_portchannel_action: stop + - name: Get infos of ptf container + docker_container_info: + name: ptf_{{ vm_set_name }} + register: ptf_docker_info + + - name: Flush ptf network info log + shell: | + date > /tmp/ptf_network_{{ vm_set_name }}.log + + - name: Collect ptf network info before deleting + shell: | + echo "before deleting" >> /tmp/ptf_network_{{ vm_set_name }}.log + ls /proc/{{ ptf_docker_info.container.State.Pid }}/net/vlan/ >> /tmp/ptf_network_{{ vm_set_name }}.log + echo "-----------------------------" >> /tmp/ptf_network_{{ vm_set_name }}.log + - name: Remove ptf container ptf_{{ vm_set_name }} docker_container: name: ptf_{{ vm_set_name }} + force_kill: yes state: absent become: yes @@ -34,6 +50,12 @@ become: yes when: docker_registry_username is defined and docker_registry_password is defined + - name: Collect ptf network info before recreating + shell: | + echo "Before recreating" >> /tmp/ptf_network_{{ vm_set_name }}.log + ls /proc/{{ ptf_docker_info.container.State.Pid }}/net/vlan/ >> /tmp/ptf_network_{{ vm_set_name }}.log + echo "-----------------------------" >> /tmp/ptf_network_{{ vm_set_name }}.log + - name: Create ptf container ptf_{{ 
vm_set_name }} docker_container: name: ptf_{{ vm_set_name }} diff --git a/ansible/templates/minigraph_cpg.j2 b/ansible/templates/minigraph_cpg.j2 index 3b92d7cbf7..ac5857e74f 100644 --- a/ansible/templates/minigraph_cpg.j2 +++ b/ansible/templates/minigraph_cpg.j2 @@ -1,7 +1,7 @@ - + +{% if card_type is not defined or card_type != 'supervisor' %} -{% if card_type is not defined or card_type != 'supervisor' %} {% for index in range(vms_number) %} {% set vm=vms[index] %} {% if vm_topo_config['vm'][vm]['peer_ipv4'][dut_index|int] %} @@ -51,26 +51,27 @@ {% endif %} {% endif %} {% endfor %} -{% for asic in asic_topo_config %} -{% for neigh_asic in asic_topo_config[asic]['neigh_asic'] %} -{% if asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv4'][0] %} +{% if (asic_topo_config and slot_num is defined and slot_num in asic_topo_config) or (asic_topo_config and slot_num is not defined) %} +{% for asic,asic_config in asic_topo_config[slot_num|default('slot0')].items() %} +{% for neigh_asic in asic_config['neigh_asic'] %} +{% if asic_config['neigh_asic'][neigh_asic]['peer_ipv4'][0] %} false {{ asic }} - {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['bgp_ipv4'][0] }} + {{ asic_config['neigh_asic'][neigh_asic]['bgp_ipv4'][0] }} {{ neigh_asic }} - {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv4'][0] }} + {{ asic_config['neigh_asic'][neigh_asic]['peer_ipv4'][0] }} 1 0 0 {% endif %} -{% if asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv6'][0] %} +{% if asic_config['neigh_asic'][neigh_asic]['peer_ipv6'][0] %} {{ asic }} - {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['bgp_ipv6'][0] }} + {{ asic_config['neigh_asic'][neigh_asic]['bgp_ipv6'][0] }} {{ neigh_asic }} - {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv6'][0] }} + {{ asic_config['neigh_asic'][neigh_asic]['peer_ipv6'][0] }} 1 0 0 @@ -79,20 +80,64 @@ {% endfor %} {% endfor %} {% endif %} -{% if switch_type is defined and switch_type == 'voq' %} -{% for all_idx in range(all_inbands|length) %} -{% if voq_inband_ip != all_inbands[all_idx] %} +{% if switch_type is defined and (switch_type == 'voq' or switch_type == 'chassis-packet') %} +{% set chassis_ibgp_peers = dict() %} +{% for asic_id in range(num_asics) %} +{% if num_asics == 1 %} +{% set start_rtr = inventory_hostname %} +{% else %} +{% set start_rtr = "ASIC" + asic_id|string %} +{% endif %} +{% for a_linecard in all_loopback4096 %} +{% for idx in range(all_loopback4096[a_linecard]|length) %} +{% if loopback4096_ip[asic_id] != all_loopback4096[a_linecard][idx] %} +{% if all_loopback4096[a_linecard]|length == 1 %} +{% set end_rtr = a_linecard %} +{% else %} +{% if a_linecard == inventory_hostname %} +{% set end_rtr = "ASIC" + idx|string %} +{% else %} +{% set end_rtr = a_linecard + "-ASIC" + idx|string %} +{% endif %} +{% endif %} +{% if switch_type == 'voq' %} +{% set _ = chassis_ibgp_peers.update({ all_inbands[a_linecard][idx].split('/')[0] : end_rtr }) %} +{% else %} +{% set _ = chassis_ibgp_peers.update({ all_loopback4096[a_linecard][idx].split('/')[0] : end_rtr }) %} +{% endif %} - {{ inventory_hostname }} - {{ voq_inband_ip.split('/')[0] }} - {{ all_hostnames[all_idx] }} - {{ all_inbands[all_idx].split('/')[0] }} + {{ start_rtr }} + {{ end_rtr }} +{% if switch_type == 'voq' %} + {{ voq_inband_ip[asic_id].split('/')[0] }} + {{ all_inbands[a_linecard][idx].split('/')[0] }} +{% else %} + {{ loopback4096_ip[asic_id].split('/')[0] }} + {{ all_loopback4096[a_linecard][idx].split('/')[0] }} +{% endif %} 1 0 0 - true + {{ switch_type }} + + 
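The chassis_ibgp_peers dictionary built above (and reused for the v6 sessions below and for the BGPPeer/router declarations further down) maps every remote inband address (VoQ) or Loopback4096 address (chassis-packet) to the peer's router name: the linecard hostname when it exposes a single address, "ASIC<idx>" when the address belongs to the local host, and "<linecard>-ASIC<idx>" otherwise. A Python paraphrase of that Jinja logic, using hypothetical linecard names and addresses:

    def ibgp_peer_name(linecard, idx, n_addrs, inventory_hostname):
        """Naming rule used above for the remote end of a chassis iBGP session."""
        if n_addrs == 1:
            return linecard
        if linecard == inventory_hostname:
            return "ASIC{}".format(idx)
        return "{}-ASIC{}".format(linecard, idx)

    def build_ibgp_peer_map(all_loopback4096, all_inbands, switch_type, inventory_hostname, local_lo4096):
        """Peer address -> peer name, skipping the local asic's own Loopback4096 address."""
        peers = {}
        for linecard, lo_addrs in all_loopback4096.items():
            for idx, lo_addr in enumerate(lo_addrs):
                if lo_addr == local_lo4096:
                    continue  # never peer with ourselves
                name = ibgp_peer_name(linecard, idx, len(lo_addrs), inventory_hostname)
                addr = all_inbands[linecard][idx] if switch_type == 'voq' else lo_addr
                peers[addr.split('/')[0]] = name
        return peers

    # Hypothetical two-linecard chassis, local host "lc1", local asic 0:
    lo_map = {"lc1": ["8.0.0.0/32", "8.0.0.1/32"], "lc2": ["8.0.0.2/32"]}
    inband = {"lc1": ["1.1.1.0/32", "1.1.1.1/32"], "lc2": ["1.1.1.2/32"]}
    print(build_ibgp_peer_map(lo_map, inband, "voq", "lc1", "8.0.0.0/32"))
    # {'1.1.1.1': 'ASIC1', '1.1.1.2': 'lc2'}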
{{ start_rtr }} + {{ end_rtr }} +{% if switch_type == 'voq' %} + {{ voq_inband_ipv6[asic_id].split('/')[0] }} + {{ all_inbands_ipv6[a_linecard][idx].split('/')[0] }} +{% else %} + {{ loopback4096_ipv6[asic_id].split('/')[0] }} + {{ all_loopback4096_ipv6[a_linecard][idx].split('/')[0] }} {% endif %} + 1 + 0 + 0 + {{ switch_type }} + +{% endif %} +{% endfor %} +{% endfor %} {% endfor %} {% endif %} @@ -112,16 +157,14 @@ {% endif %} {% endfor %} -{% if switch_type is defined and switch_type == 'voq' %} -{% for all_idx in range(all_inbands|length) %} -{% if voq_inband_ip != all_inbands[all_idx] %} +{% if num_asics == 1 and switch_type is defined and (switch_type == 'voq' or switch_type == 'chassis-packet') %} +{% for a_chassis_ibgp_peer in chassis_ibgp_peers %} -
{{ all_inbands[all_idx].split('/')[0] }}
+
{{ a_chassis_ibgp_peer }}
-{% endif %} {% endfor %} {% endif %} {% if 'tor' in vm_topo_config['dut_type'] | lower %} @@ -147,17 +190,6 @@ -{% if switch_type is defined and switch_type == 'voq' %} -{% for all_idx in range(all_inbands|length) %} -{% if voq_inband_ip != all_inbands[all_idx] %} - - {{ vm_topo_config['dut_asn'] }} - {{ all_hostnames[all_idx] }} - - -{% endif %} -{% endfor %} -{% endif %} {% for index in range( vms_number) %} {% if vm_topo_config['vm'][vms[index]]['intfs'][dut_index|int]|length > 0 %} @@ -168,13 +200,14 @@ {% endif %} {% endfor %} {% endif %} -{% for asic in asic_topo_config %} +{% if (asic_topo_config and slot_num is defined and slot_num in asic_topo_config) or (asic_topo_config and slot_num is not defined) %} +{% for asic,asic_config in asic_topo_config[slot_num|default('slot0')].items() %} {{ vm_topo_config['dut_asn'] }} {{ asic }} {% for index in range( vms_number) %} -{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %} +{% if vms[index] in vm_asic_ifnames and vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %}
{{ vm_topo_config['vm'][vms[index]]['peer_ipv4'][dut_index|int] }}
@@ -183,10 +216,10 @@
{% endif %} {% endfor %} -{% for neigh_asic in asic_topo_config %} -{% if neigh_asic in asic_topo_config[asic]['neigh_asic'] and asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv4'][0] %} +{% for neigh_asic in asic_config['neigh_asic'] %} +{% if neigh_asic in asic_config['neigh_asic'] and asic_config['neigh_asic'][neigh_asic]['peer_ipv4'][0] %} -
{{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv4'][0] }}
+
{{ asic_config['neigh_asic'][neigh_asic]['peer_ipv4'][0] }}
@@ -197,6 +230,54 @@
{% endfor %} +{% if switch_type is defined and (switch_type == 'voq' or switch_type == 'chassis-packet') %} +{% for a_linecard in all_loopback4096 %} +{% if a_linecard != inventory_hostname %} +{% for idx in range(all_loopback4096[a_linecard]|length) %} + + {{ vm_topo_config['dut_asn'] }} + {{ chassis_ibgp_peers[all_loopback4096[a_linecard][idx].split('/')[0]] }} + + +{% endfor %} +{% endif %} +{% endfor %} +{% if num_asics > 1 %} +{% for asic_id in range(num_asics) %} +{% set asic_name = "ASIC" + asic_id|string %} + + {{ vm_topo_config['dut_asn'] }} + {{ asic_name }} + +{% for index in range( vms_number) %} +{% if vms[index] in vm_asic_ifnames and vm_asic_ifnames[vms[index]][0].split('-')[1] == asic_name %} + +
{{ vm_topo_config['vm'][vms[index]]['peer_ipv4'][dut_index|int] }}
+ + + +
+{% endif %} +{% endfor %} +{% for a_linecard in all_loopback4096 %} +{% for idx in range(all_loopback4096[a_linecard]|length) %} +{% if loopback4096_ip[asic_id] != all_loopback4096[a_linecard][idx] %} + +
{{ all_loopback4096[a_linecard][idx] }}
+ + + +
+{% endif %} +{% endfor %} +{% endfor %} +
+
+{% endfor %} +{% endif %} +{% endif %} +{% endif %} +{% endif %}
diff --git a/ansible/templates/minigraph_device.j2 b/ansible/templates/minigraph_device.j2 index d4f34ab05d..e743123bfc 100644 --- a/ansible/templates/minigraph_device.j2 +++ b/ansible/templates/minigraph_device.j2 @@ -2,7 +2,7 @@ true -{% if card_type is not defined or card_type != 'supervisor' %} +{% if switch_type is not defined or switch_type != 'fabric' %} {% set num_of_intf = port_alias | length %} {% for index in range(num_of_intf) %} diff --git a/ansible/templates/minigraph_dpg.j2 b/ansible/templates/minigraph_dpg.j2 index 821bec08dc..1354f719c6 100644 --- a/ansible/templates/minigraph_dpg.j2 +++ b/ansible/templates/minigraph_dpg.j2 @@ -19,6 +19,26 @@ {{ lp_ipv6 }} + {% if num_asics == 1 and switch_type is defined and (switch_type == 'voq' or switch_type == 'chassis-packet') %} + {% if loopback4096_ip is defined %} + + HostIP1 + Loopback4096 + + {{ loopback4096_ip[0] }} + + {{ loopback4096_ip[0] }} + + + HostIP1 + Loopback4096 + + {{ loopback4096_ipv6[0] }} + + {{ loopback4096_ipv6[0] }} + + {% endif %} + {% endif %} {%- if 'addl_loopbacks' in dual_tor_facts -%} {%- for loopback_num in dual_tor_facts['addl_loopbacks'][inventory_hostname] -%} {%- set loopback_facts = dual_tor_facts['addl_loopbacks'][inventory_hostname][loopback_num] -%} @@ -41,7 +61,7 @@ {%- endfor -%} {%- endif -%} {% endif %} - + HostIP @@ -61,24 +81,24 @@ -{% if voq_inband_ip is defined or voq_inband_ipv6 is defined %} +{% if num_asics == 1 and (voq_inband_ip is defined or voq_inband_ipv6 is defined) %} {% if voq_inband_ip is defined %} - {{ voq_inband_intf }} + {{ voq_inband_intf[0] }} {{ voq_inband_type }} - {{ voq_inband_ip }} + {{ voq_inband_ip[0] }} {% endif %} {% if voq_inband_ipv6 is defined %} - {{ voq_inband_intf }} + {{ voq_inband_intf[0] }} {{ voq_inband_type }} - {{ voq_inband_ipv6 }} + {{ voq_inband_ipv6[0] }} {% endif %} -{% endif %} +{% endif %} diff --git a/ansible/templates/minigraph_dpg_asic.j2 b/ansible/templates/minigraph_dpg_asic.j2 index c20bce69ae..4bc9fde4c3 100644 --- a/ansible/templates/minigraph_dpg_asic.j2 +++ b/ansible/templates/minigraph_dpg_asic.j2 @@ -1,11 +1,16 @@ +{# Note max of 10 Backend Portchannel from one asic #} {% macro port_channel_id(asic_idx, neigh_asic_idx) -%} -{{ ((4000 + asic_idx + (10*neigh_asic_idx))|string) }} +{{ ((40 + 10 * asic_idx + neigh_asic_idx)|string) }} {%- endmacro -%} -{% for asic in asic_topo_config %} +{% if num_asics > 1 %} +{% if (asic_topo_config and slot_num is defined and slot_num in asic_topo_config) or (asic_topo_config and slot_num is not defined) %} +{% for asic,asic_config in asic_topo_config[slot_num|default('slot0')].items() %} {% set asic_index = asic.split('ASIC')[1]|int %} +{% set asic_name = "ASIC" + asic_index|string %} +{% if card_type is not defined or card_type != 'supervisor' %} HostIP Loopback0 @@ -22,16 +27,25 @@ {{ lp_ipv6 }} -{% for lo4096 in asic_topo_config[asic]['Loopback4096'] %} +{% if loopback4096_ip is defined %} HostIP1 Loopback4096 - {{ lo4096 }} + {{ loopback4096_ip[asic_index] }} - {{ lo4096 }} + {{ loopback4096_ip[asic_index] }} -{% endfor %} + + HostIP1 + Loopback4096 + + {{ loopback4096_ipv6[asic_index] }} + + {{ loopback4096_ipv6[asic_index] }} + +{% endif %} +{% endif %} @@ -52,14 +66,32 @@ +{% if voq_inband_ip is defined or voq_inband_ipv6 is defined %} + +{% if voq_inband_ip is defined %} + + {{ voq_inband_intf[asic_index] }} + {{ voq_inband_type }} + {{ voq_inband_ip[asic_index] }} + +{% endif %} +{% if voq_inband_ipv6 is defined %} + + {{ voq_inband_intf[asic_index] }} + {{ voq_inband_type }} + {{ 
voq_inband_ipv6[asic_index] }} + +{% endif %} + +{% endif %} - {{ asic }} + {{ asic_name }} {% if card_type is not defined or card_type != 'supervisor' %} {% for index in range(vms_number) %} -{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %} +{% if vms[index] in vm_asic_ifnames and vm_asic_ifnames[vms[index]][0].split('-')[1] == asic_name %} {% if 'port-channel' in vm_topo_config['vm'][vms[index]]['ip_intf'][dut_index|int]|lower %} {% set port_channel_intf=';'.join(vm_asic_ifnames[vms[index]]) %} @@ -70,29 +102,42 @@ {% endif %} {% endif %} {% endfor %} -{% for neigh_asic in asic_topo_config %} +{% endif %} +{%- set vlan_intfs = [] -%} +{% for neigh_asic in asic_config['neigh_asic'] %} {%- set pc_intfs = [] -%} -{%- if neigh_asic in asic_topo_config[asic]['neigh_asic'] and 'port-channel' in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower -%} -{%- for intf in asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0] %} +{# Assumption - Backed ASIC always have port-channel connectivity #} +{%- for intf in asic_config['neigh_asic'][neigh_asic]['asic_intfs'][0] %} {{- pc_intfs.append(intf) }} {%- endfor -%} {%- set port_channel_intf=pc_intfs|join(';') -%} {% set neigh_asic_index = neigh_asic.split('ASIC')[1]|int %} - PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(4) }} + PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(2) }} {{ port_channel_intf }} -{% endif %} +{% set vlan_intf = 'PortChannel' + port_channel_id(asic_index, neigh_asic_index).zfill(2) %} +{{- vlan_intfs.append(vlan_intf) -}} {% endfor %} -{% endif %} - +{% if card_type is defined and card_type == 'supervisor' and vlan_intfs %} + + + Vlan2 +{% set vlan_intf_str=';'.join(vlan_intfs) %} + {{ vlan_intf_str }} + Tagged + 2 + + +{% else %} +{% endif %} {% if card_type is not defined or card_type != 'supervisor' %} {% for index in range(vms_number) %} -{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %} +{% if vms[index] in vm_asic_ifnames and vm_asic_ifnames[vms[index]][0].split('-')[1] == asic_name %} {% if vm_topo_config['vm'][vms[index]]['ip_intf'][dut_index|int] is not none %} @@ -115,30 +160,84 @@ {% endif %} {% endif %} {% endfor %} -{% for neigh_asic in asic_topo_config[asic]['neigh_asic'] %} - - -{%- if 'port-channel' in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower %} +{% if switch_type is defined and switch_type == 'chassis-packet' %} + + +{% for neigh_asic in asic_config['neigh_asic'] %} {%- set neigh_asic_index = neigh_asic.split('ASIC')[1]|int %} - PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(4) }} -{% else %} - {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0][0] }} + + + PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(2) }} + 2 + dot1q + {{ asic_config['neigh_asic'][neigh_asic]['bgp_ipv4'][0] }}/{{ asic_config['neigh_asic'][neigh_asic]['ipv4mask'][0] }} + + + + PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(2) }} + 2 + dot1q + {{ asic_config['neigh_asic'][neigh_asic]['bgp_ipv6'][0] }}/{{ asic_config['neigh_asic'][neigh_asic]['ipv6mask'][0] }} + +{% endfor %} + + +{% for a_linecard in all_loopback4096 %} +{% for idx in range(all_loopback4096[a_linecard]|length) %} +{% set remote_asic_name = "ASIC" + idx|string %} +{% if a_linecard != inventory_hostname or idx != asic_index %} +{%- set nexthop_intfs = [] -%} +{%- set nexthop_v6_intfs = [] -%} +{% for asic,asic_config in asic_topo_config[all_slots[a_linecard]].items() %} +{% if 
(a_linecard != inventory_hostname and asic == remote_asic_name) or (a_linecard == inventory_hostname and asic != asic_name) %} +{% for neigh_asic in asic_config['neigh_asic'] %} +{% set _ = nexthop_intfs.append(asic_config['neigh_asic'][neigh_asic]['bgp_ipv4'][0]) %} +{% set _ = nexthop_v6_intfs.append(asic_config['neigh_asic'][neigh_asic]['bgp_ipv6'][0]) %} +{% endfor %} +{% endif %} +{% endfor %} + + IPNextHop + + {{ all_loopback4096[a_linecard][idx] }} + StaticRoute +
+{{- nexthop_intfs|join(',') -}} +
+
+ + IPNextHop + + {{ all_loopback4096_ipv6[a_linecard][idx] }} + StaticRoute +
+{{- nexthop_v6_intfs|join(',') -}} +
+
{% endif %} - {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['bgp_ipv4'][0] }}/{{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['ipv4mask'][0] }} +{% endfor %} +{% endfor %} +
+{% else %} +{% for neigh_asic in asic_config['neigh_asic'] %} +{%- set neigh_asic_index = neigh_asic.split('ASIC')[1]|int %} + + + PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(2) }} + {{ asic_config['neigh_asic'][neigh_asic]['bgp_ipv4'][0] }}/{{ asic_config['neigh_asic'][neigh_asic]['ipv4mask'][0] }} -{%- if 'port-channel' in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower %} -{%- set neigh_asic_index = neigh_asic.split('ASIC')[1]|int %} - PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(4) }} -{% else %} - {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0][0] }} -{% endif %} - {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['bgp_ipv6'][0] }}/{{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['ipv6mask'][0] }} + PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(2) }} + {{ asic_config['neigh_asic'][neigh_asic]['bgp_ipv6'][0] }}/{{ asic_config['neigh_asic'][neigh_asic]['ipv6mask'][0] }} {% endfor %} + {% endif %} +{% else %} + +{% endif %} @@ -166,41 +265,33 @@ {%- set acl_intfs = [] -%} {% if card_type is not defined or card_type != 'supervisor' %} {%- for index in range(vms_number) %} -{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %} +{% if vms[index] in vm_asic_ifnames and vm_asic_ifnames[vms[index]][0].split('-')[1] == asic_name %} {% if 'port-channel' in vm_topo_config['vm'][vms[index]]['ip_intf'][0]|lower %} {% set a_intf = 'PortChannel' + ((index+1) |string).zfill(4) %} {{- acl_intfs.append(a_intf) -}} {% endif %} {% endif %} {% endfor %} -{% for neigh_asic in asic_topo_config %} +{% for neigh_asic in asic_config['neigh_asic'] %} {% set neigh_asic_index = neigh_asic.split('ASIC')[1]|int %} -{%- if neigh_asic in asic_topo_config[asic]['neigh_asic'] and 'port-channel' in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower -%} -{% set a_intf = 'PortChannel' + port_channel_id(asic_index, neigh_asic_index).zfill(4) %} +{% set a_intf = 'PortChannel' + port_channel_id(asic_index, neigh_asic_index).zfill(2) %} {{- acl_intfs.append(a_intf) -}} -{% endif %} {% endfor %} - {%- for index in range(vms_number) -%} -{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %} -{% if 'port-channel' not in vm_topo_config['vm'][vms[index]]['ip_intf'][0]|lower %} +{% if vms[index] in vm_asic_ifnames and vm_asic_ifnames[vms[index]][0].split('-')[1] == asic_name %} {% if vm_topo_config['vm'][vms[index]]['intfs'][dut_index|int]|length %} {% set a_intf = front_panel_asic_ifnames[vm_topo_config['vm'][vms[index]]['interface_indexes'][dut_index|int][0]] %} {{- acl_intfs.append(a_intf) -}} {% endif %} {% endif %} -{% endif %} {% endfor -%} -{%- for neigh_asic in asic_topo_config -%} -{%- if neigh_asic in asic_topo_config[asic]['neigh_asic'] and 'port-channel' not in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower -%} -{% if asic_topo_config[asic]['neigh_asic'][neigh_asic]['intfs'][0]|length %} -{% set a_intf = asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0][0] %} +{% for neigh_asic in asic_config['neigh_asic'] %} +{% if asic_config['neigh_asic'][neigh_asic]['intfs'][0]|length %} +{% set a_intf = asic_config['neigh_asic'][neigh_asic]['asic_intfs'][0][0] %} {{- acl_intfs.append(a_intf) -}} {% endif %} -{% endif %} {% endfor %} {% endif %} - {{- acl_intfs|join(';') -}} DataAcl @@ -210,6 +301,10 @@
+{% endfor %} +{% endif %} +{% endif %} +{% if switch_type is defined and switch_type == 'fabric' %} {% for asic in fabric_info %} @@ -246,5 +341,4 @@ {% endfor %} -{% endfor %} - +{% endif %} diff --git a/ansible/templates/minigraph_meta.j2 b/ansible/templates/minigraph_meta.j2 index af7a916fee..3f459d61da 100644 --- a/ansible/templates/minigraph_meta.j2 +++ b/ansible/templates/minigraph_meta.j2 @@ -91,17 +91,19 @@ {{ erspan_dest_str }} {% endif %} -{% if switch_type is defined and switch_type == 'voq' %} +{% if switch_type is defined %} SwitchType {{ switch_type }} +{% endif %} +{% if num_asics == 1 and switch_type is defined and switch_type == 'voq' %} SwitchId - {{ start_switchid }} - + {{ switchids[0] }} + {% endif %} {% if max_cores is defined %} @@ -112,18 +114,17 @@ {% endif %} -{% set idx = 0 %} -{% for asic in asic_topo_config %} - - {{ asic }} +{% if num_asics > 1 and switch_type is defined and switch_type == 'voq' %} +{% for asic_index in range(num_asics) %} +{% set asic_name = "ASIC" + asic_index|string %} + + {{ asic_name }} SubRole - {{ asic_topo_config[asic]['asic_type'] }} + FrontEnd - -{% if switch_type is defined and switch_type == 'voq' %} SwitchType @@ -132,20 +133,40 @@ SwitchId - {{ start_switchid + idx }} -{% set idx = idx + 1 %} - -{% endif %} + {{ switchids[asic_index] }} + {% if max_cores is defined %} MaxCores {{ max_cores }} + {% endif %} {% endfor %} - +{% endif %} +{% if (asic_topo_config and slot_num is defined and slot_num in asic_topo_config) or (asic_topo_config and slot_num is not defined) %} +{% for asic,asic_config in asic_topo_config[slot_num|default('slot0')].items() %} + + {{ asic }} + + + SubRole + + {{ asic_config['asic_type'] }} + +{% if switch_type is defined %} + + SwitchType + + {{ switch_type }} + +{% endif %} + + +{% endfor %} +{% endif %} {% for asic in fabric_info %} {{ asic['asicname'] }} @@ -160,10 +181,14 @@ Fabric + + SwitchId + + {{ switchids[asic['asic_id']] }} + {% endfor %} - diff --git a/ansible/templates/minigraph_png.j2 b/ansible/templates/minigraph_png.j2 index 732c354975..b263cb2e3e 100644 --- a/ansible/templates/minigraph_png.j2 +++ b/ansible/templates/minigraph_png.j2 @@ -55,27 +55,36 @@ {% endfor %} {% endif %} {% endif %} -{% for asic in asic_topo_config %} -{% for neigh_asic in asic_topo_config[asic]['neigh_asic'] %} -{% for intf in asic_topo_config[asic]['neigh_asic'][neigh_asic]['intfs'][0] | sort %} +{% if ((asic_topo_config and slot_num is defined and slot_num in asic_topo_config) or (asic_topo_config and slot_num is not defined)) %} +{% for asic,asic_config in asic_topo_config[slot_num|default('slot0')].items() %} +{% for neigh_asic in asic_config['neigh_asic'] %} +{% for intf in asic_config['neigh_asic'][neigh_asic]['intfs'][0] | sort %} DeviceInterfaceLink +{% if switch_type is not defined %} 40000 +{% endif %} true {{ neigh_asic }} {{ intf }} true {{ asic }} - {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0][loop.index-1] }} + {{ asic_config['neigh_asic'][neigh_asic]['asic_intfs'][0][loop.index-1] }} true {% endfor %} {% endfor %} {% endfor %} +{% endif %} {% for asic_intf in front_panel_asic_ifnames %} +{% if inventory_hostname not in device_conn or port_alias[loop.index - 1] in device_conn[inventory_hostname] %} DeviceInterfaceLink +{% if port_alias[loop.index - 1] in port_speed %} + {{ port_speed[port_alias[loop.index - 1]] }} +{% else %} 40000 +{% endif %} true {{ asic_intf.split('-')[1] }} {{ asic_intf }} @@ -84,6 +93,7 @@ {{ port_alias[loop.index - 1] }} true +{% endif %} {% endfor %} {% 
endif %} @@ -190,7 +200,9 @@ {% endif %} {% endfor %} {% endif %} -{% for asic in asic_topo_config %} +{% if num_asics > 1 %} +{% for asic_index in range(num_asics) %} +{% set asic_name = "ASIC" + asic_index|string %} Asic
@@ -213,10 +225,11 @@ ::/0 - {{ asic }} + {{ asic_name }} Broadcom-Trident2 {% endfor %} +{% endif %} {% for asic in fabric_info %} {{ asic['asicname'] }} diff --git a/ansible/templates/minigraph_template.j2 b/ansible/templates/minigraph_template.j2 index 5ea07ece63..4af4afc80a 100644 --- a/ansible/templates/minigraph_template.j2 +++ b/ansible/templates/minigraph_template.j2 @@ -1,6 +1,14 @@ +{% if 'cable' not in topo %} {% set vms=vm_topo_config['vm'].keys() | sort %} +{% endif %} +{% if 'cable' in topo %} +{% set vms_number = 0 %} +{% set enable_data_plane_acl = false %} +{% set neighbor_eosvm_mgmt = {} %} +{% else %} {% set vms_number = vms | length %} +{% endif %} {% if 'loopback' in vm_topo_config['DUT'] %} {% if card_type is not defined or card_type != 'supervisor' %} {% set lp_ipv4 = vm_topo_config['DUT']['loopback']['ipv4'][dut_index|int] %} diff --git a/ansible/testbed-cli.sh b/ansible/testbed-cli.sh index f208f1a2e0..754e71caf1 100755 --- a/ansible/testbed-cli.sh +++ b/ansible/testbed-cli.sh @@ -308,7 +308,9 @@ function renumber_topo read_file ${testbed_name} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e duts_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$vm_set_name" -e ptf_imagename="$ptf_imagename" -e ptf_ipv6="$ptf_ipv6" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" \ + -l "$server" -e testbed_name="$testbed_name" -e duts_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" \ + -e topo="$topo" -e vm_set_name="$vm_set_name" -e ptf_imagename="$ptf_imagename" -e ptf_ipv6="$ptf_ipv6" $@ ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$duts" $@ @@ -326,7 +328,9 @@ function restart_ptf echo "Restart ptf ptf_${vm_set_name} for testbed '${testbed_name}'" - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e duts_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$vm_set_name" -e ptf_imagename="$ptf_imagename" -e ptf_ipv6="$ptf_ipv6" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" \ + -l "$server" -e testbed_name="$testbed_name" -e duts_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" \ + -e topo="$topo" -e vm_set_name="$vm_set_name" -e ptf_imagename="$ptf_imagename" -e ptf_ipv6="$ptf_ipv6" $@ echo Done } diff --git a/ansible/testbed.yaml b/ansible/testbed.yaml index c6278a07ec..46ee0c1edf 100644 --- a/ansible/testbed.yaml +++ b/ansible/testbed.yaml @@ -169,3 +169,19 @@ dut: - vlab-01 comment: Test ptf ANVL SONIC VM + +- conf-name: vms-chassis-packet-dut + group-name: vms-dummy-dut + topo: t2 + ptf_image_name: docker-ptf + ptf: ptf-unknown + ptf_ip: 1.1.1.1/23 + ptf_ipv6: + server: dummy_1 + vm_base: DUMMY0001 + dut: + - lab-msft-lc0-1 + - lab-msft-lc1-1 + - lab-msft-lc2-1 + - lab-msft-sup-1 + comment: Chasiss Testbed diff --git a/ansible/testbed_cleanup.yml b/ansible/testbed_cleanup.yml index af79a5f318..501fd215af 100644 --- a/ansible/testbed_cleanup.yml +++ b/ansible/testbed_cleanup.yml @@ -4,6 +4,19 @@ - hosts: servers:&vm_host gather_facts: no tasks: + + - name: Get absolute path of {{ root_path }} + command: "realpath {{ root_path }}" + register: real_root_path + + - name: Set variable abs_root_path + set_fact: + abs_root_path: 
"{{ real_root_path.stdout }}" + + - name: Install cleanup script + template: src=roles/vm_set/templates/cleanup.sh.j2 + dest={{ abs_root_path }}/cleanup.sh + - name: run apt update and upgrade include_tasks: update_reboot.yml @@ -13,5 +26,5 @@ with_items: '{{ range(0,3)|list }}' - name: run cleanup script - shell: bash /home/azure/veos-vm/cleanup.sh + shell: bash {{ abs_root_path }}/cleanup.sh become: yes diff --git a/ansible/testbed_renumber_vm_topology.yml b/ansible/testbed_renumber_vm_topology.yml index e11aad8049..43c9271898 100644 --- a/ansible/testbed_renumber_vm_topology.yml +++ b/ansible/testbed_renumber_vm_topology.yml @@ -22,6 +22,10 @@ fail: msg="Please use -l server_X to limit this playbook to one host" when: "{{ play_hosts|length }} != 1" + - name: Check that variable testbed_name is defined + fail: msg="Define testbed_name variable with -e testbed_name=something" + when: testbed_name is not defined + - name: Check that variable vm_set_name is defined fail: msg="Define vm_set_name variable with -e vm_set_name=something" when: vm_set_name is not defined diff --git a/ansible/upgrade_sonic.yml b/ansible/upgrade_sonic.yml index fb7941976b..9c4eedfc7f 100644 --- a/ansible/upgrade_sonic.yml +++ b/ansible/upgrade_sonic.yml @@ -97,6 +97,10 @@ - name: Remove some old sonic image(s) and install new image reduce_and_add_sonic_images: become: true + register: result + retries: 5 + delay: 10 + until: result is not failed args: disk_used_pcent: '{{disk_used_pcent}}' new_image_url: '{{ image_url }}' diff --git a/ansible/vars/topo_cable-test.yml b/ansible/vars/topo_cable-test.yml index 3b3c67d7d7..a662d0f3c6 100644 --- a/ansible/vars/topo_cable-test.yml +++ b/ansible/vars/topo_cable-test.yml @@ -3,85 +3,85 @@ topology: host_interfaces: - 0.0@0,1.0@0 - 0.1@1,1.1@1 - - 0.2@2,1.2@2 - - 0.3@3,1.3@3 - - 0.4@4,1.4@4 - - 0.5@5,1.5@5 - - 0.6@6,1.6@6 - - 0.7@7,1.7@7 - - 0.8@8,1.8@8 - - 0.9@9,1.9@9 - - 0.10@10,1.10@10 - - 0.11@11,1.11@11 - - 0.12@12,1.12@12 - - 0.13@13,1.13@13 - - 0.14@14,1.14@14 - - 0.15@15,1.15@15 - - 0.16@16,1.16@16 - - 0.17@17,1.17@17 - - 0.18@18,1.18@18 - - 0.19@19,1.19@19 - - 0.20@20,1.20@20 - - 0.21@21,1.21@21 - - 0.22@22,1.22@22 - - 0.23@23,1.23@23 - - 0.24@24,1.24@24 - - 0.25@25,1.25@25 - - 0.26@26,1.26@26 - - 0.27@27,1.27@27 - - 0.28@28,1.28@28 - - 0.29@29,1.29@29 - - 0.30@30,1.30@30 - - 0.31@31,1.31@31 - - 0.32@32,1.32@32 - - 0.33@33,1.33@33 - - 0.34@34,1.34@34 - - 0.35@35,1.35@35 - - 0.36@36,1.36@36 - - 0.37@37,1.37@37 - - 0.38@38,1.38@38 - - 0.39@39,1.39@39 - - 0.40@40,1.40@40 - - 0.41@41,1.41@41 - - 0.42@42,1.42@42 - - 0.43@43,1.43@43 - - 0.44@44,1.44@44 - - 0.45@45,1.45@45 - - 0.46@46,1.46@46 - - 0.47@47,1.47@47 - - 0.48@48,1.48@48 - - 0.49@49,1.49@49 - - 0.50@50,1.50@50 - - 0.51@51,1.51@51 - - 0.52@52,1.52@52 - - 0.53@53,1.53@53 - - 0.54@54,1.54@54 - - 0.55@55,1.55@55 + - 0.2@2,1.2@3 + - 0.3@4,1.3@4 + - 0.4@5,1.4@5 + - 0.5@6,1.5@6 + - 0.6@7,1.6@8 + - 0.7@9,1.7@9 + - 0.8@10,1.8@10 + - 0.9@11,1.9@11 + - 0.10@12,1.10@13 + - 0.11@14,1.11@14 + - 0.12@15,1.12@15 + - 0.13@16,1.13@16 + - 0.14@17,1.14@18 + - 0.15@19,1.15@19 + - 0.16@20,1.16@20 + - 0.17@21,1.17@21 + - 0.18@22,1.18@23 + - 0.19@24,1.19@24 + - 0.20@25,1.20@25 + - 0.21@26,1.21@26 + - 0.22@27,1.22@28 + - 0.23@29,1.23@29 + - 0.24@30,1.24@30 + - 0.25@31,1.25@31 + - 0.26@32,1.26@33 + - 0.27@34,1.27@34 + - 0.28@35,1.28@35 + - 0.29@36,1.29@36 + - 0.30@37,1.30@38 + - 0.31@39,1.31@39 + - 0.32@40,1.32@40 + - 0.33@41,1.33@41 + - 0.34@42,1.34@43 + - 0.35@44,1.35@44 + - 0.36@45,1.36@45 + - 0.37@46,1.37@46 + - 0.38@47,1.38@48 + - 
0.39@49,1.39@49 + - 0.40@50,1.40@50 + - 0.41@51,1.41@51 + - 0.42@52,1.42@53 + - 0.43@54,1.43@54 + - 0.44@55,1.44@55 + - 0.45@56,1.45@56 + - 0.46@57,1.46@58 + - 0.47@59,1.47@59 + - 0.48@60,1.48@60 + - 0.49@61,1.49@61 + - 0.50@62,1.50@63 + - 0.51@64,1.51@64 + - 0.52@65,1.52@65 + - 0.53@66,1.53@66 + - 0.54@67,1.54@68 + - 0.55@69,1.55@69 disabled_host_interfaces: - 0.1@1,1.1@1 - - 0.3@3,1.3@3 - - 0.5@5,1.5@5 - - 0.7@7,1.7@7 - - 0.9@9,1.9@9 - - 0.11@11,1.11@11 - - 0.17@17,1.17@17 - - 0.19@19,1.19@19 - - 0.21@21,1.21@21 - - 0.23@23,1.23@23 - - 0.25@25,1.25@25 - - 0.27@27,1.27@27 - - 0.29@29,1.29@29 - - 0.31@31,1.31@31 - - 0.33@33,1.33@33 - - 0.35@35,1.35@35 - - 0.37@37,1.37@37 - - 0.39@39,1.39@39 - - 0.45@45,1.45@45 - - 0.47@47,1.47@47 - - 0.49@49,1.49@49 - - 0.51@51,1.51@51 - - 0.53@53,1.53@53 - - 0.55@55,1.55@55 + - 0.3@4,1.3@4 + - 0.5@6,1.5@6 + - 0.7@9,1.7@9 + - 0.9@11,1.9@11 + - 0.11@14,1.11@14 + - 0.17@21,1.17@21 + - 0.19@24,1.19@24 + - 0.21@26,1.21@26 + - 0.23@29,1.23@29 + - 0.25@31,1.25@31 + - 0.27@34,1.27@34 + - 0.29@36,1.29@36 + - 0.31@39,1.31@39 + - 0.33@41,1.33@41 + - 0.35@45,1.35@45 + - 0.37@46,1.37@46 + - 0.39@49,1.39@49 + - 0.45@56,1.45@56 + - 0.47@59,1.47@59 + - 0.49@61,1.49@61 + - 0.51@64,1.51@64 + - 0.53@66,1.53@66 + - 0.55@69,1.55@69 DUT: loopback: ipv4: diff --git a/ansible/vars/topo_msft-LC-48H-O.yml b/ansible/vars/topo_msft-LC-48H-O.yml new file mode 100644 index 0000000000..33077896a3 --- /dev/null +++ b/ansible/vars/topo_msft-LC-48H-O.yml @@ -0,0 +1,134 @@ +slot1: + ASIC0: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth2250-ASIC0 + - Eth2252-ASIC0 + - Eth2254-ASIC0 + ASIC1: + asic_intfs: + - Eth2064-ASIC0 + - Eth2246-ASIC0 + - Eth2248-ASIC0 + - Eth2262-ASIC0 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC0: + bgp: + asn: 65100 + peers: + 65100: + - 10.0.1.1/24 + - 2603:10e2:400:1::1/64 + ASIC1: + bgp: + asn: 65100 + peers: + 65100: + - 10.0.2.1/24 + - 2603:10e2:400:2::1/64 + ASIC1: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth2504-ASIC1 + - Eth2508-ASIC1 + - Eth2510-ASIC1 + ASIC1: + asic_intfs: + - Eth2320-ASIC1 + - Eth2502-ASIC1 + - Eth2506-ASIC1 + - Eth2518-ASIC1 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC0: + bgp: + asn: 65100 + peers: + 65100: + - 10.0.1.2/24 + - 2603:10e2:400:1::2/64 + ASIC1: + bgp: + asn: 65100 + peers: + 65100: + - 10.0.2.2/24 + - 2603:10e2:400:2::2/64 +slot2: + ASIC0: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth2250-ASIC0 + - Eth2252-ASIC0 + - Eth2254-ASIC0 + ASIC1: + asic_intfs: + - Eth2064-ASIC0 + - Eth2246-ASIC0 + - Eth2248-ASIC0 + - Eth2262-ASIC0 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC0: + bgp: + asn: 65100 + peers: + 65100: + - 10.0.1.3/24 + - 2603:10e2:400:1::3/64 + ASIC1: + bgp: + asn: 65100 + peers: + 65100: + - 10.0.2.3/24 + - 2603:10e2:400:2::3/64 + ASIC1: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth2504-ASIC1 + - Eth2508-ASIC1 + - Eth2510-ASIC1 + ASIC1: + asic_intfs: + - Eth2320-ASIC1 + - Eth2502-ASIC1 + - Eth2506-ASIC1 + - Eth2518-ASIC1 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC0: + bgp: + asn: 65100 + peers: + 65100: + - 10.0.1.4/24 + - 2603:10e2:400:1::4/64 + ASIC1: + bgp: + asn: 65100 + peers: + 65100: + - 10.0.2.4/24 + - 2603:10e2:400:2::4/64 diff --git a/ansible/vars/topo_msft-RP-O.yml b/ansible/vars/topo_msft-RP-O.yml new file mode 100644 index 
0000000000..a065e677f6 --- /dev/null +++ b/ansible/vars/topo_msft-RP-O.yml @@ -0,0 +1,57 @@ +slot0: + ASIC0: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth0194-ASIC0 + - Eth0170-ASIC0 + - Eth0162-ASIC0 + ASIC1: + asic_intfs: + - Eth0164-ASIC0 + - Eth0172-ASIC0 + - Eth0192-ASIC0 + ASIC2: + asic_intfs: + - Eth0126-ASIC0 + - Eth0144-ASIC0 + - Eth0152-ASIC0 + ASIC3: + asic_intfs: + - Eth0128-ASIC0 + - Eth0146-ASIC0 + - Eth0154-ASIC0 + configuration_properties: + common: + asic_type: BackEnd + ASIC1: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth0438-ASIC1 + - Eth0454-ASIC1 + - Eth0462-ASIC1 + - Eth0464-ASIC1 + ASIC1: + asic_intfs: + - Eth0440-ASIC1 + - Eth0456-ASIC1 + - Eth0466-ASIC1 + - Eth0468-ASIC1 + ASIC2: + asic_intfs: + - Eth0424-ASIC1 + - Eth0432-ASIC1 + - Eth0444-ASIC1 + - Eth0448-ASIC1 + ASIC3: + asic_intfs: + - Eth0422-ASIC1 + - Eth0430-ASIC1 + - Eth0446-ASIC1 + - Eth0450-ASIC1 + configuration_properties: + common: + asic_type: BackEnd diff --git a/ansible/vars/topo_msft_four_asic_vs.yml b/ansible/vars/topo_msft_four_asic_vs.yml index ee5bc27cb2..bf68d65e4c 100644 --- a/ansible/vars/topo_msft_four_asic_vs.yml +++ b/ansible/vars/topo_msft_four_asic_vs.yml @@ -1,197 +1,185 @@ -ASIC0: - topology: - NEIGH_ASIC: - ASIC2: - asic_intfs: - - Eth4-ASIC0 - - Eth5-ASIC0 - ASIC3: - asic_intfs: - - Eth6-ASIC0 - - Eth7-ASIC0 - configuration_properties: - common: - dut_asn: 65100 - asic_type: FrontEnd - Loopback4096: - - 8.0.0.0/32 - - 2603:10e2:400::/128 - configuration: - ASIC2: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.1 - - 2603:10e2:400:1::2 - interfaces: - Eth0-ASIC2: - lacp: 1 - Eth1-ASIC2: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.0/31 - ipv6: 2603:10e2:400:1::1/126 - ASIC3: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.3 - - 2603:10e2:400:1::6 - interfaces: - Eth0-ASIC3: - lacp: 2 - Eth1-ASIC3: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.2/31 - ipv6: 2603:10e2:400:1::5/126 -ASIC1: - topology: - NEIGH_ASIC: - ASIC2: - asic_intfs: - - Eth4-ASIC1 - - Eth5-ASIC1 - ASIC3: - asic_intfs: - - Eth6-ASIC1 - - Eth7-ASIC1 - configuration_properties: - common: - dut_asn: 65100 - asic_type: FrontEnd - Loopback4096: - - 8.0.0.1/32 - - 2603:10e2:400::1/128 - configuration: - ASIC2: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.5 - - 2603:10e2:400:1::a - interfaces: - Eth2-ASIC2: - lacp: 1 - Eth3-ASIC2: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.4/31 - ipv6: 2603:10e2:400:1::9/126 - ASIC3: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.7 - - 2603:10e2:400:1::e - interfaces: - Eth2-ASIC3: - lacp: 2 - Eth3-ASIC3: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.6/31 - ipv6: 2603:10e2:400:1::d/126 -ASIC2: - topology: - NEIGH_ASIC: - ASIC0: - asic_intfs: - - Eth0-ASIC2 - - Eth1-ASIC2 - ASIC1: - asic_intfs: - - Eth2-ASIC2 - - Eth3-ASIC2 - configuration_properties: - common: - dut_asn: 65100 - asic_type: BackEnd - Loopback4096: - - 8.0.0.2/32 - - 2603:10e2:400::2/128 - configuration: +slot0: ASIC0: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.0 - - 2603:10e2:400:1::1 - interfaces: - Eth4-ASIC0: - lacp: 1 - Eth5-ASIC0: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.1/31 - ipv6: 2603:10e2:400:1::2/126 + topology: + NEIGH_ASIC: + ASIC2: + asic_intfs: + - Eth4-ASIC0 + - Eth5-ASIC0 + ASIC3: + asic_intfs: + - Eth6-ASIC0 + - Eth7-ASIC0 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC2: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.1 + - 2603:10e2:400:1::2 + interfaces: + Eth0-ASIC2: + lacp: 1 + Eth1-ASIC2: + lacp: 1 + Port-Channel1: + ipv4: 
10.1.0.0/31 + ipv6: 2603:10e2:400:1::1/126 + ASIC3: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.3 + - 2603:10e2:400:1::6 + interfaces: + Eth0-ASIC3: + lacp: 2 + Eth1-ASIC3: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.2/31 + ipv6: 2603:10e2:400:1::5/126 ASIC1: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.4 - - 2603:10e2:400:1::9 - interfaces: - Eth4-ASIC1: - lacp: 2 - Eth5-ASIC1: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.5/31 - ipv6: 2603:10e2:400:1::a/126 -ASIC3: - topology: - NEIGH_ASIC: - ASIC0: - asic_intfs: - - Eth0-ASIC3 - - Eth1-ASIC3 - ASIC1: - asic_intfs: - - Eth2-ASIC3 - - Eth3-ASIC3 - configuration_properties: - common: - dut_asn: 65100 - asic_type: BackEnd - Loopback4096: - - 8.0.0.3/32 - - 2603:10e2:400::3/128 - - configuration: - ASIC0: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.2 - - 2603:10e2:400:1::5 - interfaces: - Eth6-ASIC0: - lacp: 1 - Eth7-ASIC0: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.3/31 - ipv6: 2603:10e2:400:1::6/126 - ASIC1: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.6 - - 2603:10e2:400:1::d - interfaces: - Eth6-ASIC1: - lacp: 2 - Eth7-ASIC1: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.7/31 - ipv6: 2603:10e2:400:1::e/126 + topology: + NEIGH_ASIC: + ASIC2: + asic_intfs: + - Eth4-ASIC1 + - Eth5-ASIC1 + ASIC3: + asic_intfs: + - Eth6-ASIC1 + - Eth7-ASIC1 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC2: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.5 + - 2603:10e2:400:1::a + interfaces: + Eth2-ASIC2: + lacp: 1 + Eth3-ASIC2: + lacp: 1 + Port-Channel1: + ipv4: 10.1.0.4/31 + ipv6: 2603:10e2:400:1::9/126 + ASIC3: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.7 + - 2603:10e2:400:1::e + interfaces: + Eth2-ASIC3: + lacp: 2 + Eth3-ASIC3: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.6/31 + ipv6: 2603:10e2:400:1::d/126 + ASIC2: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth0-ASIC2 + - Eth1-ASIC2 + ASIC1: + asic_intfs: + - Eth2-ASIC2 + - Eth3-ASIC2 + configuration_properties: + common: + dut_asn: 65100 + asic_type: BackEnd + configuration: + ASIC0: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.0 + - 2603:10e2:400:1::1 + interfaces: + Eth4-ASIC0: + lacp: 1 + Eth5-ASIC0: + lacp: 1 + Port-Channel1: + ipv4: 10.1.0.1/31 + ipv6: 2603:10e2:400:1::2/126 + ASIC1: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.4 + - 2603:10e2:400:1::9 + interfaces: + Eth4-ASIC1: + lacp: 2 + Eth5-ASIC1: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.5/31 + ipv6: 2603:10e2:400:1::a/126 + ASIC3: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth0-ASIC3 + - Eth1-ASIC3 + ASIC1: + asic_intfs: + - Eth2-ASIC3 + - Eth3-ASIC3 + configuration_properties: + common: + dut_asn: 65100 + asic_type: BackEnd + configuration: + ASIC0: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.2 + - 2603:10e2:400:1::5 + interfaces: + Eth6-ASIC0: + lacp: 1 + Eth7-ASIC0: + lacp: 1 + Port-Channel1: + ipv4: 10.1.0.3/31 + ipv6: 2603:10e2:400:1::6/126 + ASIC1: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.6 + - 2603:10e2:400:1::d + interfaces: + Eth6-ASIC1: + lacp: 2 + Eth7-ASIC1: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.7/31 + ipv6: 2603:10e2:400:1::e/126 diff --git a/ansible/vars/topo_msft_multi_asic_vs.yml b/ansible/vars/topo_msft_multi_asic_vs.yml index 7c2fb0c7ad..85ca47e001 100644 --- a/ansible/vars/topo_msft_multi_asic_vs.yml +++ b/ansible/vars/topo_msft_multi_asic_vs.yml @@ -1,659 +1,641 @@ -ASIC0: - topology: - NEIGH_ASIC: - ASIC4: - asic_intfs: - - Eth16-ASIC0 - - Eth17-ASIC0 - - Eth18-ASIC0 - - Eth19-ASIC0 - - Eth20-ASIC0 - - 
Eth21-ASIC0 - - Eth22-ASIC0 - - Eth23-ASIC0 - ASIC5: - asic_intfs: - - Eth24-ASIC0 - - Eth25-ASIC0 - - Eth26-ASIC0 - - Eth27-ASIC0 - - Eth28-ASIC0 - - Eth29-ASIC0 - - Eth30-ASIC0 - - Eth31-ASIC0 - configuration_properties: - common: - dut_asn: 65100 - asic_type: FrontEnd - Loopback4096: - - 8.0.0.0/32 - - 2603:10e2:400::/128 - configuration: - ASIC4: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.1 - - 2603:10e2:400:1::2 - interfaces: - Eth0-ASIC4: - lacp: 1 - Eth1-ASIC4: - lacp: 1 - Eth2-ASIC4: - lacp: 1 - Eth3-ASIC4: - lacp: 1 - Eth4-ASIC4: - lacp: 1 - Eth5-ASIC4: - lacp: 1 - Eth6-ASIC4: - lacp: 1 - Eth7-ASIC4: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.0/31 - ipv6: 2603:10e2:400:1::1/126 - ASIC5: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.3 - - 2603:10e2:400:1::6 - interfaces: - Eth0-ASIC5: - lacp: 2 - Eth1-ASIC5: - lacp: 2 - Eth2-ASIC5: - lacp: 2 - Eth3-ASIC5: - lacp: 2 - Eth4-ASIC5: - lacp: 2 - Eth5-ASIC5: - lacp: 2 - Eth6-ASIC5: - lacp: 2 - Eth7-ASIC5: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.2/31 - ipv6: 2603:10e2:400:1::5/126 -ASIC1: - topology: - NEIGH_ASIC: - ASIC4: - asic_intfs: - - Eth16-ASIC1 - - Eth17-ASIC1 - - Eth18-ASIC1 - - Eth19-ASIC1 - - Eth20-ASIC1 - - Eth21-ASIC1 - - Eth22-ASIC1 - - Eth23-ASIC1 - ASIC5: - asic_intfs: - - Eth24-ASIC1 - - Eth25-ASIC1 - - Eth26-ASIC1 - - Eth27-ASIC1 - - Eth28-ASIC1 - - Eth29-ASIC1 - - Eth30-ASIC1 - - Eth31-ASIC1 - configuration_properties: - common: - dut_asn: 65100 - asic_type: FrontEnd - Loopback4096: - - 8.0.0.1/32 - - 2603:10e2:400::1/128 - configuration: - ASIC4: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.5 - - 2603:10e2:400:1::a - interfaces: - Eth8-ASIC4: - lacp: 1 - Eth9-ASIC4: - lacp: 1 - Eth10-ASIC4: - lacp: 1 - Eth11-ASIC4: - lacp: 1 - Eth12-ASIC4: - lacp: 1 - Eth13-ASIC4: - lacp: 1 - Eth14-ASIC4: - lacp: 1 - Eth15-ASIC4: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.4/31 - ipv6: 2603:10e2:400:1::9/126 - ASIC5: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.7 - - 2603:10e2:400:1::e - interfaces: - Eth8-ASIC5: - lacp: 2 - Eth9-ASIC5: - lacp: 2 - Eth10-ASIC5: - lacp: 2 - Eth11-ASIC5: - lacp: 2 - Eth12-ASIC5: - lacp: 2 - Eth13-ASIC5: - lacp: 2 - Eth14-ASIC5: - lacp: 2 - Eth15-ASIC5: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.6/31 - ipv6: 2603:10e2:400:1::d/126 -ASIC2: - topology: - NEIGH_ASIC: - ASIC4: - asic_intfs: - - Eth16-ASIC2 - - Eth17-ASIC2 - - Eth18-ASIC2 - - Eth19-ASIC2 - - Eth20-ASIC2 - - Eth21-ASIC2 - - Eth22-ASIC2 - - Eth23-ASIC2 - ASIC5: - asic_intfs: - - Eth24-ASIC2 - - Eth25-ASIC2 - - Eth26-ASIC2 - - Eth27-ASIC2 - - Eth28-ASIC2 - - Eth29-ASIC2 - - Eth30-ASIC2 - - Eth31-ASIC2 - configuration_properties: - common: - dut_asn: 65100 - asic_type: FrontEnd - Loopback4096: - - 8.0.0.2/32 - - 2603:10e2:400::2/128 - configuration: - ASIC4: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.9 - - 2603:10e2:400:1::11 - interfaces: - Eth24-ASIC4: - lacp: 1 - Eth25-ASIC4: - lacp: 1 - Eth26-ASIC4: - lacp: 1 - Eth27-ASIC4: - lacp: 1 - Eth28-ASIC4: - lacp: 1 - Eth29-ASIC4: - lacp: 1 - Eth30-ASIC4: - lacp: 1 - Eth31-ASIC4: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.8/31 - ipv6: 2603:10e2:400:1::12/126 - ASIC5: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.11 - - 2603:10e2:400:1::16 - interfaces: - Eth24-ASIC5: - lacp: 2 - Eth25-ASIC5: - lacp: 2 - Eth26-ASIC5: - lacp: 2 - Eth27-ASIC5: - lacp: 2 - Eth28-ASIC5: - lacp: 2 - Eth29-ASIC5: - lacp: 2 - Eth30-ASIC5: - lacp: 2 - Eth31-ASIC5: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.10/31 - ipv6: 2603:10e2:400:1::15/126 -ASIC3: - topology: - NEIGH_ASIC: - ASIC4: - asic_intfs: - - 
Eth16-ASIC3 - - Eth17-ASIC3 - - Eth18-ASIC3 - - Eth19-ASIC3 - - Eth20-ASIC3 - - Eth21-ASIC3 - - Eth22-ASIC3 - - Eth23-ASIC3 - ASIC5: - asic_intfs: - - Eth24-ASIC3 - - Eth25-ASIC3 - - Eth26-ASIC3 - - Eth27-ASIC3 - - Eth28-ASIC3 - - Eth29-ASIC3 - - Eth30-ASIC3 - - Eth31-ASIC3 - configuration_properties: - common: - dut_asn: 65100 - asic_type: FrontEnd - Loopback4096: - - 8.0.0.3/32 - - 2603:10e2:400::3/128 - configuration: - ASIC4: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.13 - - 2603:10e2:400:1::1a - interfaces: - Eth24-ASIC4: - lacp: 1 - Eth25-ASIC4: - lacp: 1 - Eth26-ASIC4: - lacp: 1 - Eth27-ASIC4: - lacp: 1 - Eth28-ASIC4: - lacp: 1 - Eth29-ASIC4: - lacp: 1 - Eth30-ASIC4: - lacp: 1 - Eth31-ASIC4: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.12/31 - ipv6: 2603:10e2:400:1::19/126 - ASIC5: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.15 - - 2603:10e2:400:1::1e - interfaces: - Eth24-ASIC5: - lacp: 2 - Eth25-ASIC5: - lacp: 2 - Eth26-ASIC5: - lacp: 2 - Eth27-ASIC5: - lacp: 2 - Eth28-ASIC5: - lacp: 2 - Eth29-ASIC5: - lacp: 2 - Eth30-ASIC5: - lacp: 2 - Eth31-ASIC5: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.14/31 - ipv6: 2603:10e2:400:1::1d/126 -ASIC4: - topology: - NEIGH_ASIC: - ASIC0: - asic_intfs: - - Eth0-ASIC4 - - Eth1-ASIC4 - - Eth2-ASIC4 - - Eth3-ASIC4 - - Eth4-ASIC4 - - Eth5-ASIC4 - - Eth6-ASIC4 - - Eth7-ASIC4 - ASIC1: - asic_intfs: - - Eth8-ASIC4 - - Eth9-ASIC4 - - Eth10-ASIC4 - - Eth11-ASIC4 - - Eth12-ASIC4 - - Eth13-ASIC4 - - Eth14-ASIC4 - - Eth15-ASIC4 - ASIC2: - asic_intfs: - - Eth16-ASIC4 - - Eth17-ASIC4 - - Eth18-ASIC4 - - Eth19-ASIC4 - - Eth20-ASIC4 - - Eth21-ASIC4 - - Eth22-ASIC4 - - Eth23-ASIC4 - ASIC3: - asic_intfs: - - Eth24-ASIC4 - - Eth25-ASIC4 - - Eth26-ASIC4 - - Eth27-ASIC4 - - Eth28-ASIC4 - - Eth29-ASIC4 - - Eth30-ASIC4 - - Eth31-ASIC4 - configuration_properties: - common: - dut_asn: 65100 - asic_type: BackEnd - Loopback4096: - - 8.0.0.4/32 - - 2603:10e2:400::4/128 - configuration: +slot0: ASIC0: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.0 - - 2603:10e2:400:1::1 - interfaces: - Eth16-ASIC0: - lacp: 1 - Eth17-ASIC0: - lacp: 1 - Eth18-ASIC0: - lacp: 1 - Eth19-ASIC0: - lacp: 1 - Eth20-ASIC0: - lacp: 1 - Eth21-ASIC0: - lacp: 1 - Eth22-ASIC0: - lacp: 1 - Eth23-ASIC0: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.1/31 - ipv6: 2603:10e2:400:1::2/126 + topology: + NEIGH_ASIC: + ASIC4: + asic_intfs: + - Eth16-ASIC0 + - Eth17-ASIC0 + - Eth18-ASIC0 + - Eth19-ASIC0 + - Eth20-ASIC0 + - Eth21-ASIC0 + - Eth22-ASIC0 + - Eth23-ASIC0 + ASIC5: + asic_intfs: + - Eth24-ASIC0 + - Eth25-ASIC0 + - Eth26-ASIC0 + - Eth27-ASIC0 + - Eth28-ASIC0 + - Eth29-ASIC0 + - Eth30-ASIC0 + - Eth31-ASIC0 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC4: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.1 + - 2603:10e2:400:1::2 + interfaces: + Eth0-ASIC4: + lacp: 1 + Eth1-ASIC4: + lacp: 1 + Eth2-ASIC4: + lacp: 1 + Eth3-ASIC4: + lacp: 1 + Eth4-ASIC4: + lacp: 1 + Eth5-ASIC4: + lacp: 1 + Eth6-ASIC4: + lacp: 1 + Eth7-ASIC4: + lacp: 1 + Port-Channel1: + ipv4: 10.1.0.0/31 + ipv6: 2603:10e2:400:1::1/126 + ASIC5: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.3 + - 2603:10e2:400:1::6 + interfaces: + Eth0-ASIC5: + lacp: 2 + Eth1-ASIC5: + lacp: 2 + Eth2-ASIC5: + lacp: 2 + Eth3-ASIC5: + lacp: 2 + Eth4-ASIC5: + lacp: 2 + Eth5-ASIC5: + lacp: 2 + Eth6-ASIC5: + lacp: 2 + Eth7-ASIC5: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.2/31 + ipv6: 2603:10e2:400:1::5/126 ASIC1: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.4 - - 2603:10e2:400:1::9 - interfaces: - 
Eth16-ASIC1: - lacp: 2 - Eth17-ASIC1: - lacp: 2 - Eth18-ASIC1: - lacp: 2 - Eth19-ASIC1: - lacp: 2 - Eth20-ASIC1: - lacp: 2 - Eth21-ASIC1: - lacp: 2 - Eth22-ASIC1: - lacp: 2 - Eth23-ASIC1: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.5/31 - ipv6: 2603:10e2:400:1::a/126 + topology: + NEIGH_ASIC: + ASIC4: + asic_intfs: + - Eth16-ASIC1 + - Eth17-ASIC1 + - Eth18-ASIC1 + - Eth19-ASIC1 + - Eth20-ASIC1 + - Eth21-ASIC1 + - Eth22-ASIC1 + - Eth23-ASIC1 + ASIC5: + asic_intfs: + - Eth24-ASIC1 + - Eth25-ASIC1 + - Eth26-ASIC1 + - Eth27-ASIC1 + - Eth28-ASIC1 + - Eth29-ASIC1 + - Eth30-ASIC1 + - Eth31-ASIC1 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC4: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.5 + - 2603:10e2:400:1::a + interfaces: + Eth8-ASIC4: + lacp: 1 + Eth9-ASIC4: + lacp: 1 + Eth10-ASIC4: + lacp: 1 + Eth11-ASIC4: + lacp: 1 + Eth12-ASIC4: + lacp: 1 + Eth13-ASIC4: + lacp: 1 + Eth14-ASIC4: + lacp: 1 + Eth15-ASIC4: + lacp: 1 + Port-Channel1: + ipv4: 10.1.0.4/31 + ipv6: 2603:10e2:400:1::9/126 + ASIC5: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.7 + - 2603:10e2:400:1::e + interfaces: + Eth8-ASIC5: + lacp: 2 + Eth9-ASIC5: + lacp: 2 + Eth10-ASIC5: + lacp: 2 + Eth11-ASIC5: + lacp: 2 + Eth12-ASIC5: + lacp: 2 + Eth13-ASIC5: + lacp: 2 + Eth14-ASIC5: + lacp: 2 + Eth15-ASIC5: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.6/31 + ipv6: 2603:10e2:400:1::d/126 ASIC2: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.8 - - 2603:10e2:400:1::11 - interfaces: - Eth16-ASIC2: - lacp: 3 - Eth17-ASIC2: - lacp: 3 - Eth18-ASIC2: - lacp: 3 - Eth19-ASIC2: - lacp: 3 - Eth20-ASIC2: - lacp: 3 - Eth21-ASIC2: - lacp: 3 - Eth22-ASIC2: - lacp: 3 - Eth23-ASIC2: - lacp: 3 - Port-Channel3: - ipv4: 10.1.0.9/31 - ipv6: 2603:10e2:400:1::12/126 + topology: + NEIGH_ASIC: + ASIC4: + asic_intfs: + - Eth16-ASIC2 + - Eth17-ASIC2 + - Eth18-ASIC2 + - Eth19-ASIC2 + - Eth20-ASIC2 + - Eth21-ASIC2 + - Eth22-ASIC2 + - Eth23-ASIC2 + ASIC5: + asic_intfs: + - Eth24-ASIC2 + - Eth25-ASIC2 + - Eth26-ASIC2 + - Eth27-ASIC2 + - Eth28-ASIC2 + - Eth29-ASIC2 + - Eth30-ASIC2 + - Eth31-ASIC2 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC4: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.9 + - 2603:10e2:400:1::11 + interfaces: + Eth24-ASIC4: + lacp: 1 + Eth25-ASIC4: + lacp: 1 + Eth26-ASIC4: + lacp: 1 + Eth27-ASIC4: + lacp: 1 + Eth28-ASIC4: + lacp: 1 + Eth29-ASIC4: + lacp: 1 + Eth30-ASIC4: + lacp: 1 + Eth31-ASIC4: + lacp: 1 + Port-Channel1: + ipv4: 10.1.0.8/31 + ipv6: 2603:10e2:400:1::12/126 + ASIC5: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.11 + - 2603:10e2:400:1::16 + interfaces: + Eth24-ASIC5: + lacp: 2 + Eth25-ASIC5: + lacp: 2 + Eth26-ASIC5: + lacp: 2 + Eth27-ASIC5: + lacp: 2 + Eth28-ASIC5: + lacp: 2 + Eth29-ASIC5: + lacp: 2 + Eth30-ASIC5: + lacp: 2 + Eth31-ASIC5: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.10/31 + ipv6: 2603:10e2:400:1::15/126 ASIC3: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.12 - - 2603:10e2:400:1::19 - interfaces: - Eth16-ASIC3: - lacp: 4 - Eth17-ASIC3: - lacp: 4 - Eth18-ASIC3: - lacp: 4 - Eth19-ASIC3: - lacp: 4 - Eth20-ASIC3: - lacp: 4 - Eth21-ASIC3: - lacp: 4 - Eth22-ASIC3: - lacp: 4 - Eth23-ASIC3: - lacp: 4 - Port-Channel4: - ipv4: 10.1.0.13/31 - ipv6: 2603:10e2:400:1::1a/126 -ASIC5: - topology: - NEIGH_ASIC: - ASIC0: - asic_intfs: - - Eth0-ASIC5 - - Eth1-ASIC5 - - Eth2-ASIC5 - - Eth3-ASIC5 - - Eth4-ASIC5 - - Eth5-ASIC5 - - Eth6-ASIC5 - - Eth7-ASIC5 - ASIC1: - asic_intfs: - - Eth8-ASIC5 - - Eth9-ASIC5 - - Eth10-ASIC5 - - 
Eth11-ASIC5 - - Eth12-ASIC5 - - Eth13-ASIC5 - - Eth14-ASIC5 - - Eth15-ASIC5 - ASIC2: - asic_intfs: - - Eth16-ASIC5 - - Eth17-ASIC5 - - Eth18-ASIC5 - - Eth19-ASIC5 - - Eth20-ASIC5 - - Eth21-ASIC5 - - Eth22-ASIC5 - - Eth23-ASIC5 - ASIC3: - asic_intfs: - - Eth24-ASIC5 - - Eth25-ASIC5 - - Eth26-ASIC5 - - Eth27-ASIC5 - - Eth28-ASIC5 - - Eth29-ASIC5 - - Eth30-ASIC5 - - Eth31-ASIC5 - configuration_properties: - common: - dut_asn: 65100 - asic_type: BackEnd - Loopback4096: - - 8.0.0.5/32 - - 2603:10e2:400::5/128 - - configuration: - ASIC0: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.2 - - 2603:10e2:400:1::5 - interfaces: - Eth24-ASIC0: - lacp: 1 - Eth25-ASIC0: - lacp: 1 - Eth26-ASIC0: - lacp: 1 - Eth27-ASIC0: - lacp: 1 - Eth28-ASIC0: - lacp: 1 - Eth29-ASIC0: - lacp: 1 - Eth30-ASIC0: - lacp: 1 - Eth31-ASIC0: - lacp: 1 - Port-Channel1: - ipv4: 10.1.0.3/31 - ipv6: 2603:10e2:400:1::6/126 - ASIC1: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.6 - - 2603:10e2:400:1::d - interfaces: - Eth24-ASIC1: - lacp: 2 - Eth25-ASIC1: - lacp: 2 - Eth26-ASIC1: - lacp: 2 - Eth27-ASIC1: - lacp: 2 - Eth28-ASIC1: - lacp: 2 - Eth29-ASIC1: - lacp: 2 - Eth30-ASIC1: - lacp: 2 - Eth31-ASIC1: - lacp: 2 - Port-Channel2: - ipv4: 10.1.0.7/31 - ipv6: 2603:10e2:400:1::e/126 - ASIC2: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.10 - - 2603:10e2:400:1::15 - interfaces: - Eth24-ASIC2: - lacp: 3 - Eth25-ASIC2: - lacp: 3 - Eth26-ASIC2: - lacp: 3 - Eth27-ASIC2: - lacp: 3 - Eth28-ASIC2: - lacp: 3 - Eth29-ASIC2: - lacp: 3 - Eth30-ASIC2: - lacp: 3 - Eth31-ASIC2: - lacp: 3 - Port-Channel3: - ipv4: 10.1.0.11/31 - ipv6: 2603:10e2:400:1::16/126 - ASIC3: - bgp: - asn: 65100 - peers: - 65100: - - 10.1.0.14 - - 2603:10e2:400:1::1d - interfaces: - Eth24-ASIC3: - lacp: 4 - Eth25-ASIC3: - lacp: 4 - Eth26-ASIC3: - lacp: 4 - Eth27-ASIC3: - lacp: 4 - Eth28-ASIC3: - lacp: 4 - Eth29-ASIC3: - lacp: 4 - Eth30-ASIC3: - lacp: 4 - Eth31-ASIC3: - lacp: 4 - Port-Channel4: - ipv4: 10.1.0.15/31 - ipv6: 2603:10e2:400:1::1e/126 + topology: + NEIGH_ASIC: + ASIC4: + asic_intfs: + - Eth16-ASIC3 + - Eth17-ASIC3 + - Eth18-ASIC3 + - Eth19-ASIC3 + - Eth20-ASIC3 + - Eth21-ASIC3 + - Eth22-ASIC3 + - Eth23-ASIC3 + ASIC5: + asic_intfs: + - Eth24-ASIC3 + - Eth25-ASIC3 + - Eth26-ASIC3 + - Eth27-ASIC3 + - Eth28-ASIC3 + - Eth29-ASIC3 + - Eth30-ASIC3 + - Eth31-ASIC3 + configuration_properties: + common: + dut_asn: 65100 + asic_type: FrontEnd + configuration: + ASIC4: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.13 + - 2603:10e2:400:1::1a + interfaces: + Eth24-ASIC4: + lacp: 1 + Eth25-ASIC4: + lacp: 1 + Eth26-ASIC4: + lacp: 1 + Eth27-ASIC4: + lacp: 1 + Eth28-ASIC4: + lacp: 1 + Eth29-ASIC4: + lacp: 1 + Eth30-ASIC4: + lacp: 1 + Eth31-ASIC4: + lacp: 1 + Port-Channel1: + ipv4: 10.1.0.12/31 + ipv6: 2603:10e2:400:1::19/126 + ASIC5: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.15 + - 2603:10e2:400:1::1e + interfaces: + Eth24-ASIC5: + lacp: 2 + Eth25-ASIC5: + lacp: 2 + Eth26-ASIC5: + lacp: 2 + Eth27-ASIC5: + lacp: 2 + Eth28-ASIC5: + lacp: 2 + Eth29-ASIC5: + lacp: 2 + Eth30-ASIC5: + lacp: 2 + Eth31-ASIC5: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.14/31 + ipv6: 2603:10e2:400:1::1d/126 + ASIC4: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth0-ASIC4 + - Eth1-ASIC4 + - Eth2-ASIC4 + - Eth3-ASIC4 + - Eth4-ASIC4 + - Eth5-ASIC4 + - Eth6-ASIC4 + - Eth7-ASIC4 + ASIC1: + asic_intfs: + - Eth8-ASIC4 + - Eth9-ASIC4 + - Eth10-ASIC4 + - Eth11-ASIC4 + - Eth12-ASIC4 + - Eth13-ASIC4 + - Eth14-ASIC4 + - Eth15-ASIC4 + ASIC2: + asic_intfs: + - Eth16-ASIC4 + - Eth17-ASIC4 + - Eth18-ASIC4 
+ - Eth19-ASIC4 + - Eth20-ASIC4 + - Eth21-ASIC4 + - Eth22-ASIC4 + - Eth23-ASIC4 + ASIC3: + asic_intfs: + - Eth24-ASIC4 + - Eth25-ASIC4 + - Eth26-ASIC4 + - Eth27-ASIC4 + - Eth28-ASIC4 + - Eth29-ASIC4 + - Eth30-ASIC4 + - Eth31-ASIC4 + configuration_properties: + common: + dut_asn: 65100 + asic_type: BackEnd + configuration: + ASIC0: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.0 + - 2603:10e2:400:1::1 + interfaces: + Eth16-ASIC0: + lacp: 1 + Eth17-ASIC0: + lacp: 1 + Eth18-ASIC0: + lacp: 1 + Eth19-ASIC0: + lacp: 1 + Eth20-ASIC0: + lacp: 1 + Eth21-ASIC0: + lacp: 1 + Eth22-ASIC0: + lacp: 1 + Eth23-ASIC0: + lacp: 1 + Port-Channel1: + ipv4: 10.1.0.1/31 + ipv6: 2603:10e2:400:1::2/126 + ASIC1: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.4 + - 2603:10e2:400:1::9 + interfaces: + Eth16-ASIC1: + lacp: 2 + Eth17-ASIC1: + lacp: 2 + Eth18-ASIC1: + lacp: 2 + Eth19-ASIC1: + lacp: 2 + Eth20-ASIC1: + lacp: 2 + Eth21-ASIC1: + lacp: 2 + Eth22-ASIC1: + lacp: 2 + Eth23-ASIC1: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.5/31 + ipv6: 2603:10e2:400:1::a/126 + ASIC2: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.8 + - 2603:10e2:400:1::11 + interfaces: + Eth16-ASIC2: + lacp: 3 + Eth17-ASIC2: + lacp: 3 + Eth18-ASIC2: + lacp: 3 + Eth19-ASIC2: + lacp: 3 + Eth20-ASIC2: + lacp: 3 + Eth21-ASIC2: + lacp: 3 + Eth22-ASIC2: + lacp: 3 + Eth23-ASIC2: + lacp: 3 + Port-Channel3: + ipv4: 10.1.0.9/31 + ipv6: 2603:10e2:400:1::12/126 + ASIC3: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.12 + - 2603:10e2:400:1::19 + interfaces: + Eth16-ASIC3: + lacp: 4 + Eth17-ASIC3: + lacp: 4 + Eth18-ASIC3: + lacp: 4 + Eth19-ASIC3: + lacp: 4 + Eth20-ASIC3: + lacp: 4 + Eth21-ASIC3: + lacp: 4 + Eth22-ASIC3: + lacp: 4 + Eth23-ASIC3: + lacp: 4 + Port-Channel4: + ipv4: 10.1.0.13/31 + ipv6: 2603:10e2:400:1::1a/126 + ASIC5: + topology: + NEIGH_ASIC: + ASIC0: + asic_intfs: + - Eth0-ASIC5 + - Eth1-ASIC5 + - Eth2-ASIC5 + - Eth3-ASIC5 + - Eth4-ASIC5 + - Eth5-ASIC5 + - Eth6-ASIC5 + - Eth7-ASIC5 + ASIC1: + asic_intfs: + - Eth8-ASIC5 + - Eth9-ASIC5 + - Eth10-ASIC5 + - Eth11-ASIC5 + - Eth12-ASIC5 + - Eth13-ASIC5 + - Eth14-ASIC5 + - Eth15-ASIC5 + ASIC2: + asic_intfs: + - Eth16-ASIC5 + - Eth17-ASIC5 + - Eth18-ASIC5 + - Eth19-ASIC5 + - Eth20-ASIC5 + - Eth21-ASIC5 + - Eth22-ASIC5 + - Eth23-ASIC5 + ASIC3: + asic_intfs: + - Eth24-ASIC5 + - Eth25-ASIC5 + - Eth26-ASIC5 + - Eth27-ASIC5 + - Eth28-ASIC5 + - Eth29-ASIC5 + - Eth30-ASIC5 + - Eth31-ASIC5 + configuration_properties: + common: + dut_asn: 65100 + asic_type: BackEnd + configuration: + ASIC0: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.2 + - 2603:10e2:400:1::5 + interfaces: + Eth24-ASIC0: + lacp: 1 + Eth25-ASIC0: + lacp: 1 + Eth26-ASIC0: + lacp: 1 + Eth27-ASIC0: + lacp: 1 + Eth28-ASIC0: + lacp: 1 + Eth29-ASIC0: + lacp: 1 + Eth30-ASIC0: + lacp: 1 + Eth31-ASIC0: + lacp: 1 + Port-Channel1: + ipv4: 10.1.0.3/31 + ipv6: 2603:10e2:400:1::6/126 + ASIC1: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.6 + - 2603:10e2:400:1::d + interfaces: + Eth24-ASIC1: + lacp: 2 + Eth25-ASIC1: + lacp: 2 + Eth26-ASIC1: + lacp: 2 + Eth27-ASIC1: + lacp: 2 + Eth28-ASIC1: + lacp: 2 + Eth29-ASIC1: + lacp: 2 + Eth30-ASIC1: + lacp: 2 + Eth31-ASIC1: + lacp: 2 + Port-Channel2: + ipv4: 10.1.0.7/31 + ipv6: 2603:10e2:400:1::e/126 + ASIC2: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.10 + - 2603:10e2:400:1::15 + interfaces: + Eth24-ASIC2: + lacp: 3 + Eth25-ASIC2: + lacp: 3 + Eth26-ASIC2: + lacp: 3 + Eth27-ASIC2: + lacp: 3 + Eth28-ASIC2: + lacp: 3 + Eth29-ASIC2: + lacp: 3 + Eth30-ASIC2: + lacp: 3 + Eth31-ASIC2: + lacp: 3 + 
Port-Channel3: + ipv4: 10.1.0.11/31 + ipv6: 2603:10e2:400:1::16/126 + ASIC3: + bgp: + asn: 65100 + peers: + 65100: + - 10.1.0.14 + - 2603:10e2:400:1::1d + interfaces: + Eth24-ASIC3: + lacp: 4 + Eth25-ASIC3: + lacp: 4 + Eth26-ASIC3: + lacp: 4 + Eth27-ASIC3: + lacp: 4 + Eth28-ASIC3: + lacp: 4 + Eth29-ASIC3: + lacp: 4 + Eth30-ASIC3: + lacp: 4 + Eth31-ASIC3: + lacp: 4 + Port-Channel4: + ipv4: 10.1.0.15/31 + ipv6: 2603:10e2:400:1::1e/126 diff --git a/ansible/vars/topo_t2-vs.yml b/ansible/vars/topo_t2-vs.yml index acdc630f19..f1c458d8c2 100644 --- a/ansible/vars/topo_t2-vs.yml +++ b/ansible/vars/topo_t2-vs.yml @@ -54,7 +54,7 @@ configuration_properties: max_tor_subnet_number: 32 tor_subnet_size: 128 dut_asn: 65100 - dut_type: Spine + dut_type: SpineRouter nhipv4: 10.10.246.254 nhipv6: FC0A::FF core: diff --git a/ansible/vars/topo_t2.yml b/ansible/vars/topo_t2.yml index af7291dd85..08dca2d0b4 100644 --- a/ansible/vars/topo_t2.yml +++ b/ansible/vars/topo_t2.yml @@ -337,7 +337,7 @@ configuration_properties: max_tor_subnet_number: 32 tor_subnet_size: 128 dut_asn: 65100 - dut_type: Spine + dut_type: SpineRouter nhipv4: 10.10.246.254 nhipv6: FC0A::FF core: @@ -376,7 +376,7 @@ configuration: - common - core bgp: - asn: 65201 + asn: 65200 peers: 65100: - 10.0.0.4 @@ -402,7 +402,7 @@ configuration: - common - core bgp: - asn: 65202 + asn: 65200 peers: 65100: - 10.0.0.8 @@ -428,7 +428,7 @@ configuration: - common - core bgp: - asn: 65203 + asn: 65200 peers: 65100: - 10.0.0.12 @@ -454,7 +454,7 @@ configuration: - common - core bgp: - asn: 65204 + asn: 65200 peers: 65100: - 10.0.0.16 @@ -480,7 +480,7 @@ configuration: - common - core bgp: - asn: 65205 + asn: 65200 peers: 65100: - 10.0.0.20 @@ -506,7 +506,7 @@ configuration: - common - core bgp: - asn: 65206 + asn: 65200 peers: 65100: - 10.0.0.24 @@ -532,7 +532,7 @@ configuration: - common - core bgp: - asn: 65207 + asn: 65200 peers: 65100: - 10.0.0.28 @@ -558,7 +558,7 @@ configuration: - common - core bgp: - asn: 65208 + asn: 65200 peers: 65100: - 10.0.0.32 @@ -579,7 +579,7 @@ configuration: - common - core bgp: - asn: 65209 + asn: 65200 peers: 65100: - 10.0.0.34 @@ -600,7 +600,7 @@ configuration: - common - core bgp: - asn: 65210 + asn: 65200 peers: 65100: - 10.0.0.36 @@ -621,7 +621,7 @@ configuration: - common - core bgp: - asn: 65211 + asn: 65200 peers: 65100: - 10.0.0.38 @@ -642,7 +642,7 @@ configuration: - common - core bgp: - asn: 65212 + asn: 65200 peers: 65100: - 10.0.0.40 @@ -663,7 +663,7 @@ configuration: - common - core bgp: - asn: 65213 + asn: 65200 peers: 65100: - 10.0.0.42 @@ -684,7 +684,7 @@ configuration: - common - core bgp: - asn: 65214 + asn: 65200 peers: 65100: - 10.0.0.44 @@ -705,7 +705,7 @@ configuration: - common - core bgp: - asn: 65215 + asn: 65200 peers: 65100: - 10.0.0.46 @@ -726,7 +726,7 @@ configuration: - common - core bgp: - asn: 65216 + asn: 65200 peers: 65100: - 10.0.0.48 @@ -747,7 +747,7 @@ configuration: - common - core bgp: - asn: 65217 + asn: 65200 peers: 65100: - 10.0.0.50 @@ -768,7 +768,7 @@ configuration: - common - core bgp: - asn: 65218 + asn: 65200 peers: 65100: - 10.0.0.52 @@ -789,7 +789,7 @@ configuration: - common - core bgp: - asn: 65219 + asn: 65200 peers: 65100: - 10.0.0.54 @@ -810,7 +810,7 @@ configuration: - common - core bgp: - asn: 65220 + asn: 65200 peers: 65100: - 10.0.0.56 @@ -831,7 +831,7 @@ configuration: - common - core bgp: - asn: 65221 + asn: 65200 peers: 65100: - 10.0.0.58 @@ -852,7 +852,7 @@ configuration: - common - core bgp: - asn: 65222 + asn: 65200 peers: 65100: - 10.0.0.60 @@ -873,7 +873,7 
@@ configuration: - common - core bgp: - asn: 65223 + asn: 65200 peers: 65100: - 10.0.0.62 @@ -894,7 +894,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65001 peers: 65100: - 10.0.0.64 @@ -920,7 +920,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65002 peers: 65100: - 10.0.0.68 @@ -946,7 +946,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65003 peers: 65100: - 10.0.0.72 @@ -972,7 +972,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65004 peers: 65100: - 10.0.0.76 @@ -998,7 +998,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65005 peers: 65100: - 10.0.0.80 @@ -1024,7 +1024,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65006 peers: 65100: - 10.0.0.84 @@ -1050,7 +1050,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65007 peers: 65100: - 10.0.0.88 @@ -1076,7 +1076,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65008 peers: 65100: - 10.0.0.92 @@ -1102,7 +1102,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65009 peers: 65100: - 10.0.0.96 @@ -1128,7 +1128,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65010 peers: 65100: - 10.0.0.98 @@ -1154,7 +1154,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65011 peers: 65100: - 10.0.0.100 @@ -1175,7 +1175,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65012 peers: 65100: - 10.0.0.102 @@ -1196,7 +1196,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65013 peers: 65100: - 10.0.0.104 @@ -1217,7 +1217,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65014 peers: 65100: - 10.0.0.106 @@ -1238,7 +1238,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65015 peers: 65100: - 10.0.0.108 @@ -1259,7 +1259,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65016 peers: 65100: - 10.0.0.110 @@ -1280,7 +1280,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65017 peers: 65100: - 10.0.0.112 @@ -1301,7 +1301,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65018 peers: 65100: - 10.0.0.114 @@ -1322,7 +1322,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65019 peers: 65100: - 10.0.0.116 @@ -1343,7 +1343,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65020 peers: 65100: - 10.0.0.118 @@ -1364,7 +1364,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65021 peers: 65100: - 10.0.0.120 @@ -1385,7 +1385,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65022 peers: 65100: - 10.0.0.122 @@ -1406,7 +1406,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65023 peers: 65100: - 10.0.0.124 @@ -1427,7 +1427,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65024 peers: 65100: - 10.0.0.126 @@ -1448,7 +1448,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65025 peers: 65100: - 10.0.0.128 @@ -1474,7 +1474,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65026 peers: 65100: - 10.0.0.132 @@ -1500,7 +1500,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65027 peers: 65100: - 10.0.0.136 @@ -1526,7 +1526,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65028 peers: 65100: - 10.0.0.140 @@ -1552,7 +1552,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65029 peers: 65100: - 10.0.0.144 @@ -1578,7 +1578,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65030 peers: 65100: - 10.0.0.148 @@ -1604,7 +1604,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65031 peers: 65100: - 10.0.0.152 @@ -1630,7 +1630,7 @@ configuration: - 
common - leaf bgp: - asn: 65000 + asn: 65032 peers: 65100: - 10.0.0.156 @@ -1656,7 +1656,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65033 peers: 65100: - 10.0.0.160 @@ -1677,7 +1677,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65034 peers: 65100: - 10.0.0.162 @@ -1698,7 +1698,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65035 peers: 65100: - 10.0.0.164 @@ -1719,7 +1719,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65036 peers: 65100: - 10.0.0.166 @@ -1740,7 +1740,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65037 peers: 65100: - 10.0.0.168 @@ -1761,7 +1761,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65038 peers: 65100: - 10.0.0.170 @@ -1782,7 +1782,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65039 peers: 65100: - 10.0.0.172 @@ -1803,7 +1803,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65040 peers: 65100: - 10.0.0.174 @@ -1829,7 +1829,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65041 peers: 65100: - 10.0.0.176 @@ -1855,7 +1855,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65042 peers: 65100: - 10.0.0.178 @@ -1876,7 +1876,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65043 peers: 65100: - 10.0.0.180 @@ -1897,7 +1897,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65044 peers: 65100: - 10.0.0.182 @@ -1918,7 +1918,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65045 peers: 65100: - 10.0.0.184 @@ -1939,7 +1939,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65046 peers: 65100: - 10.0.0.186 @@ -1960,7 +1960,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65047 peers: 65100: - 10.0.0.188 @@ -1981,7 +1981,7 @@ configuration: - common - leaf bgp: - asn: 65000 + asn: 65048 peers: 65100: - 10.0.0.190 diff --git a/ansible/veos_vtb b/ansible/veos_vtb index 9eca241c31..9dc4c6fde2 100644 --- a/ansible/veos_vtb +++ b/ansible/veos_vtb @@ -127,6 +127,8 @@ all: iface_speed: 40000 start_topo_service: True frontend_asics: [0,1,2,3] + loopback4096_ip: [8.0.0.0/32, 8.0.0.1/32, 8.0.0.2/32, 8.0.0.3/32, 8.0.0.4/32, 8.0.0.5/32] + loopback4096_ipv6: [2603:10e2:400::/128, 2603:10e2:400::1/128, 2603:10e2:400::2/128, 2603:10e2:400::3/128, 2603:10e2:400::4/128, 2603:10e2:400::5/128] vlab-08: ansible_host: 10.250.0.112 ansible_hostv6: fec0::ffff:afa:c @@ -139,6 +141,8 @@ all: iface_speed: 40000 start_topo_service: True frontend_asics: [0,1] + loopback4096_ip: [8.0.0.0/32, 8.0.0.1/32, 8.0.0.2/32, 8.0.0.3/32] + loopback4096_ipv6: [2603:10e2:400::/128, 2603:10e2:400::1/128, 2603:10e2:400::2/128, 2603:10e2:400::3/128] vlab-simx-01: ansible_host: 10.250.0.103 ansible_hostv6: fec0::ffff:afa:3 @@ -157,7 +161,7 @@ all: serial_port: 9020 ansible_password: password ansible_user: admin - slot_num: 1 + slot_num: slot1 vlab-t2-02: ansible_host: 10.250.0.121 ansible_hostv6: fec0::ffff:afa:11 @@ -166,7 +170,7 @@ all: serial_port: 9021 ansible_password: password ansible_user: admin - slot_num: 2 + slot_num: slot2 vlab-t2-sup: ansible_host: 10.250.0.122 ansible_hostv6: fec0::ffff:afa:12 @@ -176,7 +180,7 @@ all: ansible_password: password ansible_user: admin card_type: supervisor - slot_num: 3 + slot_num: slot3 # The groups below are helpers to limit running playbooks to a specific server only server_1: diff --git a/docs/testbed/README.md b/docs/testbed/README.md index a28a01508d..0c9ede0f8c 100644 --- a/docs/testbed/README.md +++ b/docs/testbed/README.md @@ -16,3 +16,4 @@ - [FAQ](README.testbed.FAQ.md) - 
[Internal](README.testbed.Internal.md) - [Kubernetes Setup](README.testbed.k8s.Setup.md) +- [SAI Test Setup](./sai_quality/README.md) diff --git a/docs/testbed/README.testbed.Setup.md b/docs/testbed/README.testbed.Setup.md index 6963888712..ba71e47150 100644 --- a/docs/testbed/README.testbed.Setup.md +++ b/docs/testbed/README.testbed.Setup.md @@ -67,6 +67,7 @@ The PTF docker container is used to send and receive data plane packets to the D make configure PLATFORM=vs ;#takes about 1 hour or more make target/docker-ptf.gz ``` + You can also download a pre-built `docker-ptf` image [here](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&buildId=42750&target=target%2Fdocker-ptf.gz). 2. Setup your own [Docker Registry](https://docs.docker.com/registry/) and upload `docker-ptf` to your registry. @@ -82,7 +83,7 @@ Managing the testbed and running tests requires various dependencies to be insta make target/docker-sonic-mgmt.gz ``` - You can also download a pre-built `docker-sonic-mgmt` image [here](https://sonic-jenkins.westus2.cloudapp.azure.com/job/bldenv/job/docker-sonic-mgmt/lastSuccessfulBuild/artifact/sonic-buildimage/target/docker-sonic-mgmt.gz). + You can also download a pre-built `docker-sonic-mgmt` image [here](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master/docker-sonic-mgmt.gz&definitionId=194&artifactName=docker-sonic-mgmt&buildId=42201&target=target%2Fdocker-sonic-mgmt.gz). 2. Clone the `sonic-mgmt` repo into your working directory: ``` @@ -231,3 +232,7 @@ Our fanout switches deploy using the Arista switch's eosadmin shell login. If yo - To remove a topology run: ```./testbed-cli.sh remove-topo vms-t1 ~/.password``` **NOTE:** The last step in `testbed-cli.sh` is trying to re-deploy the Vlan range in the root fanout switch to match the VLAN range specified in the topology. In other words, it's trying to change the "allowed" Vlan for the Arista switch ports. If you have a different type of switch, this may or may not work. Please review the steps and update accordingly if necessary. If you comment out the last step, you may manually swap Vlan ranges in the root fanout to make the testbed topology switch work. + +## Deploy Minigraph + +Please follow [Device Minigraph Generation and Deployment](README.testbed.Minigraph.md) to finish minigraph deployment. diff --git a/docs/testbed/README.testbed.VsSetup.md b/docs/testbed/README.testbed.VsSetup.md index f358a2ac37..2f2b3e9616 100644 --- a/docs/testbed/README.testbed.VsSetup.md +++ b/docs/testbed/README.testbed.VsSetup.md @@ -43,22 +43,23 @@ We currently support EOS-based or SONiC VMs to simulate neighboring devices in t 2. Copy below image files to `~/veos-vm/images` on your testbed host: - `Aboot-veos-serial-8.0.0.iso` - `vEOS-lab-4.20.15M.vmdk` - ### Option 2: cEOS (container-based) image (experimental) #### Option 2.1: Download and import cEOS image manually 1. Download the [cEOS image from Arista](https://www.arista.com/en/support/software-download) 2. Import the cEOS image (it will take several minutes to import, so please be patient!)
``` -docker import cEOS64-lab-4.23.2F.tar.xz ceosimage:4.23.2F +docker import cEOS-lab-4.25.5.1M.tar.xz ceosimage:4.25.5.1M-1 ``` After imported successfully, you can check it by 'docker images' ``` $ docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -ceosimage 4.23.2F d53c28e38448 2 hours ago 1.82GB +REPOSITORY TAG IMAGE ID CREATED SIZE +ceosimage 4.25.5.1M-1 fa0df4b01467 9 seconds ago 1.62GB ``` +**Note**: *The image may be updated from time to time; the exact image version required by the installation process is defined in [ansible/group_vars/all/ceos.yml](../../ansible/group_vars/all/ceos.yml). Please download the corresponding version of the image and import it into your local docker repository.* +**Note**: *Also pay attention to whether the image is the 32-bit or 64-bit variant; the example above uses the standard 32-bit image. Please import the variant that fits your needs.* #### Option 2.2: Pull cEOS image automatically 1. Alternatively, you can host the cEOS image on a http server. Specify `vm_images_url` for downloading the image [here](https://github.com/Azure/sonic-mgmt/blob/master/ansible/group_vars/vm_host/main.yml#L2). @@ -259,6 +260,38 @@ Once the topology has been created, we need to give the DUT an initial configura ``` ./testbed-cli.sh -t vtestbed.csv -m veos_vtb deploy-mg vms-kvm-t0 veos_vtb password.txt ``` +Verify that the DUT VM was created successfully. On your host, run ``` +~$ virsh list + Id Name State +------------------------- + 3 vlab-01 running + ``` + Then you can log in to your DUT with the command shown below. + For more information about how to get the DUT IP address, please refer to + [testbed.Example#access-the-dut](README.testbed.Example.Config.md#access-the-dut) + ``` +~$ ssh admin@10.250.0.101 +admin@10.250.0.101's password: +Linux vlab-01 4.19.0-12-2-amd64 #1 SMP Debian 4.19.152-1 (2020-10-18) x86_64 +You are on + ____ ___ _ _ _ ____ + / ___| / _ \| \ | (_)/ ___| + \___ \| | | | \| | | | + ___) | |_| | |\ | | |___ + |____/ \___/|_| \_|_|\____| + +-- Software for Open Networking in the Cloud -- + +Unauthorized access and/or use are prohibited. +All access and/or use are subject to monitoring. + +Help: http://azure.github.io/SONiC/ + +Last login: Thu Jul 29 03:55:53 2021 from 10.250.0.1 +admin@vlab-01:~$ exit +``` 2. Verify that you can login to the SONiC KVM using Mgmt IP = 10.250.0.101 and admin:password. ``` @@ -338,3 +371,11 @@ If neighbor devices are SONiC ``` You should see three sets of tests run and pass. You're now set up and ready to use the KVM testbed! + +## Restore/Remove the testing environment +If you want to clear your testing environment, log into the mgmt docker that you created in step three of [README.testbed.VsSetup.md#prepare-testbed-host](README.testbed.VsSetup.md#prepare-testbed-host). + +Then run: +``` +./testbed-cli.sh -t vtestbed.csv -m veos_vtb -k ceos remove-topo vms-kvm-t0 password.txt +``` \ No newline at end of file diff --git a/docs/testbed/sai_quality/CheckSAIHeaderVersionAndSONiCBranch.md b/docs/testbed/sai_quality/CheckSAIHeaderVersionAndSONiCBranch.md new file mode 100644 index 0000000000..fd62a7c7eb --- /dev/null +++ b/docs/testbed/sai_quality/CheckSAIHeaderVersionAndSONiCBranch.md @@ -0,0 +1,34 @@ +In this article, you will learn how to find the SAI header version and how to check it against the matching [buildimage](https://github.com/Azure/sonic-buildimage) repo.
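In short, the check described in the rest of this article boils down to comparing the `libsai` package installed in the `syncd` container on the DUT with the package pinned in `platform/broadcom/sai.mk` of the matching sonic-buildimage branch. A minimal sketch, using only commands and paths that appear in the steps below (the Broadcom path is an example; substitute the directory for your platform):

```
# On the DUT: show which libsai package the syncd container is running
docker exec syncd bash -c "apt list --installed 2>/dev/null | grep libsai"

# On your build host, inside a sonic-buildimage checkout of the matching branch:
grep "^BRCM_SAI " platform/broadcom/sai.mk
# Expected output looks like: BRCM_SAI = libsaibcm_4.3.3.8-1_amd64.deb
```

If the two package versions agree, the checkout you build saiserver from should correspond to the SAI binary running on the device.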
+ +SAI has many versions, and different SAI headers (SPI) are defined across them; to test different SAI headers, we need a matching SAI binary and SAI server. + +1. To understand the testbed topology, make sure you go through the doc at +https://github.com/Azure/sonic-mgmt/tree/master/docs/testbed +2. Register the device you want to use +``` +Here we need an s6000, t0 +``` +3. Follow this page to get the testbed info +[Example of Testbed Configuration - Overview (azure.com)](https://github.com/Azure/sonic-mgmt/blob/master/docs/testbed/README.testbed.Example.Config.md) + +4. Log in to the SONiC device and check the installed SAI version within the `syncd` docker. +``` +docker exec -it syncd bash +``` +Then, check the installed SAI +``` +apt list --installed | grep libsai +``` +5. Check out the code from [sonic-buildimage](https://github.com/Azure/sonic-buildimage.git). +*Note: remember to change to the matching branch.* +6. Check out the code for the matching SONiC version (see [Example: Check sonic version and build saiserver docker](./ExampleCheckSonicVersionAndBuildSaiserverDocker.md)). +Check the content of [platform/broadcom/sai.mk](https://github.com/Azure/sonic-buildimage/blob/master/platform/broadcom/sai.mk); it contains the link to the SAI binary that is installed. +``` +The SAI binary file and its name will be there +cat sai.mk +BRCM_SAI = libsaibcm_4.3.3.8-1_amd64.deb +$(BRCM_SAI)_URL = "https://sonicstorage.blob.core.windows.net/packages/bcmsai/4.3/202012/libsaibcm_4.3.3.8-1_amd64.deb?*******" +BRCM_SAI_DEV = libsaibcm-dev_4.3.3.8-1_amd64.deb +$(eval $(call add_derived_package,$(BRCM_SAI),$(BRCM_SAI_DEV))) +$(BRCM_SAI_DEV)_URL = "https://sonicstorage.blob.core.windows.net/packages/bcmsai/4.3/202012/libsaibcm-dev_4.3.3.8-1_amd64.deb?********" +``` \ No newline at end of file diff --git a/docs/testbed/sai_quality/DeploySAITestTopologyWithSONiC-MGMT.md b/docs/testbed/sai_quality/DeploySAITestTopologyWithSONiC-MGMT.md new file mode 100644 index 0000000000..bbd9fa591b --- /dev/null +++ b/docs/testbed/sai_quality/DeploySAITestTopologyWithSONiC-MGMT.md @@ -0,0 +1,51 @@ +In this article, you will learn how to use the sonic-mgmt docker to set up the topology for SAI testing. + +**These commands need to be run within a sonic-mgmt docker, or within a similar environment.** +This section of the documentation describes how to build a sonic-mgmt docker: +https://github.com/Azure/sonic-mgmt/blob/master/docs/testbed/README.testbed.VsSetup.md#setup-sonic-mgmt-docker +1. install the SONiC image on the DUT (device under test), +for example +``` +SONiC Software Version: SONiC.20201231.08 +``` +2. 
+2. Remove the existing topology for the current testbed
+```
+./testbed-cli.sh remove-topo vms12-t0-s6000-1 password.txt
+```
+To understand the topology concept, please refer to the doc
+[Topologies](https://github.com/Azure/sonic-mgmt/blob/master/docs/testbed/README.testbed.Topology.md)
+
+For how to find the topology info and the related devices, please refer to the doc
+[Example of Testbed Configuration](https://github.com/Azure/sonic-mgmt/blob/master/docs/testbed/README.testbed.Example.Config.md)
+
+For examples of setting up the testbed with the related commands, please refer to
+- How to build the related PTF and sonic-mgmt dockers:
+[Testbed Setup](https://github.com/Azure/sonic-mgmt/blob/master/docs/testbed/README.testbed.Setup.md)
+- More focused on a virtual environment with KVM and Docker:
+[KVM Testbed Setup](https://github.com/Azure/sonic-mgmt/blob/master/docs/testbed/README.testbed.VsSetup.md)
+3. Change the topology to 'PTF' by modifying testbed.yaml
+In order to get a default configuration without extra noise during testing (such as interfaces configured as down, or VLANs already set up), we use a PTF (no-topology) setup for testing.
+For example, if we want to use the config `vms-sn2700-t1-lag`, we need to change it as follows
+```git
+ - conf-name: vms-sn2700-t1
+ group-name: vms1-1
+- topo: t1
++ topo: ptf32
+ ptf_image_name: docker-ptf-sai-mlnx
+- ptf: ptf-unknown
++ ptf: ptf-docker-name
+ ptf_ip: 10.255.0.178/24
+ ptf_ipv6:
+ server: server_1
+```
+**If the topology name ends with 64, the topo should be ptf64; please change it according to the actual device port count.**
+
+4. Deploy the new topology
+```
+./testbed-cli.sh -t testbed.yaml add-topo vms-sn2700-t1 password.txt
+```
+**You can change the testbed filename from testbed.yaml if needed; the config name used here is vms-sn2700-t1.**
+5. Push the minigraph to the DUT
+```
+./testbed-cli.sh -t testbed.yaml deploy-mg vms-sn2700-t1 str password.txt
+```
\ No newline at end of file
diff --git a/docs/testbed/sai_quality/ExampleCheckSonicVersionAndBuildSaiserverDocker.md b/docs/testbed/sai_quality/ExampleCheckSonicVersionAndBuildSaiserverDocker.md
new file mode 100644
index 0000000000..c4da0d4555
--- /dev/null
+++ b/docs/testbed/sai_quality/ExampleCheckSonicVersionAndBuildSaiserverDocker.md
@@ -0,0 +1,74 @@
+In this article, you will learn how to get a saiserver docker and a builder docker used to build the saiserver binary.
+
+1. Check the SONiC version on the DUT
+**Old versions might hit issues caused by related package upgrades; you can always use the latest tag of a major version (i.e. the major version here is 20201231), but note the matching image version.**
+ ```
+ show version
+
+ SONiC Software Version: SONiC.20201231.39
+ ```
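The `SONiC Software Version` string maps to a tag in sonic-buildimage, so before creating the branch in the next step you can confirm the tag exists (a minimal sketch; the 20201231.39 value is taken from the example above):

```shell
# Inside a clone of sonic-buildimage
git fetch --tags
git tag --list "20201231.*" | tail -n 5
# Expect to see 20201231.39 in the list; that is the tag checked out in the next step
```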
+2. In your dev environment, install the prerequisite libraries (e.g. pip and Jinja2), then check out the code at that tag and stay on a new branch;
+here we use the repository [sonic-buildimage](https://github.com/Azure/sonic-buildimage).
+Follow the doc at [Check SAI header version and SONiC branch](https://github.com/Azure/sonic-mgmt/blob/master/docs/testbed/sai_quality/CheckSAIHeaderVersionAndSONiCBranch.md)
+ ```
+ # git checkout tags/<tag> -b <branch>
+ # Example:
+
+ git checkout tags/20201231.39 -b richardyu/20201231-39
+ ```
+ *Note: update the submodules recursively.*
+ ```
+ git submodule update --init --recursive
+
+ # Execute make init once after cloning the repo, or after fetching the remote repo with submodule updates
+
+ make init
+ ```
+ *Note: see the following resource for how to build a binary and a docker image:*
+ [GitHub - Azure/sonic-buildimage: Scripts which perform an installable binary image build for SONiC](https://github.com/Azure/sonic-buildimage)
+
+3. Start a local build
+ ```
+ # Clean the environment as needed
+ make reset
+ # Init the environment
+ make init
+ # NOSTRETCH=y : the current image is based on Buster
+ # KEEP_SLAVE_ON=yes : keep the slave container up and active after the build process concludes
+ # Set up the environment for the Broadcom platform
+ make configure PLATFORM=broadcom
+ # Start the build
+ NOSTRETCH=y NOJESSIE=y KEEP_SLAVE_ON=yes ENABLE_SYNCD_RPC=y make target/debs/buster/saiserver_0.9.4_amd64.deb
+ ```
+ **You can list the available build targets with a command like (adjust as needed): NOSTRETCH=y NOJESSIE=y ENABLE_SYNCD_RPC=y make list**
+
+
+4. Wait for the build process to finish
+5. In the end, you will be left at a prompt inside the build docker, as below, where you can check that thrift is installed
+ ```
+ # Check whether thrift is installed
+ richardyu@a0363ed6ca36:/sonic$ thrift
+ Usage: thrift [options] file
+
+ Use thrift -help for a list of options
+ ```
+6. Keep this terminal open, then start another terminal and log in to the same host
+ - Check the running dockers; the builder appears with a name like sonic-slave-***, and it is always the most recently created one
+ ```
+ docker ps
+ CONTAINER ID IMAGE COMMAND CREATED STATUS
+ PORTS NAMES
+ e1df2df072c4 sonic-slave-buster-richardyu:86ef76a28e6 "bash -c 'make -f sl…" 36 minutes ago Up 36 minutes
+ 22/tcp condescending_lovelace
+ ```
+ - Commit that docker as a saiserver-docker builder for later debugging or related build usage.
+ ```
+ docker commit <container> <image name>:<tag>
+ docker commit condescending_lovelace saisever-builder-20201231-39:0.0.1
+ ```
+7. Then exit from the docker above (the console shown as 'richardyu@e1df2df072c4'); your build artifacts are in the `./target` folder, together with the logs and other output.
+8. For building the saiserver binary later, you can mount your local SAI repository into that docker image and simply start it for your build.
+ ```
+ # The SAI repo is located inside the local /code folder
+ docker run --name saisever-builder-20201231-39 -v /code:/data -di saisever-builder-20201231-39:0.0.1 bash
+ ```
\ No newline at end of file
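Once the builder container from step 8 is running with the SAI repository mounted, you can attach to it and work from the mounted directory (a minimal sketch; the container name and the /data mount path follow the `docker run` example above):

```shell
# Attach to the running builder container
docker exec -it saisever-builder-20201231-39 bash
# Inside the container, the SAI repository mounted from /code is visible under /data
ls /data
```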
diff --git a/docs/testbed/sai_quality/ExampleStartSaiServerDockerInDUT.md b/docs/testbed/sai_quality/ExampleStartSaiServerDockerInDUT.md
new file mode 100644
index 0000000000..3017fac7fd
--- /dev/null
+++ b/docs/testbed/sai_quality/ExampleStartSaiServerDockerInDUT.md
@@ -0,0 +1,34 @@
+In this article, you will learn how to start a saiserver docker on a DUT (device under test).
+1. Pull or upload the saiserver docker to your DUT.
+
+```
+# If your docker registry has the saiserver docker, you can pull it
+docker pull <registry>/docker-saiserver-<asic>:<tag>
+# For example
+docker pull soniccr1.azurecr.io/docker-saiserver-brcm:20201231.29
+```
+Otherwise, you can upload a docker image file built locally; please refer to the doc on how to build a saiserver docker:
+[Example: Check Sonic Version And Build Saiserver Docker](./ExampleCheckSonicVersionAndBuildSaiserverDocker.md)
+
+Then import the image
+```shell
+docker load -i ./<docker image file>
+```
+
+2. Configure the saiserver runtime environment and start saiserver from the SONiC console
+
+**Before starting the saiserver docker, it is better to stop the other services running on the DUT.**
+```shell
+SONIC_CFGGEN="sonic-cfggen"
+SONIC_DB_CLI="sonic-db-cli"
+PLATFORM=${PLATFORM:-`$SONIC_CFGGEN -H -v DEVICE_METADATA.localhost.platform`}
+HWSKU=${HWSKU:-`$SONIC_CFGGEN -d -v 'DEVICE_METADATA["localhost"]["hwsku"]'`}
+DOCKERNAME=saiserver
+#DOCKERIMG=<docker image name>   <--- set the docker image name
+docker create --privileged --net=host \
+ -v /usr/share/sonic/device/$PLATFORM/$HWSKU:/usr/share/sonic/hwsku:ro \
+ --name=$DOCKERNAME $DOCKERIMG
+docker start $DOCKERNAME
+
+```
+
+
diff --git a/docs/testbed/sai_quality/GetDockerBuildingRelatedResources.md b/docs/testbed/sai_quality/GetDockerBuildingRelatedResources.md
new file mode 100644
index 0000000000..bb81c8d7bb
--- /dev/null
+++ b/docs/testbed/sai_quality/GetDockerBuildingRelatedResources.md
@@ -0,0 +1,31 @@
+### Background
+In order to build artifacts for SONiC, we always need a specific build environment; for convenience, the builds run inside docker containers. These docker containers are defined in the repo [sonic-buildimage](https://github.com/Azure/sonic-buildimage).
+After the build process, the build docker is deleted.
+However, we sometimes still need such a docker environment, for example to debug code.
+
+In that case we need to get a build docker in the local environment.
+
+### Steps
+1. Check out the [sonic-buildimage](https://github.com/Azure/sonic-buildimage/blob/master/README.md) repo
+2. Go through the [doc](https://github.com/Azure/sonic-buildimage/blob/master/README.md) to understand the build system.
+3. Choose a target (for different purposes, the command might be different)
+```
+# Build a SONiC image for an Aboot (Broadcom) platform
+make target/sonic-aboot-broadcom.swi
+# Another example: a Marvell armhf image
+make configure PLATFORM=marvell-armhf PLATFORM_ARCH=armhf
+make target/sonic-marvell-armhf.bin
+```
+4. Add the parameter [KEEP_SLAVE_ON](https://github.com/Azure/sonic-buildimage/blob/aa59bfeab7eaa569ecf99c8ba62745126ac92602/Makefile.work#L19) when building the target, so the build docker is kept running
+5. Commit the build docker to the local image repository
+ Then you can use the [docker command](https://docs.docker.com/engine/reference/commandline/save/) to save the docker as a local image
+```
+docker commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]
+
+Example: musing_payne is the container name, slave-syncd-4334 is the local image name, 0.0.1 is the image tag
+
+docker commit musing_payne slave-syncd-4334:0.0.1
+```
+6.
Then you can use the local commit docker for other SAI related buildings + +Here is a example [Example: Check sonic version and get a related builder](./ExampleCheckSonicVersionAndBuildSaiserverDocker.md) \ No newline at end of file diff --git a/docs/testbed/sai_quality/README.md b/docs/testbed/sai_quality/README.md new file mode 100644 index 0000000000..ae7fff75e4 --- /dev/null +++ b/docs/testbed/sai_quality/README.md @@ -0,0 +1,16 @@ +# SONiC SAI_quality Testbed + +## Description +Following those instructions on how to setup the SAI testing environment. + + - [Deploy SAI Test Topology With SONiC-MGMT](DeploySAITestTopologyWithSONiC-MGMT.md) + - [Check SAI Header Version And SONiC Branch](CheckSAIHeaderVersionAndSONiCBranch.md) + - [Get Docker Building Related Resources](GetDockerBuildingRelatedResources.md) + + +## Example +Following are the examples for SAI testing +- [SAI Testing Example](SAI.Example.md) +- [Example: Check Sonic Version And Build Saiserver Docker](ExampleCheckSonicVersionAndBuildSaiserverDocker.md) +- [Example:Start SaiServer Docker In DUT](ExampleStartSaiServerDockerInDUT.md) + diff --git a/docs/testbed/sai_quality/SAI.Example.md b/docs/testbed/sai_quality/SAI.Example.md new file mode 100644 index 0000000000..caf2b698e3 --- /dev/null +++ b/docs/testbed/sai_quality/SAI.Example.md @@ -0,0 +1,152 @@ +# SAI Testing Example +### Prerequisites + +Through previous docs make sure the PTF docker, sonic-mgmt docker, DUT (running on SONiC.202012 image in this case) are all set. + +### Prepare test cases on PTF +In this section, we will prepare SAI test cases on the PTF docker. +1. Logon to your PTF docker, make sure the PTF docker could access to GitHub, then type following commands to clone SAI repo: +- SAI **v1.7** will be used for this example since v1.7 is one of supported SAI versions in SONiC.202012, for more information please check [Check SAI header with SONiC branch](CheckSAIHeaderVersionAndSONiCBranch.md) + +``` +rm -rf ./SAI +git init SAI +cd SAI +git remote add origin https://github.com/opencomputeproject/SAI.git +git fetch origin +git checkout -b v1.7 origin/v1.7 +``` +​ It will have the following result: +``` +root@314fb9c4a38f:/tmp# rm -rf ./SAI +root@314fb9c4a38f:/tmp# git init SAI +Initialized empty Git repository in /tmp/SAI/.git/ +root@314fb9c4a38f:/tmp# cd SAI +root@314fb9c4a38f:/tmp/SAI# git remote add origin https://github.com/opencomputeproject/SAI.git +root@314fb9c4a38f:/tmp/SAI# git fetch origin +remote: Enumerating objects: 7962, done. +remote: Counting objects: 100% (534/534), done. +remote: Compressing objects: 100% (302/302), done. +remote: Total 7962 (delta 339), reused 350 (delta 231), pack-reused 7428 +Receiving objects: 100% (7962/7962), 63.32 MiB | 30.71 MiB/s, done. +Resolving deltas: 100% (5490/5490), done. 
+From https://github.com/opencomputeproject/SAI + * [new branch] master -> origin/master + * [new branch] rajeevsharma1-patch-1 -> origin/rajeevsharma1-patch-1 + * [new branch] revert-648-vlanigmpcontrol -> origin/revert-648-vlanigmpcontrol + * [new branch] v0.9.1 -> origin/v0.9.1 + * [new branch] v0.9.2 -> origin/v0.9.2 + * [new branch] v0.9.4 -> origin/v0.9.4 + * [new branch] v0.9.5 -> origin/v0.9.5 + * [new branch] v0.9.6 -> origin/v0.9.6 + * [new branch] v1.0 -> origin/v1.0 + * [new branch] v1.1 -> origin/v1.1 + * [new branch] v1.2 -> origin/v1.2 + * [new branch] v1.3 -> origin/v1.3 + * [new branch] v1.4 -> origin/v1.4 + * [new branch] v1.5 -> origin/v1.5 + * [new branch] v1.6 -> origin/v1.6 + * [new branch] v1.7 -> origin/v1.7 + * [new branch] v1.8 -> origin/v1.8 + * [new branch] v1.9 -> origin/v1.9 + ........ +``` + +``` +root@314fb9c4a38f:/tmp/SAI# git checkout -b v1.7 origin/v1.7 +Branch v1.7 set up to track remote branch v1.7 from origin. +Switched to a new branch 'v1.7' +root@314fb9c4a38f:/tmp/SAI# ls -l +total 160 +-rw-r--r-- 1 root root 106691 Oct 20 06:40 Doxyfile +-rw-r--r-- 1 root root 2700 Oct 20 06:40 LICENSE.txt +-rw-r--r-- 1 root root 1116 Oct 20 06:40 Makefile +-rw-r--r-- 1 root root 463 Oct 20 06:40 README.md +drwxr-xr-x 5 root root 4096 Oct 20 06:40 bm +drwxr-xr-x 3 root root 4096 Oct 20 06:40 data +drwxr-xr-x 2 root root 4096 Oct 20 06:40 debian +drwxr-xr-x 32 root root 4096 Oct 20 06:40 doc +drwxr-xr-x 2 root root 4096 Oct 20 06:40 experimental +drwxr-xr-x 3 root root 4096 Oct 20 06:40 flexsai +drwxr-xr-x 2 root root 4096 Oct 20 06:40 inc +drwxr-xr-x 2 root root 4096 Oct 20 06:40 meta +drwxr-xr-x 5 root root 4096 Oct 20 06:40 stub +drwxr-xr-x 6 root root 4096 Oct 20 06:40 test +``` + +### Prepare testing environment on DUT +1. Logon to the DUT and stop all services before start saiserver: + +- It'd be better stop other services/containers running on DUT, especially syncd and swss. +``` +sudo systemctl stop +``` +2. After [Example: Check sonic version and get a saiserver docker with a related builder](GetDockerBuildingRelatedResources.md), the docker registry should have the **docker-saiserver-brcm** images. + +- Pull the image from registry, in this case the os version is: 202012 + +``` +admin@s6000:~$ docker pull /docker-saiserver-brcm:202012 +Status: Downloaded newer image for /docker-saiserver-brcm:202012 +``` +- Tag the images with name **docker-saiserver-brcm** +``` +admin@s6000:~$ docker tag ${SONIC_REG}/docker-saiserver-brcm:202012 docker-saiserver-brcm +``` +- Execute step2 from [Example: Start SaiServer Docker from a DUT](ExampleStartSaiServerDockerInDUT.md) + +``` +admin@s6000:~$ SONIC_CFGGEN="sonic-cfggen" +admin@s6000:~$ SONIC_DB_CLI="sonic-db-cli" +admin@s6000:~$ PLATFORM=${PLATFORM:-`$SONIC_CFGGEN -H -v DEVICE_METADATA.localhost.platform`} +admin@s6000:~$ HWSKU=${HWSKU:-`$SONIC_CFGGEN -d -v 'DEVICE_METADATA["localhost"]["hwsku"]'`} +admin@s6000:~$ DOCKERNAME=saiserver +admin@s6000:~$ DOCKERIMG=docker-saiserver-brcm:latest +admin@s6000:~$ docker create --privileged --net=host \ +> -v /usr/share/sonic/device/$PLATFORM/$HWSKU:/usr/share/sonic/hwsku:ro \ +> --name=$DOCKERNAME $DOCKERIMG +59b7fec7645fbf448a5c4db53a474f36695deec98b40dd1656441abd67ac03d0 +admin@s6000:~$ docker start $DOCKERNAME +saiserver +``` +- Check current running docker containers with```docker ps```, **docker-saiserver-brcm:latest** container is running means DUT has the target saiserver docker up. 
+ +``` +admin@s6000:~$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +59b7fec7645f docker-saiserver-brcm:latest "/usr/local/bin/supe…" 25 seconds ago Up 16 seconds saiserver +``` +- Use ```docker exec -it saiserver bash``` to logon to saiserver docker bash + +``` +admin@s6000:~$ docker exec -it saiserver bash +``` +- Also make sure the port **9092** is up, which the saiserver is listening to + +``` +admin@s6000:~$ sudo netstat -tulpn | grep LISTEN + +tcp6 0 0 :::9092 :::* LISTEN 1224720/saiserver +``` +Now the saiserver on DUT should be ready for SAI testing. + +#### Start SAI testing on PTF + +On your **PTF** docker, prepare a file named **default_interface_to_front_map.ini** an example could be found at [Example](https://github.com/opencomputeproject/SAI/blob/master/test/saithrift/src/msn_2700/default_interface_to_front_map.ini). +execute the following command to start SAI test: +``` +ptf --test-dir sail2.L2AccessToAccessVlanTest --interface '1@eth1' --interface '2@eth2' -t "server='';port_map_file=''" +``` +It will have the result like this: +``` +root@314fb9c4a38f:/tmp/SAI/test/saithrift# ptf --test-dir tests sail2.L2AccessToAccessVlanTest --interface '1@eth1' --interface '2@eth2' -t "server='';port_map_file='default_interface_to_front_map.ini'" +WARNING: No route found for IPv6 destination :: (no default route?) +sail2.L2AccessToAccessVlanTest ... +Sending L2 packet port 1 -> port 2 [access vlan=10]) +ok + +---------------------------------------------------------------------- +Ran 1 test in 1.662s + +OK +``` \ No newline at end of file diff --git a/docs/testplan/Next-hop-split-test-plan.md b/docs/testplan/Next-hop-split-test-plan.md new file mode 100644 index 0000000000..9ebea88f39 --- /dev/null +++ b/docs/testplan/Next-hop-split-test-plan.md @@ -0,0 +1,71 @@ +# **Next Hop Split Test Plan** + + - [Introduction](#introduction) + - [Scope](#scope) + - [Test Setup](#test-setup) + - [Test Cases](#test-cases) + +# Introduction + +This is the test plan for the next hop group split enhancement + +The PR covered in this test plan is [Next hop group split HLD PR 712](https://github.com/Azure/SONiC/pull/712) + +## Scope + +This test plan covers a new method of programming routes into APP_DB, where the next hop information is included in a separate NEXT_HOP_GROUP_TABLE referenced by the ROUTE_TABLE and LABEL_ROUTE_TABLE. + +There is no support in the BGP container/fpmsyncd for this feature, so routes and next hop groups are all programmed directly into APP_DB by the test scripts. + +# Test Setup + +These test cases will be run in the T0 topology. + +# Test Cases + +The methods used to support these test cases will be very similar to the methods in test_static_route.py (for adding IP addresses and routes and for checking traffic). + +## Test Case 1. IPv4 routes + +### Test Objective +Verify that IPv4 routes and next hop groups can be programmed and cause the correct data plane behaviour. + +### Test Steps +* Create an IPv4 next hop group with a single next hop +* Create an IPv4 prefix route referencing the next hop +* Create an IPv4 next hop group with multiple next hops +* Update the route to reference the new next hop group + +### Pass/Fail Criteria +* After the route is created verify that traffic for the prefix is routed via the next hop +* After the route is updated verify that traffic for the prefix is routed via a next hop in the group + +## Test Case 2. 
IPv6 routes + +### Test Objective +Verify that IPv6 routes and next hop groups can be programmed and cause the correct data plane behaviour. + +### Test Steps +* Create an IPv6 next hop group with a single next hop +* Create an IPv6 prefix route referencing the next hop +* Create an IPv6 next hop group with multiple next hops +* Update the route to reference the new next hop group + +### Pass/Fail Criteria +* After the route is created verify that traffic for the prefix is routed via the next hop +* After the route is updated verify that traffic for the prefix is routed via a next hop in the group + +## Test Case 3. MPLS routes + +### Test Objective +Verify that MPLS routes and next hop groups can be programmed and cause the correct data plane behaviour. + +### Test Steps +* Create an IPv4 next hop group with a single labeled next hop +* Create a label route referencing the next hop +* Create an IPv4 next hop group with multiple labeled next hops +* Update the label route to reference the new next hop group + +### Pass/Fail Criteria +* After the route is created verify that traffic for the prefix is routed via the next hop +* After the route is updated verify that traffic for the prefix is routed via a next hop in the group diff --git a/tests/acl/test_acl.py b/tests/acl/test_acl.py index d7da769f30..5fe9c1fbaf 100644 --- a/tests/acl/test_acl.py +++ b/tests/acl/test_acl.py @@ -534,7 +534,7 @@ def direction(self, request): def check_rule_counters(self, duthost): logger.info('Wait all rule counters are ready') - return wait_until(60, 2, self.check_rule_counters_internal, duthost) + return wait_until(60, 2, 0, self.check_rule_counters_internal, duthost) def check_rule_counters_internal(self, duthost): for asic_id in duthost.get_frontend_asic_ids(): diff --git a/tests/arp/test_neighbor_mac_noptf.py b/tests/arp/test_neighbor_mac_noptf.py index 6b4c66a1a3..1260d735e7 100644 --- a/tests/arp/test_neighbor_mac_noptf.py +++ b/tests/arp/test_neighbor_mac_noptf.py @@ -71,7 +71,7 @@ def setupDutConfig(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname): duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] if not duthost.get_facts().get("modular_chassis"): duthost.command("sudo config bgp shutdown all") - if not wait_until(120, 2.0, self._check_no_bgp_routes, duthost): + if not wait_until(120, 2.0, 0, self._check_no_bgp_routes, duthost): pytest.fail('BGP Shutdown Timeout: BGP route removal exceeded 120 seconds.') yield @@ -115,7 +115,7 @@ def find_routed_interface(): testRoutedInterface[asichost.asic_index] = intf return testRoutedInterface - if not wait_until(120, 2, find_routed_interface): + if not wait_until(120, 2, 0, find_routed_interface): pytest.fail('Failed to find routed interface in 120 s') yield testRoutedInterface diff --git a/tests/arp/test_tagged_arp.py b/tests/arp/test_tagged_arp.py new file mode 100644 index 0000000000..e026cc8e82 --- /dev/null +++ b/tests/arp/test_tagged_arp.py @@ -0,0 +1,196 @@ + +import pytest +import ptf.packet as scapy +import ptf.testutils as testutils +from ptf.mask import Mask + +import itertools +import logging +import ipaddress +import pprint + +from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] +from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # lgtm[py/unused-import] +from tests.common.fixtures.duthost_utils import ports_list, vlan_ports_list +from tests.common.helpers.assertions import pytest_require + + +logger = logging.getLogger(__name__) + 
+pytestmark = [ + pytest.mark.topology('t0', 't0-56-po2vlan') +] + +PTF_PORT_MAPPING_MODE = "use_orig_interface" +DUMMY_MAC_PREFIX = "02:11:22:33" +DUMMY_IP_PREFIX = "188.123" +DUMMY_ARP_COUNT = 10 + + +@pytest.fixture(scope="module") +def skip_dualtor(tbinfo): + """Skip running `test_tagged_arp` over dualtor.""" + pytest_require("dualtor" not in tbinfo["topo"]["name"], "Skip 'test_tagged_arp' over dualtor.") + + +@pytest.fixture(scope="module") +def cfg_facts(duthosts, rand_one_dut_hostname, skip_dualtor): + duthost = duthosts[rand_one_dut_hostname] + return duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts'] + + +def enable_arp(duthost, cfg_facts, enable): + vlan_members = cfg_facts.get('VLAN_MEMBER', {}) + on_cmd = "echo 1 > /proc/sys/net/ipv4/conf/%s/arp_accept" + off_cmd = "echo 0 > /proc/sys/net/ipv4/conf/%s/arp_accept" + for vlan in vlan_members.keys(): + if enable: + logger.info("Enable ARP for %s" % vlan) + duthost.shell(on_cmd % vlan) + else: + logger.info("Disable ARP for %s" % vlan) + duthost.shell(off_cmd % vlan) + + +def arp_cleanup(duthost): + """ cleanup ARP entry """ + duthost.command('sonic-clear arp') + + +@pytest.fixture(scope="module", autouse=True) +def setup_arp(duthosts, rand_one_dut_hostname, cfg_facts): + duthost = duthosts[rand_one_dut_hostname] + # --------------------- Setup ----------------------- + try: + enable_arp(duthost, cfg_facts, True) + # --------------------- Testing ----------------------- + yield + # --------------------- Teardown ----------------------- + finally: + enable_arp(duthost, cfg_facts, False) + arp_cleanup(duthost) + + +def build_arp_packet(vlan_id, neighbor_mac, neighbor_ip): + + pkt = testutils.simple_arp_packet(pktlen=60 if vlan_id == 0 else 64, + eth_dst='ff:ff:ff:ff:ff:ff', + eth_src=neighbor_mac, + vlan_vid=vlan_id, + arp_op=2, + hw_snd=neighbor_mac, + ip_snd=neighbor_ip, + ip_tgt=neighbor_ip) + return pkt + + +def verify_packets_with_portchannel(test, pkt, ports=[], portchannel_ports=[], device_number=0, timeout=1): + for port in ports: + result = testutils.dp_poll(test, device_number=device_number, port_number=port, + timeout=timeout, exp_pkt=pkt) + if isinstance(result, test.dataplane.PollFailure): + test.fail("Expected packet was not received on device %d, port %r.\n%s" + % (device_number, port, result.format())) + + for port_group in portchannel_ports: + for port in port_group: + result = testutils.dp_poll(test, device_number=device_number, port_number=port, + timeout=timeout, exp_pkt=pkt) + if isinstance(result, test.dataplane.PollSuccess): + break + else: + test.fail("Expected packet was not received on device %d, ports %s.\n" + % (device_number, str(port_group))) + + +def verify_arp_packets(ptfadapter, vlan_ports_list, vlan_port, vlan_id, untagged_pkt, masked_tagged_pkt): + untagged_dst_ports = [] + tagged_dst_ports = [] + untagged_dst_pc_ports = [] + tagged_dst_pc_ports = [] + logger.info("Verify packets from ports " + str(vlan_port["port_index"][0])) + for port in vlan_ports_list: + if vlan_port["port_index"] == port["port_index"]: + # Skip src port + continue + if port["pvid"] == vlan_id: + if len(port["port_index"]) > 1: + untagged_dst_pc_ports.append(port["port_index"]) + else: + untagged_dst_ports += port["port_index"] + elif vlan_id in map(int, port["permit_vlanid"]): + if len(port["port_index"]) > 1: + tagged_dst_pc_ports.append(port["port_index"]) + else: + tagged_dst_ports += port["port_index"] + + verify_packets_with_portchannel(test=ptfadapter, + pkt=untagged_pkt, + 
ports=untagged_dst_ports, + portchannel_ports=untagged_dst_pc_ports) + verify_packets_with_portchannel(test=ptfadapter, + pkt=masked_tagged_pkt, + ports=tagged_dst_ports, + portchannel_ports=tagged_dst_pc_ports) + + +@pytest.mark.bsl +def test_tagged_arp_pkt(ptfadapter, vlan_ports_list, duthosts, rand_one_dut_hostname, toggle_all_simulator_ports_to_rand_selected_tor): + """ + Send tagged GARP packets from each port. + Verify packets egress without tag from ports whose PVID same with ingress port. + Verify packets egress with tag from ports who include VLAN ID but PVID different from ingress port. + verify show arp command on DUT. + """ + duthost = duthosts[rand_one_dut_hostname] + for vlan_port in vlan_ports_list: + port_index = vlan_port["port_index"][0] + # Send GARP packets to switch to populate the arp table with dummy MACs for each port + # Totally 10 dummy MACs for each port, send 1 packet for each dummy MAC + # ARP table will be cleaned up before each iteration, so there won't be any conflict MAC and IP + dummy_macs = ['{}:{:02x}:{:02x}'.format(DUMMY_MAC_PREFIX, port_index&0xFF, i+1) + for i in range(DUMMY_ARP_COUNT)] + dummy_ips = ['{}.{:d}.{:d}'.format(DUMMY_IP_PREFIX, port_index&0xFF, i+1) + for i in range(DUMMY_ARP_COUNT)] + for permit_vlanid in map(int, vlan_port["permit_vlanid"]): + logger.info('Test ARP: interface %s, VLAN %u' % (vlan_port["dev"], permit_vlanid)) + # Perform ARP clean up + arp_cleanup(duthost) + for i in range(DUMMY_ARP_COUNT): + pkt = build_arp_packet(permit_vlanid, dummy_macs[i], dummy_ips[i]) + exp_untagged_pkt = build_arp_packet(0, dummy_macs[i], dummy_ips[i]) + # vlan priority attached to packets is determined by the port, so we ignore it here + exp_tagged_pkt = Mask(pkt) + exp_tagged_pkt.set_do_not_care_scapy(scapy.Dot1Q, "prio") + logger.info("Send tagged({}) packet from {} ...".format(permit_vlanid, port_index)) + testutils.send(ptfadapter, port_index, pkt) + verify_arp_packets(ptfadapter, vlan_ports_list, vlan_port, permit_vlanid, exp_untagged_pkt, exp_tagged_pkt) + + res = duthost.command('show arp') + assert res['rc'] == 0 + logger.info('"show arp" output on DUT:\n{}'.format(pprint.pformat(res['stdout_lines']))) + + arp_cnt = 0 + for l in res['stdout_lines']: + # Address MacAddress Iface Vlan + items = l.split() + if len(items) != 4: + continue + # Vlan must be number + if not items[3].isdigit(): + continue + arp_cnt += 1 + ip = items[0] + mac = items[1] + ifname = items[2] + vlan_id = int(items[3]) + assert ip in dummy_ips + assert mac in dummy_macs + # 'show arp' command gets iface from FDB table, + # if 'show arp' command was earlier than FDB table update, ifname would be '-' + if ifname == '-': + logger.info('Ignore unknown iface...') + else: + assert ifname == vlan_port["dev"] + assert vlan_id == permit_vlanid + assert arp_cnt == DUMMY_ARP_COUNT diff --git a/tests/autorestart/test_container_autorestart.py b/tests/autorestart/test_container_autorestart.py index 8d6c1286e7..9bdbb8abfe 100755 --- a/tests/autorestart/test_container_autorestart.py +++ b/tests/autorestart/test_container_autorestart.py @@ -21,9 +21,9 @@ ] CONTAINER_CHECK_INTERVAL_SECS = 1 -CONTAINER_STOP_THRESHOLD_SECS = 30 +CONTAINER_STOP_THRESHOLD_SECS = 60 CONTAINER_RESTART_THRESHOLD_SECS = 180 -CONTAINER_NAME_REGEX = (r"([a-zA-Z_]+)(\d*)$") +CONTAINER_NAME_REGEX = (r"([a-zA-Z_-]+)(\d*)$") POST_CHECK_INTERVAL_SECS = 1 POST_CHECK_THRESHOLD_SECS = 360 @@ -218,6 +218,7 @@ def clear_failed_flag_and_restart(duthost, container_name): duthost.shell("sudo systemctl start 
{}.service".format(container_name)) restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, + 0, check_container_state, duthost, container_name, True) pytest_assert(restarted, "Failed to restart container '{}' after reset-failed was cleared".format(container_name)) @@ -240,6 +241,7 @@ def verify_autorestart_with_critical_process(duthost, container_name, program_na logger.info("Waiting until container '{}' is stopped...".format(container_name)) stopped = wait_until(CONTAINER_STOP_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, + 0, check_container_state, duthost, container_name, False) pytest_assert(stopped, "Failed to stop container '{}'".format(container_name)) logger.info("Container '{}' was stopped".format(container_name)) @@ -247,6 +249,7 @@ def verify_autorestart_with_critical_process(duthost, container_name, program_na logger.info("Waiting until container '{}' is restarted...".format(container_name)) restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, + 0, check_container_state, duthost, container_name, True) if not restarted: if is_hiting_start_limit(duthost, container_name): @@ -275,6 +278,7 @@ def verify_no_autorestart_with_non_critical_process(duthost, container_name, pro logger.info("Waiting to ensure container '{}' does not stop...".format(container_name)) stopped = wait_until(CONTAINER_STOP_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, + 0, check_container_state, duthost, container_name, False) pytest_assert(not stopped, "Container '{}' was stopped unexpectedly".format(container_name)) logger.info("Container '{}' did not stop".format(container_name)) @@ -333,7 +337,7 @@ def postcheck_critical_processes_status(duthost, container_autorestart_states, u if is_hiting_start_limit(duthost, container_name): clear_failed_flag_and_restart(duthost, container_name) - return wait_until(POST_CHECK_THRESHOLD_SECS, POST_CHECK_INTERVAL_SECS, + return wait_until(POST_CHECK_THRESHOLD_SECS, POST_CHECK_INTERVAL_SECS, 0, post_test_check, duthost, up_bgp_neighbors) diff --git a/tests/bgp/bgp_helpers.py b/tests/bgp/bgp_helpers.py index 48cb87d2a4..c167093930 100644 --- a/tests/bgp/bgp_helpers.py +++ b/tests/bgp/bgp_helpers.py @@ -44,7 +44,7 @@ def restart_bgp(duthost, asic_index=DEFAULT_ASIC_ID): duthost.asic_instance(asic_index).reset_service("bgp") duthost.asic_instance(asic_index).restart_service("bgp") docker_name = duthost.asic_instance(asic_index).get_docker_name("bgp") - pytest_assert(wait_until(100, 10, duthost.is_service_fully_started, docker_name), "BGP not started.") + pytest_assert(wait_until(100, 10, 0, duthost.is_service_fully_started, docker_name), "BGP not started.") def define_config(duthost, template_src_path, template_dst_path): @@ -128,7 +128,7 @@ def get_routes_not_announced_to_bgpmon(duthost, ptfhost): """ def _dump_fie_exists(host): return host.stat(path=DUMP_FILE).get('stat', {}).get('exists', False) - pytest_assert(wait_until(120, 10, _dump_fie_exists, ptfhost)) + pytest_assert(wait_until(120, 10, 0, _dump_fie_exists, ptfhost)) time.sleep(20) # Wait until all routes announced to bgpmon bgpmon_routes = parse_exabgp_dump(ptfhost) rib_v4 = parse_rib(duthost, 4) diff --git a/tests/bgp/conftest.py b/tests/bgp/conftest.py index 3527bcf6ed..20ce4c347b 100644 --- a/tests/bgp/conftest.py +++ b/tests/bgp/conftest.py @@ -137,12 +137,12 @@ def restore_nbr_gr(node=None, results=None): logger.info("bgp neighbors: {}".format(bgp_neighbors.keys())) res = True err_msg = "" - if not wait_until(300, 10, 
duthost.check_bgp_session_state, bgp_neighbors.keys()): + if not wait_until(300, 10, 0, duthost.check_bgp_session_state, bgp_neighbors.keys()): res = False err_msg = "not all bgp sessions are up after enable graceful restart" is_backend_topo = "backend" in tbinfo["topo"]["name"] - if not is_backend_topo and res and not wait_until(100, 5, duthost.check_bgp_default_route): + if not is_backend_topo and res and not wait_until(100, 5, 0, duthost.check_bgp_default_route): res = False err_msg = "ipv4 or ipv6 bgp default route not available" @@ -157,7 +157,7 @@ def restore_nbr_gr(node=None, results=None): check_results(results) - if not wait_until(300, 10, duthost.check_bgp_session_state, bgp_neighbors.keys()): + if not wait_until(300, 10, 0, duthost.check_bgp_session_state, bgp_neighbors.keys()): pytest.fail("not all bgp sessions are up after disable graceful restart") @@ -322,7 +322,7 @@ def _setup_interfaces_t0(mg_facts, peer_count): ptfhost.shell("ifconfig %s 0.0.0.0" % conn["neighbor_intf"]) @contextlib.contextmanager - def _setup_interfaces_t1(mg_facts, peer_count): + def _setup_interfaces_t1_or_t2(mg_facts, peer_count): try: connections = [] is_backend_topo = "backend" in tbinfo["topo"]["name"] @@ -410,8 +410,8 @@ def _setup_interfaces_t1(mg_facts, peer_count): setup_func = _setup_interfaces_dualtor elif tbinfo["topo"]["type"] == "t0": setup_func = _setup_interfaces_t0 - elif tbinfo["topo"]["type"] == "t1": - setup_func = _setup_interfaces_t1 + elif tbinfo["topo"]["type"] in set(["t1", "t2"]): + setup_func = _setup_interfaces_t1_or_t2 else: raise TypeError("Unsupported topology: %s" % tbinfo["topo"]["type"]) @@ -530,7 +530,7 @@ def bgpmon_setup_teardown(ptfhost, duthost, localhost, setup_interfaces): pt_assert(wait_tcp_connection(localhost, ptfhost.mgmt_ip, BGP_MONITOR_PORT), "Failed to start bgp monitor session on PTF") - pt_assert(wait_until(20, 5, duthost.check_bgp_session_state, [peer_addr]), 'BGP session {} on duthost is not established'.format(BGP_MONITOR_NAME)) + pt_assert(wait_until(20, 5, 0, duthost.check_bgp_session_state, [peer_addr]), 'BGP session {} on duthost is not established'.format(BGP_MONITOR_NAME)) yield # Cleanup bgp monitor diff --git a/tests/bgp/test_bgp_bbr.py b/tests/bgp/test_bgp_bbr.py index 385c558d81..d82dd436cb 100644 --- a/tests/bgp/test_bgp_bbr.py +++ b/tests/bgp/test_bgp_bbr.py @@ -265,6 +265,8 @@ def check_other_vms(nbrhosts, setup, route, accepted=True, node=None, results=No tor1_asn = setup['tor1_asn'] vm_route = nbrhosts[node]['host'].get_route(route.prefix) + if not isinstance(vm_route, dict): + logging.warn("DEBUG: unexpected vm_route type {}, {}".format(type(vm_route), vm_route)) vm_route['failed'] = False vm_route['message'] = 'Checking route {} on {} passed'.format(str(route), node) if accepted: @@ -290,10 +292,10 @@ def check_other_vms(nbrhosts, setup, route, accepted=True, node=None, results=No bgp_neighbors = json.loads(duthost.shell("sonic-cfggen -d --var-json 'BGP_NEIGHBOR'")['stdout']) # check tor1 - pytest_assert(wait_until(5, 1, check_tor1, nbrhosts, setup, route), 'tor1 check failed') + pytest_assert(wait_until(5, 1, 0, check_tor1, nbrhosts, setup, route), 'tor1 check failed') # check DUT - pytest_assert(wait_until(5, 1, check_dut, duthost, other_vms, bgp_neighbors, setup, route, accepted=accepted), 'DUT check failed') + pytest_assert(wait_until(5, 1, 0, check_dut, duthost, other_vms, bgp_neighbors, setup, route, accepted=accepted), 'DUT check failed') results = parallel_run(check_other_vms, (nbrhosts, setup, route), {'accepted': accepted}, 
other_vms, timeout=120) diff --git a/tests/bgp/test_bgp_fact.py b/tests/bgp/test_bgp_fact.py index e477ed0197..4b5a356174 100644 --- a/tests/bgp/test_bgp_fact.py +++ b/tests/bgp/test_bgp_fact.py @@ -5,14 +5,11 @@ pytest.mark.device_type('vs') ] -def test_bgp_facts(duthosts, enum_dut_hostname, enum_asic_index): - """compare the bgp facts between observed states and target state""" - duthost = duthosts[enum_dut_hostname] +def test_bgp_facts(duthosts, enum_frontend_dut_hostname, enum_asic_index): + """compare the bgp facts between observed states and target state""" - # Check if duthost is 'supervisor' card, and skip the test if dealing with supervisor card. - if duthost.is_supervisor_node(): - pytest.skip("bgp_facts not valid on supervisor card '%s'" % enum_dut_hostname) + duthost = duthosts[enum_frontend_dut_hostname] bgp_facts = duthost.bgp_facts(instance_id=enum_asic_index)['ansible_facts'] namespace = duthost.get_namespace_from_asic_id(enum_asic_index) @@ -32,6 +29,8 @@ def test_bgp_facts(duthosts, enum_dut_hostname, enum_asic_index): nbrs_in_cfg_facts = {} nbrs_in_cfg_facts.update(config_facts.get('BGP_NEIGHBOR', {})) nbrs_in_cfg_facts.update(config_facts.get('BGP_INTERNAL_NEIGHBOR', {})) + # In VoQ Chassis, we would have BGP_VOQ_CHASSIS_NEIGHBOR as well. + nbrs_in_cfg_facts.update(config_facts.get('BGP_VOQ_CHASSIS_NEIGHBOR', {})) for k, v in nbrs_in_cfg_facts.items(): # Compare the bgp neighbors name with config db bgp neighbors name assert v['name'] == bgp_facts['bgp_neighbors'][k]['description'] diff --git a/tests/bgp/test_bgp_gr_helper.py b/tests/bgp/test_bgp_gr_helper.py index 66dc445c33..de8c1879b2 100644 --- a/tests/bgp/test_bgp_gr_helper.py +++ b/tests/bgp/test_bgp_gr_helper.py @@ -160,7 +160,7 @@ def _verify_prefix_counters_from_neighbor_after_graceful_restart(duthost, bgp_ne # wait till DUT enters NSF state for test_bgp_neighbor in test_bgp_neighbors: pytest_assert( - wait_until(60, 5, duthost.check_bgp_session_nsf, test_bgp_neighbor), + wait_until(60, 5, 0, duthost.check_bgp_session_nsf, test_bgp_neighbor), "neighbor {} does not enter NSF state".format(test_bgp_neighbor) ) @@ -187,7 +187,7 @@ def _verify_prefix_counters_from_neighbor_after_graceful_restart(duthost, bgp_ne # wait for exabgp sessions to establish pytest_assert( - wait_until(300, 10, test_neighbor_host.check_bgp_session_state, exabgp_ips, exabgp_sessions), + wait_until(300, 10, 0, test_neighbor_host.check_bgp_session_state, exabgp_ips, exabgp_sessions), "exabgp sessions {} are not coming back".format(exabgp_sessions) ) @@ -199,13 +199,13 @@ def _verify_prefix_counters_from_neighbor_after_graceful_restart(duthost, bgp_ne # confirm BGP session are up pytest_assert( - wait_until(300, 10, duthost.check_bgp_session_state, test_bgp_neighbors), + wait_until(300, 10, 0, duthost.check_bgp_session_state, test_bgp_neighbors), "graceful restarted bgp sessions {} are not coming back".format(test_bgp_neighbors) ) # confirm routes from the neighbor are restored pytest_assert( - wait_until(300, 10, _verify_prefix_counters_from_neighbor_after_graceful_restart, duthost, test_bgp_neighbors), + wait_until(300, 10, 0, _verify_prefix_counters_from_neighbor_after_graceful_restart, duthost, test_bgp_neighbors), "after graceful restart, Rib is not restored" ) diff --git a/tests/bgp/test_bgpmon.py b/tests/bgp/test_bgpmon.py index 52f80bfda0..918e7c1a6b 100644 --- a/tests/bgp/test_bgpmon.py +++ b/tests/bgp/test_bgpmon.py @@ -151,7 +151,7 @@ def bgpmon_peer_connected(duthost, bgpmon_peer): try: pytest_assert(wait_tcp_connection(localhost, 
ptfhost.mgmt_ip, BGP_MONITOR_PORT), "Failed to start bgp monitor session on PTF") - pytest_assert(wait_until(180, 5, bgpmon_peer_connected, duthost, peer_addr),"BGPMon Peer connection not established") + pytest_assert(wait_until(180, 5, 0, bgpmon_peer_connected, duthost, peer_addr),"BGPMon Peer connection not established") finally: ptfhost.exabgp(name=BGP_MONITOR_NAME, state="absent") ptfhost.shell("ip route del %s dev %s" % (local_addr + "/32", ptf_interface)) diff --git a/tests/cacl/test_cacl_application.py b/tests/cacl/test_cacl_application.py index 6375473fed..e5e85409ea 100644 --- a/tests/cacl/test_cacl_application.py +++ b/tests/cacl/test_cacl_application.py @@ -37,8 +37,21 @@ def docker_network(duthost): ipam_info = json.loads(output['stdout'])[0]['IPAM'] docker_network = {} - docker_network['bridge'] = {'IPv4Address' : ipam_info['Config'][0]['Gateway'], - 'IPv6Address' : ipam_info['Config'][1]['Gateway'] } + """ + FIXME: Work around dockerd issue. The Gateway entry might be missing. In that case, use 'Subnet' instead. + Sample output when docker hit the issue (Note that the IPv6 gateway is missing): + "Config": [ + { + "Subnet": "240.127.1.1/24", + "Gateway": "240.127.1.1" + }, + { + "Subnet": "fd00::/80" + } + ] + """ + docker_network['bridge'] = {'IPv4Address' : ipam_info['Config'][0].get('Gateway', ipam_info['Config'][0].get('Subnet')), + 'IPv6Address' : ipam_info['Config'][1].get('Gateway', ipam_info['Config'][1].get('Subnet')) } docker_network['container'] = {} for k,v in docker_containers_info.items(): diff --git a/tests/common/cisco_data.py b/tests/common/cisco_data.py new file mode 100644 index 0000000000..a9b6e6f9bd --- /dev/null +++ b/tests/common/cisco_data.py @@ -0,0 +1,2 @@ +def is_cisco_device(dut): + return dut.facts["asic_type"] == "cisco-8000" diff --git a/tests/common/config_reload.py b/tests/common/config_reload.py index 60151cca29..85b7e72592 100644 --- a/tests/common/config_reload.py +++ b/tests/common/config_reload.py @@ -74,4 +74,7 @@ def config_reload(duthost, config_source='config_db', wait=120, start_bgp=True, if config_source == 'config_db': duthost.shell(cmd, executable="/bin/bash") + modular_chassis = duthost.get_facts().get("modular_chassis") + wait = max(wait, 240) if modular_chassis else wait + time.sleep(wait) diff --git a/tests/common/devices/eos.py b/tests/common/devices/eos.py index 174983159f..8c091a4896 100644 --- a/tests/common/devices/eos.py +++ b/tests/common/devices/eos.py @@ -94,8 +94,11 @@ def set_interface_lacp_rate_mode(self, interface_name, mode): out = self.eos_config( lines=['lacp rate %s' % mode], parents='interface %s' % interface_name) - - if out['failed'] == True: + + # FIXME: out['failed'] will be False even when a command is deprecated, so we have to check out['changed'] + # However, if the lacp rate is already in expected state, out['changed'] will be False and treated as + # error. 
+ if out['failed'] == True or out['changed'] == False: # new eos deprecate lacp rate and use lacp timer command out = self.eos_config( lines=['lacp timer %s' % mode], diff --git a/tests/common/devices/multi_asic.py b/tests/common/devices/multi_asic.py index f622af09cd..9b171cd118 100644 --- a/tests/common/devices/multi_asic.py +++ b/tests/common/devices/multi_asic.py @@ -57,8 +57,16 @@ def critical_services_tracking_list(self): [list]: list of the services running the device """ service_list = [] - service_list+= self._DEFAULT_SERVICES - for asic in self.asics: + active_asics = self.asics + if self.sonichost.is_supervisor_node() and self.get_facts()['asic_type'] != 'vs': + active_asics = [] + sonic_db_cli_out = self.command("sonic-db-cli CHASSIS_STATE_DB keys \"CHASSIS_ASIC_TABLE|asic*\"") + for a_asic_line in sonic_db_cli_out["stdout_lines"]: + a_asic_name = a_asic_line.split("|")[1] + a_asic_instance = self.asic_instance_from_namespace(namespace=a_asic_name) + active_asics.append(a_asic_instance) + service_list += self._DEFAULT_SERVICES + for asic in active_asics: service_list += asic.get_critical_services() self.sonichost.reset_critical_services_tracking_list(service_list) @@ -374,7 +382,11 @@ def disable_syslog_rate_limit(self, feature): services = [feature] if (feature in self.sonichost.DEFAULT_ASIC_SERVICES): - services = [asic.get_docker_name(feature) for asic in self.asics] + services = [] + for asic in self.asics: + service_name = asic.get_docker_name(feature) + if service_name in self.sonichost.critical_services: + services.append(service_name) for docker in services: cmd_disable_rate_limit = ( diff --git a/tests/common/devices/onyx.py b/tests/common/devices/onyx.py index c3f646479e..823881d135 100644 --- a/tests/common/devices/onyx.py +++ b/tests/common/devices/onyx.py @@ -92,7 +92,7 @@ def get_supported_speeds(self, interface_name): # The output should be something like: "Supported speeds:1G 10G 25G 50G" speeds = out.split(':')[-1].split() - return [x[:-1] + '000' for x in speeds] + return list(set([x.split('G')[0] + '000' for x in speeds])) def set_auto_negotiation_mode(self, interface_name, mode): """Set auto negotiation mode for a given interface diff --git a/tests/common/dualtor/control_plane_utils.py b/tests/common/dualtor/control_plane_utils.py index 52ab163b4f..27c4d58a8f 100644 --- a/tests/common/dualtor/control_plane_utils.py +++ b/tests/common/dualtor/control_plane_utils.py @@ -63,11 +63,13 @@ def _dump_db(self, db, key_pattern): command = "redis-dump -d {db} -k \"{key_pattern}\"".format( db=db, key_pattern=key_pattern) lines = self.duthost.shell(command)["stdout_lines"] - return json.loads(lines[0]) + db_dump = json.loads(lines[0]) + logger.debug(json.dumps(db_dump, indent=4)) + return db_dump def verify_db(self, db): pytest_assert( - wait_until(30, 10, self.get_mismatched_ports, db), + wait_until(30, 10, 0, self.get_mismatched_ports, db), "Database states don't match expected state {state}," "incorrect {db_name} values {db_states}" .format(state=self.state, db_name=DB_NAME_MAP[db], diff --git a/tests/common/dualtor/data_plane_utils.py b/tests/common/dualtor/data_plane_utils.py index f59c1a5d3b..b6926fb2c1 100644 --- a/tests/common/dualtor/data_plane_utils.py +++ b/tests/common/dualtor/data_plane_utils.py @@ -4,6 +4,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import InterruptableThread from tests.common.utilities import wait_until +from tests.common.plugins.sanity_check import print_logs import threading import logging 
from natsort import natsorted @@ -152,7 +153,7 @@ def run_test(duthosts, activehost, ptfhost, ptfadapter, action, action() # do not time-wait the test, if early stop is not requested (when stop_after=None) if stop_after is not None: - wait_until(timeout=stop_after, interval=0.5, condition=\ + wait_until(timeout=stop_after, interval=0.5, delay=0, condition=\ lambda: not send_and_sniff.is_alive) if send_and_sniff.is_alive(): logger.info("Sender/Sniffer threads are still running. Sending signal "\ @@ -165,6 +166,7 @@ def run_test(duthosts, activehost, ptfhost, ptfadapter, action, def cleanup(ptfadapter, duthosts_list): + print_logs(duthosts_list) # cleanup torIO ptfadapter.dataplane.flush() for duthost in duthosts_list: @@ -193,7 +195,6 @@ def send_t1_to_server_with_action(duthosts, ptfhost, ptfadapter, tbinfo): """ arp_setup(ptfhost) - duthosts_list = [] def t1_to_server_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=None, stop_after=None): @@ -220,7 +221,6 @@ def t1_to_server_io_test(activehost, tor_vlan_port=None, Returns: data_plane_test_report (dict): traffic test statistics (sent/rcvd/dropped) """ - duthosts_list.append(activehost) tor_IO = run_test(duthosts, activehost, ptfhost, ptfadapter, action, tbinfo, tor_vlan_port, send_interval, @@ -235,7 +235,7 @@ def t1_to_server_io_test(activehost, tor_vlan_port=None, yield t1_to_server_io_test - cleanup(ptfadapter, duthosts_list) + cleanup(ptfadapter, duthosts) @pytest.fixture @@ -260,7 +260,6 @@ def send_server_to_t1_with_action(duthosts, ptfhost, ptfadapter, tbinfo): """ arp_setup(ptfhost) - duthosts_list = [] def server_to_t1_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=None, stop_after=None): @@ -286,7 +285,6 @@ def server_to_t1_io_test(activehost, tor_vlan_port=None, Returns: data_plane_test_report (dict): traffic test statistics (sent/rcvd/dropped) """ - duthosts_list.append(activehost) tor_IO = run_test(duthosts, activehost, ptfhost, ptfadapter, action, tbinfo, tor_vlan_port, send_interval, @@ -301,4 +299,4 @@ def server_to_t1_io_test(activehost, tor_vlan_port=None, yield server_to_t1_io_test - cleanup(ptfadapter, duthosts_list) + cleanup(ptfadapter, duthosts) diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py index c122ad7d4d..eaa108a532 100644 --- a/tests/common/dualtor/dual_tor_utils.py +++ b/tests/common/dualtor/dual_tor_utils.py @@ -16,13 +16,14 @@ from tests.common.helpers.assertions import pytest_assert as pt_assert from tests.common.helpers.dut_ports import encode_dut_port_name from tests.common.dualtor.constants import UPPER_TOR, LOWER_TOR -from tests.common.utilities import dump_scapy_packet_show_output +from tests.common.utilities import dump_scapy_packet_show_output, get_intf_by_sub_intf import ipaddress from ptf import mask from ptf import testutils from scapy.all import Ether, IP from tests.common.helpers.generators import generate_ip_through_default_route +from tests.common import constants __all__ = ['tor_mux_intf', 'tor_mux_intfs', 'ptf_server_intf', 't1_upper_tor_intfs', 't1_lower_tor_intfs', 'upper_tor_host', 'lower_tor_host', 'force_active_tor'] @@ -121,6 +122,29 @@ def map_hostname_to_tor_side(tbinfo, hostname): return None +def get_t1_ptf_ports_for_backend_topo(mg_facts): + """ + In backend topology, there isn't any port channel between T0 and T1, + we use sub interface instead. 
+ Args: + mg_facts (dict): mg_facts + Returns: + list: ptf t1 ports, e.g. ['eth10', 'eth11'] + """ + ptf_portmap = mg_facts['minigraph_ptf_indices'] + + ports = set() + for vlan_sub_interface in mg_facts['minigraph_vlan_sub_interfaces']: + sub_intf_name = vlan_sub_interface['attachto'] + vlan_id = vlan_sub_interface['vlan'] + intf_name = get_intf_by_sub_intf(sub_intf_name, vlan_id) + + ptf_port_index = ptf_portmap[intf_name] + ports.add("eth{}".format(ptf_port_index)) + + return list(ports) + + def get_t1_ptf_pc_ports(dut, tbinfo): """Gets the PTF portchannel ports connected to the T1 switchs.""" config_facts = dut.get_running_config_facts() @@ -141,8 +165,13 @@ def get_t1_ptf_ports(dut, tbinfo): ''' Gets the PTF ports connected to a given DUT for the first T1 ''' - pc_ports = get_t1_ptf_pc_ports(dut, tbinfo) + mg_facts = dut.get_extended_minigraph_facts(tbinfo) + is_backend_topology = mg_facts.get(constants.IS_BACKEND_TOPOLOGY_KEY, False) + + if is_backend_topology: + return get_t1_ptf_ports_for_backend_topo(mg_facts) + pc_ports = get_t1_ptf_pc_ports(dut, tbinfo) # Always choose the first portchannel portchannel = sorted(pc_ports.keys())[0] ptf_portchannel_intfs = pc_ports[portchannel] @@ -737,8 +766,11 @@ def check_nexthops_balance(rand_selected_dut, # expect this packet to be sent to downlinks (active mux) and uplink (stanby mux) expected_downlink_ports = [get_ptf_server_intf_index(rand_selected_dut, tbinfo, iface) for iface in downlink_ints] expected_uplink_ports = list() - for members in get_t1_ptf_pc_ports(rand_selected_dut, tbinfo).values(): - for member in members: + expected_uplink_portchannels = list() + portchannel_ports = get_t1_ptf_pc_ports(rand_selected_dut, tbinfo) + for pc, intfs in portchannel_ports.items(): + expected_uplink_portchannels.append(pc) + for member in intfs: expected_uplink_ports.append(int(member.strip("eth"))) logging.info("Expecting packets in downlink ports {}".format(expected_downlink_ports)) logging.info("Expecting packets in uplink ports {}".format(expected_uplink_ports)) @@ -777,17 +809,23 @@ def check_nexthops_balance(rand_selected_dut, # Hierarchical ECMP validation (in case of standby MUXs): # Step 1: Calculate total uplink share. 
total_uplink_share = expect_packet_num * (nexthops_count - len(expected_downlink_ports)) - # Step 2: Divide uplink share among all uplinks - expect_packet_num = total_uplink_share // len(expected_uplink_ports) + # Step 2: Divide uplink share among all portchannels + expect_packet_num = total_uplink_share // len(expected_uplink_portchannels) + pkt_num_lo = expect_packet_num * (1.0 - 0.25) + pkt_num_hi = expect_packet_num * (1.0 + 0.25) # Step 3: Check if uplink distribution (hierarchical ECMP) is balanced - for uplink_int in expected_uplink_ports: - pkt_num_lo = expect_packet_num * (1.0 - 0.25) - pkt_num_hi = expect_packet_num * (1.0 + 0.25) - count = port_packet_count.get(uplink_int, 0) - logging.info("Packets received on uplink port {}: {}".format(uplink_int, count)) + for pc, intfs in portchannel_ports.items(): + count = 0 + # Collect the packets count within a single portchannel + for member in intfs: + uplink_int = int(member.strip("eth")) + count = count + port_packet_count.get(uplink_int, 0) + logging.info("Packets received on portchannel {}: {}".format(pc, count)) + if count < pkt_num_lo or count > pkt_num_hi: balance = False - pt_assert(balance, "Hierarchical ECMP failed: packets not evenly distributed on uplink port {}".format(uplink_int)) + pt_assert(balance, "Hierarchical ECMP failed: packets not evenly distributed on portchannel {}".format( + pc)) def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, pkt_num = 100, drop = False): diff --git a/tests/common/dualtor/mux_simulator_control.py b/tests/common/dualtor/mux_simulator_control.py index cfd369ace4..97cca1542b 100644 --- a/tests/common/dualtor/mux_simulator_control.py +++ b/tests/common/dualtor/mux_simulator_control.py @@ -10,9 +10,25 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.dualtor.constants import UPPER_TOR, LOWER_TOR, TOGGLE, RANDOM, NIC, DROP, OUTPUT, FLAP_COUNTER, CLEAR_FLAP_COUNTER, RESET -__all__ = ['check_simulator_read_side', 'mux_server_url', 'url', 'recover_all_directions', 'set_drop', 'set_output', 'toggle_all_simulator_ports_to_another_side', \ - 'toggle_all_simulator_ports_to_lower_tor', 'toggle_all_simulator_ports_to_random_side', 'toggle_all_simulator_ports_to_upper_tor', \ - 'toggle_simulator_port_to_lower_tor', 'toggle_simulator_port_to_upper_tor', 'toggle_all_simulator_ports', 'get_mux_status', 'reset_simulator_port'] +__all__ = [ + 'mux_server_info', + 'restart_mux_simulator', + 'mux_server_url', + 'url', + 'get_mux_status', + 'check_simulator_read_side', + 'set_output', + 'set_drop', + 'recover_all_directions', + 'reset_simulator_port', + 'toggle_all_simulator_ports_to_upper_tor', + 'toggle_all_simulator_ports_to_lower_tor', + 'toggle_all_simulator_ports_to_another_side', + 'toggle_all_simulator_ports_to_random_side', + 'toggle_simulator_port_to_upper_tor', + 'toggle_simulator_port_to_lower_tor', + 'toggle_all_simulator_ports', + ] logger = logging.getLogger(__name__) @@ -20,14 +36,15 @@ @pytest.fixture(scope='session') -def mux_server_url(request, tbinfo): - """ - A session level fixture to retrieve the address of mux simulator address +def mux_server_info(request, tbinfo): + """Fixture for getting ip, port and vmset_name of mux simulator server + Args: - request: A fixture from Ansible - tbinfo: A session level fixture + request (obj): Pytest request object + tbinfo (dict): Testbed info + Returns: - str: The address of mux simulator server + vmset_name, like http://10.0.0.64:8080/mux/vms17-8 + tuple: Tuple with items: ip, port, vmset_name. 
For non-dualtor testbed, returns None, None, None """ if 'dualtor' in tbinfo['topo']['name']: server = tbinfo['server'] @@ -37,9 +54,45 @@ def mux_server_url(request, tbinfo): ip = utilities.get_test_server_vars(inv_files, server).get('ansible_host') _port_map = utilities.get_group_visible_vars(inv_files, server).get('mux_simulator_http_port') port = _port_map[tbinfo['conf-name']] + return ip, port, vmset_name + return None, None, None + + +@pytest.fixture(scope='session', autouse=True) +def restart_mux_simulator(mux_server_info, vmhost): + """Session level fixture restart mux simulator server + + For dualtor testbed, it would be better to restart the mux simulator server to ensure that it is running in a + healthy state before testing. + + This is a session level and auto used fixture. + + Args: + mux_server_info (tuple): ip, port and vmset_name of mux simulator server + vmhost (obj): The test server object. + """ + ip, port, vmset_name = mux_server_info + if ip is not None and port is not None and vmset_name is not None: + vmhost.command('systemctl restart mux-simulator-{}'.format(port)) + time.sleep(5) # Wait for the mux simulator to initialize + + +@pytest.fixture(scope='session') +def mux_server_url(mux_server_info): + """ + A session level fixture to retrieve the address of mux simulator address + + Args: + mux_server_info: A session scope fixture returns ip, port and vmset_name of mux simulator server + Returns: + str: The address of mux simulator server + vmset_name, like http://10.0.0.64:8080/mux/vms17-8 + """ + ip, port, vmset_name = mux_server_info + if ip is not None and port is not None and vmset_name is not None: return "http://{}:{}/mux/{}".format(ip, port, vmset_name) return "" + @pytest.fixture(scope='module') def url(mux_server_url, duthost, tbinfo): """ @@ -110,6 +163,7 @@ def _post(server_url, data): logger.debug('POST {} with {}'.format(server_url, data)) # lgtm [py/clear-text-logging-sensitive-data] headers = {'Accept': 'application/json', 'Content-Type': 'application/json'} resp = requests.post(server_url, json=data, headers=headers) + logger.debug('Received response {}/{} with content {}'.format(resp.status_code, resp.reason, resp.text)) return resp.status_code == 200 except Exception as e: logger.warn("POST {} with data {} failed, err: {}".format(server_url, data, repr(e))) # lgtm [py/clear-text-logging-sensitive-data] @@ -426,7 +480,7 @@ def _check_mux_status_consistency(): mg_facts = upper_tor_host.get_extended_minigraph_facts(tbinfo) port_indices = mg_facts['minigraph_port_indices'] pytest_assert( - utilities.wait_until(30, 5, _check_mux_status_consistency), + utilities.wait_until(30, 5, 0, _check_mux_status_consistency), "Mux status is inconsistent between the DUTs and mux simulator after toggle" ) diff --git a/tests/common/dualtor/tunnel_traffic_utils.py b/tests/common/dualtor/tunnel_traffic_utils.py index 0b4001fc11..5b47d6ccba 100644 --- a/tests/common/dualtor/tunnel_traffic_utils.py +++ b/tests/common/dualtor/tunnel_traffic_utils.py @@ -143,7 +143,7 @@ def _disassemble_ip_tos(tos): check_res.append("outer packet DSCP not same as inner packet DSCP") exp_queue = derive_queue_id_from_dscp(outer_dscp) - pytest_assert(wait_until(60, 5, queue_stats_check, self.standby_tor, exp_queue)) + pytest_assert(wait_until(60, 5, 0, queue_stats_check, self.standby_tor, exp_queue)) return check_res def __init__(self, standby_tor, active_tor=None, existing=True): diff --git a/tests/common/fixtures/advanced_reboot.py b/tests/common/fixtures/advanced_reboot.py index 
8e05e25218..c91a41dc0c 100644 --- a/tests/common/fixtures/advanced_reboot.py +++ b/tests/common/fixtures/advanced_reboot.py @@ -11,6 +11,7 @@ from tests.common.reboot import reboot as rebootDut from tests.common.helpers.sad_path import SadOperation from tests.ptf_runner import ptf_runner +from tests.common.helpers.assertions import pytest_assert logger = logging.getLogger(__name__) @@ -18,6 +19,7 @@ HOST_MAX_COUNT = 126 TIME_BETWEEN_SUCCESSIVE_TEST_OPER = 420 PTFRUNNER_QLEN = 1000 +REBOOT_CASE_TIMEOUT = 1800 class AdvancedReboot: ''' @@ -329,7 +331,7 @@ def __setupTestbed(self): logger.info('Copy ARP responder to the PTF container {}'.format(self.ptfhost.hostname)) self.ptfhost.copy(src='scripts/arp_responder.py', dest='/opt') - + self.ptfhost.copy(src='scripts/dual_tor_sniffer.py', dest="/root/ptftests/advanced_reboot_sniffer.py") # Replace fast-reboot script if self.replaceFastRebootScript: logger.info('Replace fast-reboot script on DUT {}'.format(self.duthost.hostname)) @@ -422,21 +424,25 @@ def imageInstall(self, prebootList=None, inbootList=None, prebootFiles=None): def runRebootTest(self): # Run advanced-reboot.ReloadTest for item in preboot/inboot list count = 0 + result = True + failed_list = list() for rebootOper in self.rebootData['sadList']: count += 1 try: self.__setupRebootOper(rebootOper) result = self.__runPtfRunner(rebootOper) self.__verifyRebootOper(rebootOper) + except Exception: + failed_list.append(rebootOper) finally: # always capture the test logs self.__fetchTestLogs(rebootOper) self.__clearArpAndFdbTables() self.__revertRebootOper(rebootOper) - if not result: - return result if len(self.rebootData['sadList']) > 1 and count != len(self.rebootData['sadList']): time.sleep(TIME_BETWEEN_SUCCESSIVE_TEST_OPER) + pytest_assert(len(failed_list) == 0,\ + "Advanced-reboot failure. 
Failed cases: {}".format(failed_list)) return result def runRebootTestcase(self, prebootList=None, inbootList=None, prebootFiles=None): @@ -509,7 +515,8 @@ def __runPtfRunner(self, rebootOper=None): "setup_fdb_before_test" : True, "vnet" : self.vnet, "vnet_pkts" : self.vnetPkts, - "bgp_v4_v6_time_diff": self.bgpV4V6TimeDiff + "bgp_v4_v6_time_diff": self.bgpV4V6TimeDiff, + "asic_type": self.duthost.facts["asic_type"] } if not isinstance(rebootOper, SadOperation): @@ -539,8 +546,10 @@ def __runPtfRunner(self, rebootOper=None): platform="remote", params=params, log_file=u'/tmp/advanced-reboot.ReloadTest.log', - module_ignore_errors=self.moduleIgnoreErrors + module_ignore_errors=self.moduleIgnoreErrors, + timeout=REBOOT_CASE_TIMEOUT ) + return result def __restorePrevImage(self): diff --git a/tests/common/fixtures/duthost_utils.py b/tests/common/fixtures/duthost_utils.py index faeb080e75..959bfcb08c 100644 --- a/tests/common/fixtures/duthost_utils.py +++ b/tests/common/fixtures/duthost_utils.py @@ -1,5 +1,8 @@ import pytest import logging +import itertools +import collections + from jinja2 import Template logger = logging.getLogger(__name__) @@ -97,6 +100,7 @@ def disable_route_checker_module(duthosts, rand_one_dut_hostname): for func in _disable_route_checker(duthost): yield func + @pytest.fixture(scope='module') def disable_fdb_aging(duthost): """ @@ -134,3 +138,90 @@ def disable_fdb_aging(duthost): ] duthost.shell_cmds(cmds=cmds) duthost.file(path=TMP_SWITCH_CONFIG_FILE, state="absent") + + +@pytest.fixture(scope="module") +def ports_list(duthosts, rand_one_dut_hostname, rand_selected_dut, tbinfo): + duthost = duthosts[rand_one_dut_hostname] + cfg_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts'] + mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) + config_ports = {k: v for k,v in cfg_facts['PORT'].items() if v.get('admin_status', 'down') == 'up'} + config_port_indices = {k: v for k, v in mg_facts['minigraph_ptf_indices'].items() if k in config_ports} + ptf_ports_available_in_topo = {port_index: 'eth{}'.format(port_index) for port_index in config_port_indices.values()} + config_portchannels = cfg_facts.get('PORTCHANNEL', {}) + config_port_channel_members = [port_channel['members'] for port_channel in config_portchannels.values()] + config_port_channel_member_ports = list(itertools.chain.from_iterable(config_port_channel_members)) + ports = [port for port in config_ports + if config_port_indices[port] in ptf_ports_available_in_topo + and config_ports[port].get('admin_status', 'down') == 'up' + and port not in config_port_channel_member_ports] + return ports + + +@pytest.fixture(scope="module") +def vlan_ports_list(duthosts, rand_one_dut_hostname, rand_selected_dut, tbinfo, ports_list): + """ + Get configured VLAN ports + """ + duthost = duthosts[rand_one_dut_hostname] + cfg_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts'] + mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) + vlan_ports_list = [] + config_ports = {k: v for k,v in cfg_facts['PORT'].items() if v.get('admin_status', 'down') == 'up'} + config_portchannels = cfg_facts.get('PORTCHANNEL', {}) + config_port_indices = {k: v for k, v in mg_facts['minigraph_ptf_indices'].items() if k in config_ports} + config_ports_vlan = collections.defaultdict(list) + vlan_members = cfg_facts.get('VLAN_MEMBER', {}) + # key is dev name, value is list for configured VLAN member. 
+ for k, v in cfg_facts['VLAN'].items(): + vlanid = v['vlanid'] + for addr in cfg_facts['VLAN_INTERFACE']['Vlan'+vlanid]: + # address could be IPV6 and IPV4, only need IPV4 here + if addr.find(':') == -1: + ip = addr + break + else: + continue + for port in v['members']: + if k in vlan_members and port in vlan_members[k]: + if 'tagging_mode' not in vlan_members[k][port]: + continue + mode = vlan_members[k][port]['tagging_mode'] + config_ports_vlan[port].append({'vlanid':int(vlanid), 'ip':ip, 'tagging_mode':mode}) + + if config_portchannels: + for po in config_portchannels: + vlan_port = { + 'dev' : po, + 'port_index' : [config_port_indices[member] for member in config_portchannels[po]['members']], + 'permit_vlanid' : [] + } + if po in config_ports_vlan: + vlan_port['pvid'] = 0 + for vlan in config_ports_vlan[po]: + if 'vlanid' not in vlan or 'ip' not in vlan or 'tagging_mode' not in vlan: + continue + if vlan['tagging_mode'] == 'untagged': + vlan_port['pvid'] = vlan['vlanid'] + vlan_port['permit_vlanid'].append(vlan['vlanid']) + if 'pvid' in vlan_port: + vlan_ports_list.append(vlan_port) + + for i, port in enumerate(ports_list): + vlan_port = { + 'dev' : port, + 'port_index' : [config_port_indices[port]], + 'permit_vlanid' : [] + } + if port in config_ports_vlan: + vlan_port['pvid'] = 0 + for vlan in config_ports_vlan[port]: + if 'vlanid' not in vlan or 'ip' not in vlan or 'tagging_mode' not in vlan: + continue + if vlan['tagging_mode'] == 'untagged': + vlan_port['pvid'] = vlan['vlanid'] + vlan_port['permit_vlanid'].append(vlan['vlanid']) + if 'pvid' in vlan_port: + vlan_ports_list.append(vlan_port) + + return vlan_ports_list diff --git a/tests/common/helpers/drop_counters/drop_counters.py b/tests/common/helpers/drop_counters/drop_counters.py index a41c4f06d1..61acc65288 100644 --- a/tests/common/helpers/drop_counters/drop_counters.py +++ b/tests/common/helpers/drop_counters/drop_counters.py @@ -94,7 +94,7 @@ def get_drops_across_all_duthosts(): drop_list.append(int(get_pkt_drops(duthost, get_cnt_cli_cmd, asic_index)[dut_iface][column_key].replace(",", ""))) return drop_list check_drops_on_dut = lambda: packets_count in get_drops_across_all_duthosts() - if not wait_until(25, 1, check_drops_on_dut): + if not wait_until(25, 1, 0, check_drops_on_dut): # The actual Drop count should always be equal or 1 or 2 packets more than what is expected due to some other drop may occur # over the interface being examined. When that happens if looking onlyu for exact count it will be a false positive failure. # So do one more check to allow up to 2 packets more dropped than what was expected as an allowed case. 
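Note: the hunk above (and many hunks below) change `wait_until` call sites because this PR inserts a new third positional `delay` argument into `wait_until`; its updated definition appears later in this diff in `tests/common/utilities.py`. A minimal sketch of the updated calling convention, using a hypothetical `check_drops` predicate purely for illustration:

```python
from tests.common.utilities import wait_until

# Hypothetical predicate, for illustration only; real call sites pass helpers
# such as check_container_state or _all_critical_processes_healthy.
def check_drops():
    return True

# Old convention: wait_until(timeout, interval, condition, *args)
# New convention: wait_until(timeout, interval, delay, condition, *args)
# Below: poll check_drops() every 1 second for up to 25 seconds, with no
# initial delay (delay=0) before the first poll.
ok = wait_until(25, 1, 0, check_drops)
assert ok, "packet drops were not observed within 25 seconds"
```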
diff --git a/tests/common/helpers/dut_utils.py b/tests/common/helpers/dut_utils.py index 0591a77a23..8f9ebd7a1d 100644 --- a/tests/common/helpers/dut_utils.py +++ b/tests/common/helpers/dut_utils.py @@ -88,6 +88,7 @@ def clear_failed_flag_and_restart(duthost, container_name): duthost.shell("sudo systemctl start {}.service".format(container_name)) restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, + 0, check_container_state, duthost, container_name, True) pytest_assert(restarted, "Failed to restart container '{}' after reset-failed was cleared".format(container_name)) diff --git a/tests/common/helpers/pfc_gen.py b/tests/common/helpers/pfc_gen.py index f1d53c650f..ec78a0b022 100755 --- a/tests/common/helpers/pfc_gen.py +++ b/tests/common/helpers/pfc_gen.py @@ -122,7 +122,7 @@ def main(): pause time | 0x0000 | ------------------------- """ - src_addr = "\x01\x02\x03\x04\x05\x06" + src_addr = "\x00\x01\x02\x03\x04\x05" dst_addr = "\x01\x80\xc2\x00\x00\x01" if options.global_pf: opcode = "\x00\x01" diff --git a/tests/common/helpers/snmp_helpers.py b/tests/common/helpers/snmp_helpers.py index 85fb959abd..3dd8bb46ed 100644 --- a/tests/common/helpers/snmp_helpers.py +++ b/tests/common/helpers/snmp_helpers.py @@ -35,7 +35,7 @@ def get_snmp_facts(localhost, host, version, community, is_dell=False, module_ig global global_snmp_facts - pytest_assert(wait_until(timeout, interval, _update_snmp_facts, localhost, host, version, + pytest_assert(wait_until(timeout, interval, 0, _update_snmp_facts, localhost, host, version, community, is_dell), "Timeout waiting for SNMP facts") return global_snmp_facts diff --git a/tests/common/helpers/voq_lag.py b/tests/common/helpers/voq_lag.py index 6dbd5a46d6..49df6feeb6 100644 --- a/tests/common/helpers/voq_lag.py +++ b/tests/common/helpers/voq_lag.py @@ -91,7 +91,7 @@ def add_lag(duthost, asic, portchannel_members=None, portchannel_ip=None, pytest_assert(int_facts['ansible_interface_facts'] [portchannel]['ipv4']['address'] == portchannel_ip.split('/')[0]) - pytest_assert(wait_until(30,5, verify_lag_interface, duthost, asic, portchannel), + pytest_assert(wait_until(30,5, 0, verify_lag_interface, duthost, asic, portchannel), 'For added Portchannel {} link is not up'.format(portchannel)) @@ -209,7 +209,7 @@ def delete_lag_members_ip(duthost, asic, portchannel_members, duthost.shell("config interface {} ip remove {} {}" .format(asic.cli_ns_option, portchannel, portchannel_ip)) - pytest_assert(wait_until(30,5, verify_lag_interface, duthost, asic, portchannel, expected=False), + pytest_assert(wait_until(30,5, 0, verify_lag_interface, duthost, asic, portchannel, expected=False), 'For deleted Portchannel {} ip link is not down'.format(portchannel)) diff --git a/tests/common/mellanox_data.py b/tests/common/mellanox_data.py index 3929105a72..910e7c9df1 100644 --- a/tests/common/mellanox_data.py +++ b/tests/common/mellanox_data.py @@ -1,11 +1,16 @@ -SPC1_HWSKUS = ["ACS-MSN2700", "Mellanox-SN2700", "Mellanox-SN2700-D48C8", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010"] +SPC1_HWSKUS = ["ACS-MSN2700", "Mellanox-SN2700", "Mellanox-SN2700-D48C8", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", + "ACS-MSN2010", "ACS-MSN2201"] SPC2_HWSKUS = ["ACS-MSN3700", "ACS-MSN3700C", "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420"] SPC3_HWSKUS = ["ACS-MSN4700", "ACS-MSN4600C", "ACS-MSN4410", "ACS-MSN4600"] SWITCH_HWSKUS = SPC1_HWSKUS + SPC2_HWSKUS + SPC3_HWSKUS +PSU_CAPABILITIES = [ + ['psu{}_curr', 'psu{}_curr_in', 'psu{}_power', 
'psu{}_power_in', 'psu{}_volt', 'psu{}_volt_in', 'psu{}_volt_out'], + ['psu{}_curr', 'psu{}_curr_in', 'psu{}_power', 'psu{}_power_in', 'psu{}_volt', 'psu{}_volt_out2'] +] SWITCH_MODELS = { - "x86_64-mlnx_msn2700-r0": { + "x86_64-mlnx_msn2201-r0": { "chip_type": "spectrum1", "reboot": { "cold_reboot": True, @@ -26,6 +31,58 @@ "cpu_cores": { "number": 2 }, + "ports": { + "number": 52 + }, + "thermals": { + "cpu_core": { + "start": 0, + "number": 2 + }, + "module": { + "start": 1, + "number": 52 + }, + "psu": { + "start": 1, + "number": 2 + }, + "cpu_pack": { + "number": 1 + }, + "asic_ambient": { + "number": 1 + }, + "port_ambient": { + "number": 1 + }, + "fan_ambient": { + "number": 1 + } + } + }, + "x86_64-mlnx_msn2700-r0": { + "chip_type": "spectrum1", + "reboot": { + "cold_reboot": True, + "fast_reboot": True, + "warm_reboot": True + }, + "fans": { + "number": 4, + "hot_swappable": True + }, + "psus": { + "number": 2, + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[0] + }, + "cpu_pack": { + "number": 1 + }, + "cpu_cores": { + "number": 2 + }, "ports": { "number": 32 }, @@ -69,7 +126,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[0] }, "cpu_pack": { "number": 0 @@ -117,7 +175,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[0] }, "cpu_pack": { "number": 1 @@ -256,7 +315,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[1] }, "cpu_pack": { "number": 1 @@ -314,7 +374,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[1] }, "cpu_pack": { "number": 1 @@ -368,7 +429,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[1] }, "cpu_pack": { "number": 1 @@ -422,7 +484,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[1] }, "cpu_pack": { "number": 1 @@ -476,7 +539,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[1] }, "cpu_pack": { "number": 1 @@ -530,7 +594,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[1] }, "cpu_pack": { "number": 1 @@ -584,7 +649,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[1] }, "cpu_pack": { "number": 1 @@ -638,7 +704,8 @@ }, "psus": { "number": 2, - "hot_swappable": True + "hot_swappable": True, + "capabilities": PSU_CAPABILITIES[1] }, "cpu_pack": { "number": 1 diff --git a/tests/common/platform/processes_utils.py b/tests/common/platform/processes_utils.py index 3e3d71d08f..6cd45e29d7 100644 --- a/tests/common/platform/processes_utils.py +++ b/tests/common/platform/processes_utils.py @@ -44,6 +44,6 @@ def wait_critical_processes(dut): @param dut: The AnsibleHost object of DUT. For interacting with DUT. 
""" logging.info("Wait until all critical processes are healthy") - pytest_assert(wait_until(300, 20, _all_critical_processes_healthy, dut), + pytest_assert(wait_until(300, 20, 0, _all_critical_processes_healthy, dut), "Not all critical processes are healthy") diff --git a/tests/common/plugins/conditional_mark/README.md b/tests/common/plugins/conditional_mark/README.md new file mode 100644 index 0000000000..5fef2b996d --- /dev/null +++ b/tests/common/plugins/conditional_mark/README.md @@ -0,0 +1,145 @@ +# Conditional Mark + +This is a plugin for adding any mark to specified test cases based on conditions in a centralized file. + +The centralized file can be supplied in pytest command line option `--mark-conditions-file`. If no conditions file is specified, use the default conditions file located at `tests/common/plugins/conditional_mark/test_mark_conditions.yaml`. + + +## How it works +This plugin works at the collection stage of pytest. It mainly uses two pytest hook function: + * `pytest_collection` + * `pytest_collection_modifyitems` + +In `pytest_collection` hook function, it reads the specified conditions file and collect some basic facts that can be used in condition evaluation. The loaded information is stored in pytest object `session.config.cache`. + +In `pytest_collection_modifyitems`, it checks each collected test item (test case). For each item, it searches for the longest match test case name defined in the conditions content. If a match is found, then it will add the marks specified for this case based on conditions for each of the marks. + +## Format of the conditions file + +The conditions file must be a yaml file. First level of key should be test case name. Parametrized test case name is supported. +Second level of should be mark name that can be added for the test case. Any mark name is supported. For example, we can specify marks like `skip` or `xfail`. +Third level supports two type of keys: +* `reason`: Optional string text. It's for specifying reason of adding this mark. +* `strict`: Optional bool. It is only valid for `xfail` mark. For other marks, it will just be ignored. +* `conditions`: Its value can be a string or list of strings. The condition string should can be evaluated using python's `eval()` function. Issue URL is supported in the condition string. The plugin will query the issue website to get state of the issue. Then in the condition string, issue URLs will be replaced with either `True` or `False` based on its state. When getting issue state failed, it will always be considered as active. And the URL will be replaced as `True`. If this field is a list of condition strings, all the condition evaluation result is combined using `AND` logical operation. + +Example conditions: +``` +folder1/test_file1.py::test_case1: + skip: + reason: "skip file1/case1" + conditions: + - "release in ['master'] or asic_type=='vs'" + - https://github.com/Azure/sonic-mgmt/issues/1234 + - https://github.com/Azure/sonic-mgmt/issues/1235 +folder1/test_file1.py::test_case2[2+4-6]: + skip: + reason: "test file1/case2[2+4-6] skip" +folder1/test_file1.py: + fallback_mark: + reason: "For rest of the test cases in folder1/test_file1.py, add this fallback_mark unconditionally" +folder2/test_file2.py::TestMarkers::test_case1: + xfail: + reason: "test file2/case1 xfail" + conditions: + - https://github.com/Azure/sonic-mgmt/issues/1235 and topo_name == 't1-lag' + - # Empty condition will be ignored. Equivalent to True. 
+folder2/test_file2.py::TestMarkers::test_case2:
+  xfail:
+    reason: "test file2/case2 strict xfail"
+    strict:
+    conditions:   # Empty conditions will be evaluated to True. It means no condition.
+folder2/test_file2.py::TestMarkers::test_case3:
+  any_mark_is_supported:
+    reason: "Example for adding any mark to tests"
+    conditions: "build_number == 36262"
+folder3:
+  skip:
+    reason: "Skip all the test scripts under subfolder 'folder3'"
+```
+
+## Longest match rule
+
+This plugin processes each expanded (for parametrized test cases) test case one by one. For each test case, the marks specified in the longest match entry in the conditions file will take precedence.
+
+This way we can easily apply one set of marks to a specific test case in a script file and another set of marks to the rest of the test cases in the same script file.
+
+Assume we have conditions like below:
+```
+feature_a/test_file_1.py:
+  skip:
+    reason: "all testcases in test_file_1.py should be skipped for 201911 image"
+    conditions:
+      - "release in ['201911']"
+feature_a/test_file_1.py::testcase_3:
+  xfail:
+    reason: "testcase_3 is supposed to fail because of an issue"
+    conditions:
+      - https://github.com/Azure/sonic-mgmt/issues/1234
+```
+
+And assume we have the test script below:
+
+feature_a/test_file_1.py:
+```
+def testcase_1
+
+def testcase_2
+
+def testcase_3
+```
+In this example, `testcase_1` and `testcase_2` will have nodeids like `feature_a/test_file_1.py::testcase_1` and `feature_a/test_file_1.py::testcase_2`. They will match the entry `feature_a/test_file_1.py`. So, the `skip` mark will be added to `testcase_1` and `testcase_2` when `release in ['201911']`.
+For `testcase_3`, its nodeid will be `feature_a/test_file_1.py::testcase_3`. It will then only match `feature_a/test_file_1.py::testcase_3`. The `xfail` mark will be added to `testcase_3` when the GitHub issue is still open. The entry `feature_a/test_file_1.py` also matches its nodeid, but because it is not the longest match, it is simply ignored.
+
+In summary, under this scenario, the `skip` mark will be conditionally added to `testcase_1` and `testcase_2`. The `xfail` mark will be conditionally added to `testcase_3`.
+
+If a test case is parameterized, we can even specify different marks for different parameter value combinations of the same test case.
+
+## Example variables that can be used in condition strings
+
+Below is an example of the variables available for condition string evaluation:
+```
+    {
+        "commit_id": "db529af20",
+        "build_date": "Mon Sep 13 17:41:03 UTC 2021",
+        "sonic_utilities": 1.2,
+        "kernel_version": "4.19.0-12-2-amd64",
+        "debian_version": "10.10",
+        "built_by": "AzDevOps@sonic-build-workers-000OU4",
+        "libswsscommon": "1.0.0",
+        "build_version": "master.36262-db529af20",
+        "branch": "master",
+        "release": "master",
+        "topo_type": "t0",
+        "topo_name": "t0",
+        "platform": "x86_64-kvm_x86_64-r0",
+        "hwsku": "Force10-S6000",
+        "build_number": 36262,
+        "asic_type": "vs",
+        "num_asic": 1,
+        "is_multi_asic": False,
+    }
+```
+
+## New pytest options
+New pytest command line options are added. `--mark-conditions-file` specifies the location of the conditions file; if it is not supplied, the default conditions file located at `tests/common/plugins/conditional_mark/tests_mark_conditions.yaml` will be used. `--ignore-conditional-mark` disables the plugin so that no conditional mark is added.
+```
+    parser.addoption(
+        '--mark-conditions-file',
+        action='store',
+        dest='mark_conditions_file',
+        default='',
+        help="Location of your own mark conditions file.
If it is not specified, the default file will be used.") + + parser.addoption( + '--ignore-conditional-mark', + action='store_true', + dest='ignore_conditional_mark', + default=False, + help="Ignore the conditional mark plugin. No conditional mark will be added.") +``` + +## Possible extensions +The plugin is open for extension in couple of areas: +* Collect more facts. Then more variables can be used in condition string for evaluation. +* Add more arguments for marks, not just the current `reason` argument. diff --git a/tests/common/plugins/conditional_mark/__init__.py b/tests/common/plugins/conditional_mark/__init__.py new file mode 100644 index 0000000000..11c3ee9328 --- /dev/null +++ b/tests/common/plugins/conditional_mark/__init__.py @@ -0,0 +1,307 @@ +"""Plugin for adding any mark to specified test cases based on conditions in a centralized file. + +This plugin supports adding any mark to specified test cases based on conditions. All the information of test cases, +marks, and conditions can be specified in a centralized file. +""" +import imp +import json +import logging +import os +import re +import subprocess +import yaml + +import pytest + +from issue import check_issues + +logger = logging.getLogger(__name__) + +DEFAULT_CONDITIONS_FILE = 'common/plugins/conditional_mark/tests_mark_conditions.yaml' + + +def pytest_addoption(parser): + """Add options for the conditional mark plugin. + """ + parser.addoption( + '--mark-conditions-file', + action='store', + dest='mark_conditions_file', + default='', + help="Location of your own mark conditions file. If it is not specified, the default file will be used.") + + parser.addoption( + '--ignore-conditional-mark', + action='store_true', + dest='ignore_conditional_mark', + default=False, + help="Ignore the conditional mark plugin. No conditional mark will be added.") + + +def load_conditions(session): + """Load the content from mark conditions file + + Args: + session (obj): The pytest session object. + + Returns: + dict or None: Return the mark conditions dict or None if there something went wrong. + """ + conditions_file = session.config.option.mark_conditions_file + if not conditions_file: + conditions_file = DEFAULT_CONDITIONS_FILE + + if not os.path.exists(conditions_file): + # No conditions file supplied, skip adding conditional marks + return None + + try: + with open(conditions_file) as f: + logger.debug('Loaded tests skip conditions from {}'.format(conditions_file)) + return yaml.safe_load(f) + except Exception as e: + logger.error('Failed to load {}, exception: {}'.format(conditions_file, repr(e)), exc_info=True) + + return None + + +def load_dut_basic_facts(session): + """Run 'ansible -m dut_basic_facts' command to get some basic DUT facts. + + The facts will be a 1 level dictionary. The dict keys can be used as variables in condition statements evaluation. + + Args: + session (obj): The pytest session object. + + Returns: + dict or None: Return the dut basic facts dict or None if something went wrong. 
+ """ + results = {} + logger.info('Getting dut basic facts') + try: + testbed_name = session.config.option.testbed + testbed_file = session.config.option.testbed_file + + testbed_module = imp.load_source('testbed', 'common/testbed.py') + tbinfo = testbed_module.TestbedInfo(testbed_file).testbed_topo.get(testbed_name, None) + + results['topo_type'] = tbinfo['topo']['type'] + results['topo_name'] = tbinfo['topo']['name'] + + dut_name = tbinfo['duts'][0] + inv_name = tbinfo['inv_name'] + ansible_cmd = 'ansible -m dut_basic_facts -i ../ansible/{} {} -o'.format(inv_name, dut_name) + + raw_output = subprocess.check_output(ansible_cmd.split()).decode('utf-8') + logger.debug('raw dut basic facts:\n{}'.format(raw_output)) + output_fields = raw_output.split('SUCCESS =>', 1) + if len(output_fields) >= 2: + results.update(json.loads(output_fields[1].strip())['ansible_facts']['dut_basic_facts']) + except Exception as e: + logger.error('Failed to load dut basic facts, exception: {}'.format(repr(e))) + + return results + + +def load_basic_facts(session): + """Load some basic facts that can be used in condition statement evaluation. + + The facts will be a 1 level dictionary. The dict keys can be used as variables in condition statements evaluation. + + Args: + session (obj): Pytest session object. + + Returns: + dict: Dict of facts. + """ + results = {} + + # Load DUT basic facts + _facts = load_dut_basic_facts(session) + if _facts: + results.update(_facts) + + # Load possible other facts here + + return results + + +def find_longest_match(nodeid, case_names): + """Find the longest match of the given test case name in the case_names list. + + This is similar to longest prefix match in routing table. The longest match takes precedence. + + Args: + nodeid (str): Full test case name + case_names (list): List of test case names + + Returns: + str: Longest match test case name or None if not found + """ + longest_match = None + max_length = -1 + for case_name in case_names: + if nodeid.startswith(case_name): + length = len(case_name) + if length > max_length: + max_length = length + longest_match = case_name + return longest_match + + +def update_issue_status(condition_str): + """Replace issue URL with 'True' or 'False' based on its active state. + + If there is an issue URL is found, this function will try to query state of the issue and replace the URL + in the condition string with 'True' or 'False' based on its active state. + + The issue URL may be Github, Jira, Redmine, etc. + + Args: + condition_str (str): Condition string that may contain issue URLs. + + Returns: + str: New condition string with issue URLs already replaced with 'True' or 'False'. + """ + issues = re.findall('https?://[^ ]+', condition_str) + if not issues: + logger.debug('No issue specified in condition') + return condition_str + + results = check_issues(issues) + + for issue_url in issues: + if issue_url in results: + replace_str = str(results[issue_url]) + else: + # Consider the issue as active anyway if unable to get issue state + replace_str = 'True' + + condition_str = condition_str.replace(issue_url, replace_str) + return condition_str + + +def evaluate_condition(condition, basic_facts): + """Evaluate a condition string based on supplied basic facts. + + Args: + condition (str): A raw condition string that can be evaluated using python "eval()" function. The raw condition + string may contain issue URLs that need further processing. + basic_facts (dict): A one level dict with basic facts. 
Keys of the dict can be used as variables in the + condition string evaluation. + + Returns: + bool: True or False based on condition string evaluation result. + """ + if condition is None or condition.strip() == '': + return True # Empty condition item will be evaluated as True. Equivalent to be ignored. + + condition_str = update_issue_status(condition) + try: + return bool(eval(condition_str, basic_facts)) + except Exception as e: + logger.error('Failed to evaluate condition, raw_condition={}, condition_str={}'.format( + condition, + condition_str)) + return False + + +def evaluate_conditions(conditions, basic_facts): + """Evaluate all the condition strings. + + Evaluate a single condition or multiple conditions. If multiple conditions are supplied, apply AND logical operation + to all of them. + + Args: + conditions (str or list): Condition string or list of condition strings. + basic_facts (dict): A one level dict with basic facts. Keys of the dict can be used as variables in the + condition string evaluation. + + Returns: + bool: True or False based on condition strings evaluation result. + """ + if isinstance(conditions, list): + # Apply 'AND' operation to list of conditions + # Personally, I think it makes more sense to apply 'AND' logical operation to a list of conditions. + return all([evaluate_condition(c, basic_facts) for c in conditions]) + else: + if conditions is None or conditions.strip() == '': + return True + return evaluate_condition(conditions, basic_facts) + + +def pytest_collection(session): + """Hook for loading conditions and basic facts. + + The pytest session.config.cache is used for caching loaded conditions and basic facts for later use. + + Args: + session (obj): Pytest session object. + """ + + # Always clear cached conditions and basic facts of previous run. + session.config.cache.set('TESTS_MARK_CONDITIONS', None) + session.config.cache.set('BASIC_FACTS', None) + + if session.config.option.ignore_conditional_mark: + logger.info('Ignore conditional mark') + return + + conditions = load_conditions(session) + if conditions: + session.config.cache.set('TESTS_MARK_CONDITIONS', conditions) + + # Only load basic facts if conditions are defined. + basic_facts = load_basic_facts(session) + session.config.cache.set('BASIC_FACTS', basic_facts) + + +def pytest_collection_modifyitems(session, config, items): + """Hook for adding marks to test cases based on conditions defind in a centralized file. + + Args: + session (obj): Pytest session object. + config (obj): Pytest config object. + items (obj): List of pytest Item objects. 
+ """ + + conditions = config.cache.get('TESTS_MARK_CONDITIONS', None) + if not conditions: + logger.debug('No mark condition is defined') + return + logger.debug('Predefined mark conditions\n{}'.format(json.dumps(conditions, indent=2))) + + basic_facts = config.cache.get('BASIC_FACTS', None) + if not basic_facts: + logger.debug('No basic facts') + return + logger.info('Available basic facts that can be used in conditional skip:\n{}'.format( + json.dumps(basic_facts, indent=2))) + + for item in items: + logger.info('Processing: {}'.format(item.nodeid)) + longest_match = find_longest_match(item.nodeid, conditions.keys()) + if longest_match: + logger.debug('Found match "{}" for test case "{}"'.format(longest_match, item.nodeid)) + + for mark_name, mark_details in conditions[longest_match].items(): + + add_mark = False + mark_conditions = mark_details.get('conditions', None) + if not mark_conditions: + # Unconditionally add mark + add_mark = True + else: + add_mark = evaluate_conditions(mark_conditions, basic_facts) + + if add_mark: + reason = mark_details.get('reason', '') + + if mark_name == 'xfail': + strict = mark_details.get('strict', False) + mark = getattr(pytest.mark, mark_name)(reason=reason, strict=strict) + else: + mark = getattr(pytest.mark, mark_name)(reason=reason) + + logger.debug('Adding mark {} to {}'.format(mark, item.nodeid)) + item.add_marker(mark) diff --git a/tests/common/plugins/conditional_mark/credentials.yaml b/tests/common/plugins/conditional_mark/credentials.yaml new file mode 100644 index 0000000000..610627b44d --- /dev/null +++ b/tests/common/plugins/conditional_mark/credentials.yaml @@ -0,0 +1,5 @@ +# Below is example - user should uncomment it and replace data with correct credentials + +#GitHub: +# user: example_user +# api_token: example_token diff --git a/tests/common/plugins/conditional_mark/issue.py b/tests/common/plugins/conditional_mark/issue.py new file mode 100644 index 0000000000..3f2d86deb5 --- /dev/null +++ b/tests/common/plugins/conditional_mark/issue.py @@ -0,0 +1,137 @@ +"""For checking issue state based on supplied issue URL. +""" +import logging +import multiprocessing +import os +import re +import yaml + +import requests + +from abc import ABCMeta, abstractmethod + +logger = logging.getLogger(__name__) + +CREDENTIALS_FILE = 'credentials.yaml' + + +class IssueCheckerBase(object): + """Base class for issue checker + """ + __metaclass__ = ABCMeta + + def __init__(self, url): + self.url = url + + @abstractmethod + def is_active(self): + """ + Check if the issue is still active + """ + return True + + +class GitHubIssueChecker(IssueCheckerBase): + """GitHub issue state checker + """ + + NAME = 'GitHub' + + def __init__(self, url): + super(GitHubIssueChecker, self).__init__(url) + self.user = '' + self.api_token = '' + self.api_url = url.replace('github.com', 'api.github.com/repos') + self.get_cred() + + def get_cred(self): + """Get GitHub API credentials + """ + creds_folder_path = os.path.dirname(__file__) + creds_file_path = os.path.join(creds_folder_path, CREDENTIALS_FILE) + try: + with open(creds_file_path) as creds_file: + creds = yaml.safe_load(creds_file).get(self.NAME) + self.user = creds.get('user', '') + self.api_token = creds.get('api_token', '') + except Exception as e: + logger.error('Load credentials from {} failed'.format(creds_file_path)) + + def is_active(self): + """Check if the issue is still active. + + If unable to get issue state, always consider it as active. + + Returns: + bool: False if the issue is closed else True. 
+ """ + try: + response = requests.get(self.api_url, auth=(self.user, self.api_token)) + response.raise_for_status() + issue_data = response.json() + if issue_data.get('state', '') == 'closed': + logger.debug('Issue {} is closed'.format(self.url)) + labels = issue_data.get('labels', []) + if any(['name' in label and 'duplicate' in label['name'].lower() for label in labels]): + logger.warning('GitHub issue: {} looks like duplicate and was closed. Please re-check and ignore' + 'the test on the parent issue'.format(self.url)) + return False + except Exception as e: + logger.error('Get details for {} failed with: {}'.format(self.url, repr(e))) + + logger.debug('Issue {} is active. Or getting issue state failed, consider it as active anyway'.format(self.url)) + return True + + +def issue_checker_factory(url): + """Factory function for creating issue checker object based on the domain name in the issue URL. + + Args: + url (str): Issue URL. + + Returns: + obj: An instance of issue checker. + """ + m = re.match('https?://([^/]+)', url) + if m and len(m.groups()) > 0: + domain_name = m.groups()[0].lower() + if 'github' in domain_name: + return GitHubIssueChecker(url) + else: + logger.error('Unknown issue website: {}'.format(domain_name)) + logger.error('Creating issue checker failed. Bad issue url {}'.format(url)) + return None + + +def check_issues(issues): + """Check state of the specified issues. + + Because issue state checking may involve sending HTTP request. This function uses parallel run to speed up + issue status checking. + + Args: + issues (list of str): List of issue URLs. + + Returns: + dict: Issue state check result. Key is issue URL, value is either True or False based on issue state. + """ + checkers = [c for c in [issue_checker_factory(issue) for issue in issues] if c is not None] + if not checkers: + logger.error('No checker created for issues: {}'.format(issues)) + return {} + + check_results = multiprocessing.Manager().dict() + check_procs = [] + + def _check_issue(checker, results): + results[checker.url] = checker.is_active() + + for checker in checkers: + check_procs.append(multiprocessing.Process(target=_check_issue, args=(checker, check_results,))) + + for proc in check_procs: + proc.start() + for proc in check_procs: + proc.join(timeout=60) + + return check_results diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml new file mode 100644 index 0000000000..b8028962ed --- /dev/null +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -0,0 +1,39 @@ +drop_packets/test_drop_counters.py::test_loopback_filter: + # Test case is skipped, because SONiC does not have a control to adjust loop-back filter settings. + # Default SONiC behavior is to forward the traffic, so loop-back filter does not triggers for IP packets. + # All router interfaces has attribute "sx_interface_attributes_t.loopback_enable" - enabled. + # To enable loop-back filter drops - need to disable that attribute when create RIF. 
+  # This could be done with the SAI attribute SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION, which is not exposed to SONiC
+  skip:
+    reason: "SONiC can't enable loop-back filter feature"
+
+platform_tests/test_sequential_restart.py::test_restart_syncd:
+  skip:
+    reason: "Restarting syncd is not supported yet"
+
+platform_tests/fwutil/test_fwutil.py::test_fwutil_auto:
+  skip:
+    reason: "Command not yet merged into sonic-utilities"
+
+qos/test_pfc_pause.py::test_pfc_pause_lossless:
+  # For this test, we use the fanout connected to the DUT to send PFC pause frames.
+  # The fanout needs to send PFC frames fast enough so that the queue remains completely paused for the entire duration
+  # of the test. The inter packet interval between PFC frames needed to completely block a queue varies based on link speed, and
+  # we have seen flakiness in our test runs. Since this test is already covered under the 'ixia' folder where we use a
+  # traffic generator to generate pause frames, skipping this here.
+  skip:
+    reason: "Fanout needs to send PFC frames fast enough to completely pause the queue"
+
+route/test_static_route.py::test_static_route_ecmp_ipv6:
+  # This test case may fail due to a known issue https://github.com/Azure/sonic-buildimage/issues/4930.
+  # Temporarily disabling the test case due to this issue.
+  skip:
+    reason: "Test case may fail due to a known issue"
+    conditions: https://github.com/Azure/sonic-buildimage/issues/4930
+
+ssh/test_ssh_stress.py::test_ssh_stress:
+  # This test is not stable, skip it for now.
+  # known issue: https://github.com/paramiko/paramiko/issues/1508
+  skip:
+    reason: "This test fails intermittently due to a known paramiko issue, skip for now"
+    conditions: https://github.com/paramiko/paramiko/issues/1508
diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml.example b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml.example
new file mode 100644
index 0000000000..cbe13137a0
--- /dev/null
+++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml.example
@@ -0,0 +1,28 @@
+# Examples
+folder1/test_file1.py::test_case1:
+  skip:
+    reason: "skip file1/case1"
+    conditions:
+      - "release in ['master'] or asic_type=='vs'"
+      - https://github.com/Azure/sonic-mgmt/issues/1234
+      - https://github.com/Azure/sonic-mgmt/issues/1235
+folder1/test_file1.py::test_case2[2+4-6]:
+  skip:
+    reason: "test file1/case2[2+4-6] skip"
+folder1/test_file1.py:
+  fallback_mark:
+    reason: "For rest of the test cases in folder1/test_file1.py, add this fallback_mark unconditionally"
+folder2/test_file2.py::TestMarkers::test_case1:
+  xfail:
+    reason: "test file2/case1 xfail"
+    conditions:
+      - https://github.com/Azure/sonic-mgmt/issues/1235 and topo_name == 't1-lag'
+folder2/test_file2.py::TestMarkers::test_case2:
+  any_mark_is_supported:
+    reason: "Example for adding any mark to tests"
+    conditions: "build_number == 36262"
+folder3:
+  skip:
+    reason: "Skip all the test scripts under subfolder 'folder3'"
+
+# Add your own conditions below
diff --git a/tests/common/plugins/ptfadapter/__init__.py b/tests/common/plugins/ptfadapter/__init__.py
index 0381eb09fb..6a20caf811 100644
--- a/tests/common/plugins/ptfadapter/__init__.py
+++ b/tests/common/plugins/ptfadapter/__init__.py
@@ -6,12 +6,14 @@
 import ptf.testutils
 from tests.common import constants
+import random
 
-DEFAULT_PTF_NN_PORT = 10900
+DEFAULT_PTF_NN_PORT_RANGE = [10900, 11000]
 DEFAULT_DEVICE_NUM = 0
 ETH_PFX = 'eth'
 ETHERNET_PFX = "Ethernet"
+MAX_RETRY_TIME = 3
 
 def
pytest_addoption(parser): @@ -118,26 +120,36 @@ def ptfadapter(ptfhost, tbinfo, request): ifaces = get_ifaces(res['stdout']) ifaces_map = get_ifaces_map(ifaces, ptf_port_mapping_mode) - # generate supervisor configuration for ptf_nn_agent - ptfhost.host.options['variable_manager'].extra_vars.update({ - 'device_num': DEFAULT_DEVICE_NUM, - 'ptf_nn_port': DEFAULT_PTF_NN_PORT, - 'ifaces_map': ifaces_map, - }) + def start_ptf_nn_agent(): + for i in range(MAX_RETRY_TIME): + ptf_nn_port = random.randint(*DEFAULT_PTF_NN_PORT_RANGE) - current_file_dir = os.path.dirname(os.path.realpath(__file__)) + # generate supervisor configuration for ptf_nn_agent + ptfhost.host.options['variable_manager'].extra_vars.update({ + 'device_num': DEFAULT_DEVICE_NUM, + 'ptf_nn_port': ptf_nn_port, + 'ifaces_map': ifaces_map, + }) + current_file_dir = os.path.dirname(os.path.realpath(__file__)) + ptfhost.template(src=os.path.join(current_file_dir, 'templates/ptf_nn_agent.conf.ptf.j2'), + dest='/etc/supervisor/conf.d/ptf_nn_agent.conf') + + # reread configuration and update supervisor + ptfhost.command('supervisorctl reread') + ptfhost.command('supervisorctl update') - ptfhost.template(src=os.path.join(current_file_dir, 'templates/ptf_nn_agent.conf.ptf.j2'), - dest='/etc/supervisor/conf.d/ptf_nn_agent.conf') + # Force a restart of ptf_nn_agent to ensure that it is in good status. + ptfhost.command('supervisorctl restart ptf_nn_agent') - # reread configuration and update supervisor - ptfhost.command('supervisorctl reread') - ptfhost.command('supervisorctl update') + # check whether ptf_nn_agent starts successfully + if "RUNNING" in ptfhost.command('supervisorctl status ptf_nn_agent', module_ignore_errors=True)["stdout_lines"][0]: + return ptf_nn_port + return None - # Force a restart of ptf_nn_agent to ensure that it is in good status. 
- ptfhost.command('supervisorctl restart ptf_nn_agent') + ptf_nn_agent_port = start_ptf_nn_agent() + assert ptf_nn_agent_port is not None - with PtfTestAdapter(tbinfo['ptf_ip'], DEFAULT_PTF_NN_PORT, 0, ifaces_map.keys(), ptfhost) as adapter: + with PtfTestAdapter(tbinfo['ptf_ip'], ptf_nn_agent_port, 0, ifaces_map.keys(), ptfhost) as adapter: if not request.config.option.keep_payload: override_ptf_functions() node_id = request.module.__name__ @@ -171,16 +183,31 @@ def nbr_ptfadapter(request, nbrhosts, nbr_device_numbers, ptfadapter): res = host.command('cat /proc/net/dev') ifaces = get_ifaces(res['stdout']) ifaces_map = {int(ifname.replace(ETHERNET_PFX, '')): ifname for ifname in ifaces if ifname.startswith(ETHERNET_PFX)} - host.host.options['variable_manager'].extra_vars.update({ - 'device_num': nbr_device_numbers[name], - 'ptf_nn_port': DEFAULT_PTF_NN_PORT, - 'ifaces_map': ifaces_map, - }) - host.template(src=os.path.join(current_file_dir, 'templates/ptf_nn_agent.conf.ptf.j2'), - dest='/tmp/ptf_nn_agent.conf') - host.shell('docker rm -f ptf || true') - host.shell('docker run -dt --network=host --rm --name ptf -v /tmp/ptf_nn_agent.conf:/etc/supervisor/conf.d/ptf_nn_agent.conf docker-ptf') - ptf_nn_sock_addr = 'tcp://{}:{}'.format(host.facts["mgmt_interface"][0], DEFAULT_PTF_NN_PORT) + + def start_ptf_nn_agent(): + for i in range(MAX_RETRY_TIME): + ptf_nn_port = random.randint(*DEFAULT_PTF_NN_PORT_RANGE) + + host.host.options['variable_manager'].extra_vars.update({ + 'device_num': nbr_device_numbers[name], + 'ptf_nn_port': ptf_nn_port, + 'ifaces_map': ifaces_map, + }) + host.template(src=os.path.join(current_file_dir, 'templates/ptf_nn_agent.conf.ptf.j2'), + dest='/tmp/ptf_nn_agent.conf') + host.shell('docker rm -f ptf || true') + host.shell('docker run -dt --network=host --rm --name ptf -v /tmp/ptf_nn_agent.conf:/etc/supervisor/conf.d/ptf_nn_agent.conf docker-ptf') + + #Maybe the threads in this docker are not ready and may return None + if "RUNNING" in host.shell('docker exec ptf supervisorctl status ptf_nn_agent')["stdout_lines"][0]: + return ptf_nn_port + return None + + ptf_nn_agent_port = start_ptf_nn_agent() + assert ptf_nn_agent_port is not None + + ptf_nn_sock_addr = 'tcp://{}:{}'.format(host.facts["mgmt_interface"][0], ptf_nn_agent_port) device_sockets.append((nbr_device_numbers[name], ifaces_map, ptf_nn_sock_addr)) + ptfadapter.reinit({"device_sockets": device_sockets}) return ptfadapter diff --git a/tests/common/plugins/ptfadapter/ptfadapter.py b/tests/common/plugins/ptfadapter/ptfadapter.py index 0d6ccd30fe..051f36b19a 100644 --- a/tests/common/plugins/ptfadapter/ptfadapter.py +++ b/tests/common/plugins/ptfadapter/ptfadapter.py @@ -59,7 +59,7 @@ def _check_ptf_nn_agent_availability(self, socket_addr): sock = nnpy.Socket(nnpy.AF_SP, nnpy.PAIR) sock.connect(socket_addr) try: - return wait_until(1, 0.2, lambda:sock.get_statistic(self.NN_STAT_CURRENT_CONNECTIONS) == 1) + return wait_until(1, 0.2, 0, lambda:sock.get_statistic(self.NN_STAT_CURRENT_CONNECTIONS) == 1) finally: sock.close() diff --git a/tests/common/plugins/sanity_check/checks.py b/tests/common/plugins/sanity_check/checks.py index 38156386ca..6e0c354c37 100644 --- a/tests/common/plugins/sanity_check/checks.py +++ b/tests/common/plugins/sanity_check/checks.py @@ -188,6 +188,18 @@ def _check_bgp_on_dut(*args, **kwargs): def _check_bgp_status_helper(): asic_check_results = [] bgp_facts = dut.bgp_facts(asic_index='all') + + # Conditions to fail BGP check + # 1. No BGP neighbor. + # 2. Any BGP neighbor down. + # 3. 
Failed to get BGP status (In theory, this should be protected by previous check, but adding this check + # here will make BGP check more robust, and it is necessary since many operations highly depends on + # the BGP status) + + if len(bgp_facts) == 0: + logger.info("Failed to get BGP status on host %s ..." % dut.hostname) + asic_check_results.append(True) + for asic_index, a_asic_facts in enumerate(bgp_facts): a_asic_result = False a_asic_neighbors = a_asic_facts['ansible_facts']['bgp_neighbors'] @@ -226,7 +238,7 @@ def _check_bgp_status_helper(): networking_uptime = dut.get_networking_uptime().seconds timeout = max(SYSTEM_STABILIZE_MAX_TIME - networking_uptime, 1) interval = 20 - wait_until(timeout, interval, _check_bgp_status_helper) + wait_until(timeout, interval, 0, _check_bgp_status_helper) if (check_result['failed']): for a_result in check_result.keys(): if a_result != 'failed': diff --git a/tests/common/plugins/tacacs.py b/tests/common/plugins/tacacs.py deleted file mode 100644 index 7df1b8fa3a..0000000000 --- a/tests/common/plugins/tacacs.py +++ /dev/null @@ -1,97 +0,0 @@ -import pytest -import crypt -import logging - -from tests.common.utilities import wait_until - -logger = logging.getLogger(__name__) - - -def check_all_services_status(ptfhost): - res = ptfhost.command("service --status-all") - logger.info(res["stdout_lines"]) - - -def start_tacacs_server(ptfhost): - ptfhost.command("service tacacs_plus restart", module_ignore_errors=True) - return "tacacs+ running" in ptfhost.command("service tacacs_plus status", module_ignore_errors=True)["stdout_lines"] - - -def setup_tacacs_client(duthost, creds_all_duts, tacacs_server_ip): - """setup tacacs client""" - - # configure tacacs client - duthost.shell("sudo config tacacs passkey %s" % creds_all_duts[duthost]['tacacs_passkey']) - - # get default tacacs servers - config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] - for tacacs_server in config_facts.get('TACPLUS_SERVER', {}): - duthost.shell("sudo config tacacs delete %s" % tacacs_server) - duthost.shell("sudo config tacacs add %s" % tacacs_server_ip) - duthost.shell("sudo config tacacs authtype login") - - # enable tacacs+ - duthost.shell("sudo config aaa authentication login tacacs+") - - -def setup_tacacs_server(ptfhost, creds_all_duts, duthost): - """setup tacacs server""" - - # configure tacacs server - extra_vars = {'tacacs_passkey': creds_all_duts[duthost]['tacacs_passkey'], - 'tacacs_rw_user': creds_all_duts[duthost]['tacacs_rw_user'], - 'tacacs_rw_user_passwd': crypt.crypt(creds_all_duts[duthost]['tacacs_rw_user_passwd'], 'abc'), - 'tacacs_ro_user': creds_all_duts[duthost]['tacacs_ro_user'], - 'tacacs_ro_user_passwd': crypt.crypt(creds_all_duts[duthost]['tacacs_ro_user_passwd'], 'abc'), - 'tacacs_jit_user': creds_all_duts[duthost]['tacacs_jit_user'], - 'tacacs_jit_user_passwd': crypt.crypt(creds_all_duts[duthost]['tacacs_jit_user_passwd'], 'abc'), - 'tacacs_jit_user_membership': creds_all_duts[duthost]['tacacs_jit_user_membership']} - - ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars) - ptfhost.template(src="tacacs/tac_plus.conf.j2", dest="/etc/tacacs+/tac_plus.conf") - ptfhost.lineinfile(path="/etc/default/tacacs+", line="DAEMON_OPTS=\"-d 10 -l /var/log/tac_plus.log -C /etc/tacacs+/tac_plus.conf\"", regexp='^DAEMON_OPTS=.*') - check_all_services_status(ptfhost) - - # FIXME: This is a short term mitigation, we need to figure out why the tacacs+ server does not start - # reliably all of a sudden. 
- wait_until(5, 1, start_tacacs_server, ptfhost) - check_all_services_status(ptfhost) - - -def cleanup_tacacs(ptfhost, duthost, tacacs_server_ip): - # stop tacacs server - ptfhost.service(name="tacacs_plus", state="stopped") - check_all_services_status(ptfhost) - - # reset tacacs client configuration - duthost.shell("sudo config tacacs delete %s" % tacacs_server_ip) - duthost.shell("sudo config tacacs default passkey") - duthost.shell("sudo config aaa authentication login default") - duthost.shell("sudo config aaa authentication failthrough default") - - -@pytest.fixture(scope="module") -def test_tacacs(ptfhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts): - duthost = duthosts[enum_rand_one_per_hwsku_hostname] - tacacs_server_ip = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host'] - setup_tacacs_client(duthost, creds_all_duts, tacacs_server_ip) - setup_tacacs_server(ptfhost, creds_all_duts, duthost) - - yield - - cleanup_tacacs(ptfhost, duthost, tacacs_server_ip) - - -@pytest.fixture(scope="module") -def test_tacacs_v6(ptfhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts): - duthost = duthosts[enum_rand_one_per_hwsku_hostname] - ptfhost_vars = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars - if 'ansible_hostv6' not in ptfhost_vars: - pytest.skip("Skip IPv6 test. ptf ansible_hostv6 not configured.") - tacacs_server_ip = ptfhost_vars['ansible_hostv6'] - setup_tacacs_client(duthost, creds_all_duts, tacacs_server_ip) - setup_tacacs_server(ptfhost, creds_all_duts, duthost) - - yield - - cleanup_tacacs(ptfhost, duthost, tacacs_server_ip) diff --git a/tests/common/port_toggle.py b/tests/common/port_toggle.py index 5f5395bbf0..0ec17f6b9b 100644 --- a/tests/common/port_toggle.py +++ b/tests/common/port_toggle.py @@ -60,7 +60,7 @@ def __get_down_ports(expect_up=True): log_system_resources(duthost, logger) logger.info("Wait for ports to go down") - shutdown_ok = wait_until(port_down_wait_time, 5, lambda: len(__get_down_ports(expect_up=False)) == len(ports)) + shutdown_ok = wait_until(port_down_wait_time, 5, 0, lambda: len(__get_down_ports(expect_up=False)) == len(ports)) if not shutdown_ok: up_ports = __get_down_ports(expect_up=True) @@ -74,7 +74,7 @@ def __get_down_ports(expect_up=True): duthost.shell_cmds(cmds=cmds_up) logger.info("Wait for ports to come up") - startup_ok = wait_until(port_up_wait_time, 5, lambda: len(__get_down_ports()) == 0) + startup_ok = wait_until(port_up_wait_time, 5, 0, lambda: len(__get_down_ports()) == 0) if not startup_ok: down_ports = __get_down_ports() diff --git a/tests/common/snappi/snappi_fixtures.py b/tests/common/snappi/snappi_fixtures.py index 69103d828f..5f314127c4 100644 --- a/tests/common/snappi/snappi_fixtures.py +++ b/tests/common/snappi/snappi_fixtures.py @@ -74,6 +74,16 @@ def __gen_mac(id): """ return '00:11:22:33:44:{:02d}'.format(id) +def __gen_pc_mac(id): + """ + Generate a MAC address for a portchannel interface + + Args: + id (int): portchannel ID + Returns: + MAC address (string) + """ + return '11:22:33:44:55:{:02d}'.format(id) def __valid_ipv4_addr(ip): """ @@ -271,6 +281,7 @@ def __portchannel_intf_config(config, port_config_list, duthost, snappi_ports): dut_mac = str(duthost.facts['router_mac']) """ For each port channel """ + pc_id = 0 for pc in pc_member: phy_intfs = pc_member[pc] gw_addr = str(pc_intf[pc]['addr']) @@ -313,11 +324,18 @@ def __portchannel_intf_config(config, port_config_list, duthost, snappi_ports): device = 
config.devices.device(name='Device {}'.format(pc), container_name=lag.name)[-1] + ethernet = device.ethernet + ethernet.name = 'Ethernet {}'.format(pc) + ethernet.mac = __gen_pc_mac(pc_id) + ip_stack = device.ethernet.ipv4 + ip_stack.name = 'Ipv4 {}'.format(pc) ip_stack.address = pc_ip_addr ip_stack.prefix = int(prefix) ip_stack.gateway = gw_addr + pc_id = pc_id + 1 + return True diff --git a/tests/common/system_utils/docker.py b/tests/common/system_utils/docker.py index bd8c347849..7cc7041bc6 100644 --- a/tests/common/system_utils/docker.py +++ b/tests/common/system_utils/docker.py @@ -188,7 +188,7 @@ def ready_for_swap(): return True - shutdown_check = wait_until(30, 3, ready_for_swap) + shutdown_check = wait_until(30, 3, 0, ready_for_swap) pytest_assert(shutdown_check, "Docker and/or BGP failed to shut down in 30s") @@ -196,7 +196,7 @@ def _perform_syncd_liveness_check(duthost): def check_liveness(): return duthost.is_service_running("syncd") - liveness_check = wait_until(30, 1, check_liveness) + liveness_check = wait_until(30, 1, 0, check_liveness) pytest_assert(liveness_check, "syncd crashed after swap_syncd") diff --git a/tests/common/testbed.py b/tests/common/testbed.py index a4975a51c8..68d97e4975 100644 --- a/tests/common/testbed.py +++ b/tests/common/testbed.py @@ -21,8 +21,11 @@ class TestbedInfo(object): """Parse the testbed file used to describe whole testbed info.""" - TESTBED_FIELDS_DEPRECATED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment') - TESTBED_FIELDS_RECOMMENDED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'inv_name', 'auto_recover', 'comment') + TESTBED_FIELDS_DEPRECATED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', + 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment') + TESTBED_FIELDS_RECOMMENDED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', + 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'inv_name', 'auto_recover', 'comment') + TOPOLOGY_FILEPATH = "../../ansible/vars/" def __init__(self, testbed_file): if testbed_file.endswith(".csv"): @@ -115,7 +118,46 @@ def _read_testbed_topo_from_yaml(self): tb["duts_map"] = {dut: i for i, dut in enumerate(tb["duts"])} self.testbed_topo[tb["conf-name"]] = tb - def dump_testbeds_to_yaml(self): + def _generate_sai_testbed(self, args): + testbed_data = [] + tb_name = args.sai_testbed_name + sai_topo = "" + sai_ptf_image = "docker-ptf" + + ports_count = len(self.testbed_topo[tb_name]["topo"] + ["properties"]["topology"]["host_interfaces"]) + + if ports_count < 64: + sai_topo = "ptf32" + else: + sai_topo = "ptf64" + + if args.sai_test_ptf: + sai_ptf_image = args.sai_test_ptf + + tb_dict_fields = [ + tb_name, + self.testbed_topo[tb_name]["group-name"], + sai_topo, + sai_ptf_image, + self.testbed_topo[tb_name]["ptf"], + self.testbed_topo[tb_name]["ptf_ip"], + self.testbed_topo[tb_name]["ptf_ipv6"], + self.testbed_topo[tb_name]["server"], + self.testbed_topo[tb_name]["vm_base"] or None, + self.testbed_topo[tb_name]["duts"], + self.testbed_topo[tb_name]["inv_name"], + self.testbed_topo[tb_name]["auto_recover"], + "SAI Testing" + ] + testbed_mapping = zip( + self.TESTBED_FIELDS_RECOMMENDED, tb_dict_fields) + testbed = OrderedDict(testbed_mapping) + testbed_data.append(testbed) + + return testbed_data + + def dump_testbeds_to_yaml(self, args=""): def none_representer(dumper, _): return dumper.represent_scalar("tag:yaml.org,2002:null", "") @@ -151,55 
+193,64 @@ def write_line_break(self, data=None): yaml.Dumper.write_line_break(self) testbed_data = [] - for tb_name, tb_dict in self.testbed_topo.items(): - ptf_ip, ptf_ipv6 = None, None - if tb_dict["ptf_ip"]: - ptf_ip = self._ip_mask_to_cidr(tb_dict["ptf_ip"], - tb_dict["ptf_netmask"]) - if tb_dict["ptf_ipv6"]: - ptf_ipv6 = self._ip_mask_to_cidr(tb_dict["ptf_ipv6"], - tb_dict["ptf_netmask_v6"]) - - if len(self.testbed_fields) == len(self.TESTBED_FIELDS_DEPRECATED): - tb_dict_fields = [ - tb_name, - tb_dict["group-name"], - tb_dict["topo"], - tb_dict["ptf_image_name"], - tb_dict["ptf"], - ptf_ip, - ptf_ipv6, - tb_dict["server"], - tb_dict["vm_base"] or None, - tb_dict["duts"], - tb_dict["comment"] - ] - elif len(self.testbed_fields) == len(self.TESTBED_FIELDS_RECOMMENDED): - tb_dict_fields = [ - tb_name, - tb_dict["group-name"], - tb_dict["topo"], - tb_dict["ptf_image_name"], - tb_dict["ptf"], - ptf_ip, - ptf_ipv6, - tb_dict["server"], - tb_dict["vm_base"] or None, - tb_dict["duts"], - tb_dict["inv_name"], - tb_dict["auto_recover"], - tb_dict["comment"] - ] - testbed_mapping = zip(self.testbed_fields, tb_dict_fields) - testbed = OrderedDict(testbed_mapping) - testbed_data.append(testbed) + if args and len(args.sai) > 0: + testbed_data = self._generate_sai_testbed(args) + else: + for tb_name, tb_dict in self.testbed_topo.items(): + ptf_ip, ptf_ipv6 = None, None + if tb_dict["ptf_ip"]: + ptf_ip = self._ip_mask_to_cidr(tb_dict["ptf_ip"], + tb_dict["ptf_netmask"]) + if tb_dict["ptf_ipv6"]: + ptf_ipv6 = self._ip_mask_to_cidr(tb_dict["ptf_ipv6"], + tb_dict["ptf_netmask_v6"]) + + if len(self.testbed_fields) == len(self.TESTBED_FIELDS_DEPRECATED): + tb_dict_fields = [ + tb_name, + tb_dict["group-name"], + tb_dict["topo"], + tb_dict["ptf_image_name"], + tb_dict["ptf"], + ptf_ip, + ptf_ipv6, + tb_dict["server"], + tb_dict["vm_base"] or None, + tb_dict["duts"], + tb_dict["comment"] + ] + elif len(self.testbed_fields) == len(self.TESTBED_FIELDS_RECOMMENDED): + tb_dict_fields = [ + tb_name, + tb_dict["group-name"], + tb_dict["topo"], + tb_dict["ptf_image_name"], + tb_dict["ptf"], + ptf_ip, + ptf_ipv6, + tb_dict["server"], + tb_dict["vm_base"] or None, + tb_dict["duts"], + tb_dict["inv_name"], + tb_dict["auto_recover"], + tb_dict["comment"] + ] + testbed_mapping = zip(self.testbed_fields, tb_dict_fields) + testbed = OrderedDict(testbed_mapping) + testbed_data.append(testbed) # dump blank instead of 'null' for None IncIndentDumper.add_representer(type(None), none_representer) # dump testbed fields in the order same as csv IncIndentDumper.add_representer(OrderedDict, ordereddict_representer) - with open(self.testbed_yamlfile, "w") as yamlfile: + testbed_file_name = "" + if args and len(args.sai) > 0: + testbed_file_name = args.sai + else: + testbed_file_name = self.testbed_yamlfile + + with open(testbed_file_name, "w") as yamlfile: yaml.dump(testbed_data, yamlfile, explicit_start=True, Dumper=IncIndentDumper) @@ -308,7 +359,7 @@ def parse_topo(self): tb["topo"] = defaultdict() tb["topo"]["name"] = topo tb["topo"]["type"] = self.get_testbed_type(topo) - topo_dir = os.path.join(os.path.dirname(__file__), "../../ansible/vars/") + topo_dir = os.path.join(os.path.dirname(__file__), self.TOPOLOGY_FILEPATH) topo_file = os.path.join(topo_dir, "topo_{}.yml".format(topo)) with open(topo_file, 'r') as fh: tb['topo']['properties'] = yaml.safe_load(fh) @@ -330,10 +381,17 @@ def parse_topo(self): file_group.add_argument("-y", "--yaml", dest="testbed_yamlfile", help="testbed yaml file") file_group.add_argument("-c", 
"--csv", dest="testbed_csvfile", help="testbed csv file") + parser.add_argument("-n", "--testbed", dest="sai_testbed_name", help="sai testbed name") + parser.add_argument("-s", "--sai", dest="sai", help="generate sai testbed file", default="") + parser.add_argument("-p", "--ptf", dest="sai_test_ptf", help="sai test ptf image") parser.add_argument("--print-data", help="print testbed", action="store_true") args = parser.parse_args() testbedfile = args.testbed_csvfile or args.testbed_yamlfile tbinfo = TestbedInfo(testbedfile) + if args.print_data: print(json.dumps(tbinfo.testbed_topo, indent=4)) + + if len(args.sai) > 0: + tbinfo.dump_testbeds_to_yaml(args) diff --git a/tests/common/utilities.py b/tests/common/utilities.py index 62d1baf248..b7b3ea9df2 100644 --- a/tests/common/utilities.py +++ b/tests/common/utilities.py @@ -73,19 +73,25 @@ def wait(seconds, msg=""): time.sleep(seconds) -def wait_until(timeout, interval, condition, *args, **kwargs): +def wait_until(timeout, interval, delay, condition, *args, **kwargs): """ @summary: Wait until the specified condition is True or timeout. @param timeout: Maximum time to wait @param interval: Poll interval + @param delay: Delay time @param condition: A function that returns False or True @param *args: Extra args required by the 'condition' function. @param **kwargs: Extra args required by the 'condition' function. @return: If the condition function returns True before timeout, return True. If the condition function raises an exception, log the error and keep waiting and polling. """ - logger.debug("Wait until %s is True, timeout is %s seconds, checking interval is %s" % \ - (condition.__name__, timeout, interval)) + logger.debug("Wait until %s is True, timeout is %s seconds, checking interval is %s, delay is %s seconds" % \ + (condition.__name__, timeout, interval, delay)) + + if delay > 0: + logger.debug("Delay for %s seconds first" % delay) + time.sleep(delay) + start_time = time.time() elapsed_time = 0 while elapsed_time < timeout: @@ -484,9 +490,12 @@ def compose_dict_from_cli(fields_list): return dict(zip(fields_list[0::2], fields_list[1::2])) -def get_intf_by_sub_intf(sub_intf, vlan_id): +def get_intf_by_sub_intf(sub_intf, vlan_id=None): """ - Deduce interface from sub interface by striping vlan id + Deduce interface from sub interface by striping vlan id, + if vlan id is not passed, will automatically strip vlan id by finding '.', + if '.' found: strip the right or it, + if '.' not found, return original sub_intf. Args: sub_intf (str): sub interface name, e.g. Ethernet100.10 vlan_id (str): vlan id, e.g. 10 @@ -494,7 +503,13 @@ def get_intf_by_sub_intf(sub_intf, vlan_id): Returns: str: interface name, e.g. 
Ethernet100 """ + if type(sub_intf) != str: + sub_intf = str(sub_intf) + if not vlan_id: + idx_of_sub_int_indicator = sub_intf.find(constants.VLAN_SUB_INTERFACE_SEPARATOR) + if idx_of_sub_int_indicator > -1: + return sub_intf[:idx_of_sub_int_indicator] return sub_intf vlan_suffix = constants.VLAN_SUB_INTERFACE_SEPARATOR + vlan_id diff --git a/tests/configlet/test_add_rack.py b/tests/configlet/test_add_rack.py index c313c3ff08..9c2ef71ebb 100644 --- a/tests/configlet/test_add_rack.py +++ b/tests/configlet/test_add_rack.py @@ -138,7 +138,7 @@ def configure_dut(duthosts, rand_one_dut_hostname): def load_minigraph(duthost, duthost_name): config_reload(duthost, config_source="minigraph", wait=180, start_bgp=True) - assert wait_until(300, 20, duthost.critical_services_fully_started), \ + assert wait_until(300, 20, 0, duthost.critical_services_fully_started), \ "All critical services should fully started!{}".format(duthost.critical_services) @@ -192,7 +192,7 @@ def chk_bgp_session(duthost, ip, msg): "{}: BGP session for {} = {}; expect established".format(msg, ip, bgp_state) -def test_add_rack(configure_dut, duthosts, rand_one_dut_hostname): +def test_add_rack(configure_dut, tbinfo, duthosts, rand_one_dut_hostname): global data_dir, orig_db_dir, clet_db_dir, files_dir duthost = duthosts[rand_one_dut_hostname] @@ -210,7 +210,8 @@ def test_add_rack(configure_dut, duthosts, rand_one_dut_hostname): download_sonic_files(duthost, files_dir) # Create minigraph w/o a T0 & configlet, apply & take dump - files_create.do_run(duthost.facts["asic_type"] == "mellanox") + files_create.do_run(is_mlnx = duthost.facts["asic_type"] == "mellanox", + is_storage_backend = 'backend' in tbinfo['topo']['name']) # Ensure BGP session is up before we apply stripped minigraph chk_bgp_session(duthost, tor_data["ip"]["remote"], "pre-clet test") @@ -219,8 +220,8 @@ def test_add_rack(configure_dut, duthosts, rand_one_dut_hostname): apply_clet(duthost, rand_one_dut_hostname) take_DB_dumps(duthost, duthost_name, clet_db_dir, data_dir) - ret = compare_dumps(orig_db_dir, clet_db_dir) - assert not ret, "Failed to compare dumps" + ret, msg = compare_dumps(orig_db_dir, clet_db_dir) + assert not ret, "Failed to compare: " + msg # Ensure BGP session is up chk_bgp_session(duthost, tor_data["ip"]["remote"], "post-clet test") diff --git a/tests/configlet/util/common.py b/tests/configlet/util/common.py index a59c10a8c5..66478fba27 100755 --- a/tests/configlet/util/common.py +++ b/tests/configlet/util/common.py @@ -23,6 +23,7 @@ # # App-DB/"LLDP_ENTRY_TABLE" is very dynamic -- not a candidate for comparison +# link-local IPv6 addresses starts with "fe80:" are skipped from comparison # scan_dbs = { "config-db": { @@ -34,7 +35,12 @@ "app-db": { "db_no": 0, "keys_to_compare": set(), - "keys_to_skip_comp": {"LLDP_ENTRY_TABLE", "NEIGH_TABLE:eth0", "NEIGH_TABLE_DEL_SET"}, + "keys_to_skip_comp": { + "LLDP_ENTRY_TABLE", + "NEIGH_TABLE:eth0", + "NEIGH_TABLE_DEL_SET", + "ROUTE_TABLE:fe80:", + "ROUTE_TABLE:FE80:"}, "keys_skip_val_comp": set() }, "state-db": { @@ -161,6 +167,7 @@ def get_dump(duthost, duthost_name, db_name, db_info, dir_name, data_dir): db_write = {} for k in db_read: + # Transient keys start with "_"; Hence skipped if ((not k.startswith("_")) and (not match_key(k, keys_skip_cmp))): db_write[k] = {} if match_key(k, keys_skip_val) else db_read[k] @@ -206,6 +213,7 @@ def cmp_dump(db_name, orig_db_dir, clet_db_dir): mismatch_cnt = 0 orig_data = {} clet_data = {} + msg = "" fname = "{}.json".format(db_name) with 
open(os.path.join(orig_db_dir, fname), "r") as s: @@ -225,6 +233,8 @@ def cmp_dump(db_name, orig_db_dir, clet_db_dir): for k in diff: log_error("{}: Missing key: {}".format(fname, k)) mismatch_cnt += 1 + if not msg: + msg = "Missing key: {}".format(k) diff = clet_keys - orig_keys for k in diff: @@ -233,35 +243,48 @@ def cmp_dump(db_name, orig_db_dir, clet_db_dir): for k in orig_keys.intersection(clet_keys): if orig_data[k] != clet_data[k]: if orig_data[k]["type"] != clet_data[k]["type"]: - log_error("{}: mismatch key:{} type:{}".format( - fname, orig_data[k]["type"], clet_data[k]["type"])) + log_error("{}: mismatch key:{} type:{} != {}".format( + fname, k, orig_data[k]["type"], clet_data[k]["type"])) mismatch_cnt += 1 + if not msg: + msg = "mismatch key:{} type:{} != {}".format( + k, orig_data[k]["type"], clet_data[k]["type"]) + if not cmp_value(orig_data[k]["value"], clet_data[k]["value"]): log_error("{}: mismatch key:{} {} != {}".format( fname, k, orig_data[k]["value"], clet_data[k]["value"])) mismatch_cnt += 1 + if not msg: + msg = "mismatch key:{} value:{} != {}".format( + k, orig_data[k]["value"], clet_data[k]["value"]) if not mismatch_cnt: log_info("{} compared good orig={} clet={}".format(db_name, orig_db_dir, clet_db_dir)) else: log_info("{} compare failed orig={} clet={} mismatch_cnt={}".format( db_name, orig_db_dir, clet_db_dir, mismatch_cnt)) - return mismatch_cnt + if msg: + msg = "{}: {}".format(db_name, msg) + return mismatch_cnt, msg def compare_dumps(orig_db_dir, clet_db_dir): mismatch_cnt = 0 + ret_msg = "" for db_name in scan_dbs: - mismatch_cnt += cmp_dump(db_name, orig_db_dir, clet_db_dir) - return mismatch_cnt + cnt, msg = cmp_dump(db_name, orig_db_dir, clet_db_dir) + mismatch_cnt += cnt + if not ret_msg: + ret_msg = msg + return mismatch_cnt, ret_msg def main(): set_print() print("Calling compare dumps") - ret = compare_dumps("logs/AddRack/orig", "logs/AddRack/clet") - print("ret = {}".format(ret)) + ret, msg = compare_dumps("logs/AddRack/orig", "logs/AddRack/clet") + print("ret = {} msg={}".format(ret, msg)) if __name__ == "__main__": main() diff --git a/tests/configlet/util/configlet.py b/tests/configlet/util/configlet.py index 39ba577565..bca9c82db3 100755 --- a/tests/configlet/util/configlet.py +++ b/tests/configlet/util/configlet.py @@ -27,6 +27,27 @@ def get_pfc_time(): return ret +def get_vlan_sub_interface(): + global tor_data, sonic_local_ports + + ret = [] + + port = list(sonic_local_ports)[0] + ".10" + port_ip = port + "|" + tor_data["ip"]["local"] + "/31" + port_ip6 = port + "|" + tor_data["ipv6"]["local"] + "/126" + + ret.append({ + "VLAN_SUB_INTERFACE": { + port: { + "admin_status": "up" + }, + port_ip: {}, + port_ip6: {} } + }) + log_debug("clet: get_vlan_sub_interface: {}".format(str(ret))) + return ret + + def get_port_channel(): global tor_data @@ -146,7 +167,7 @@ def get_device_info(): return ret -def get_port_related_data(is_mlnx): +def get_port_related_data(is_mlnx, is_storage_backend): ret = [] cable = {} queue = {} @@ -217,10 +238,14 @@ def get_port_related_data(is_mlnx): "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", "pfc_enable": "3,4", - "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", - "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]" } + if is_storage_backend: + qos[local_port]["dot1p_to_tc_map"] = "[DOT1P_TO_TC_MAP|AZURE]" + else: + qos[local_port]["dscp_to_tc_map"] = "[DSCP_TO_TC_MAP|AZURE]" + if is_mlnx: 
qos[local_port]["pfc_to_pg_map"] = "[PFC_PRIORITY_TO_PRIORITY_GROUP_MAP|AZURE]" @@ -276,18 +301,21 @@ def write_out(lst, tmpdir): managed_files["configlet"] = fpath -def main(tmpdir, is_mlnx): +def main(tmpdir, is_mlnx, is_storage_backend): global sonic_local_ports ret = [] _, sonic_local_ports = strip.get_local_ports() ret += update_port() - ret += add_interface() - ret += get_port_channel() + if not is_storage_backend: + ret += add_interface() + ret += get_port_channel() + else: + ret += get_vlan_sub_interface() ret += get_acl() ret += get_device_info() - ret += get_port_related_data(is_mlnx) + ret += get_port_related_data(is_mlnx, is_storage_backend) ret += get_bgp_neighbor() write_out(ret, tmpdir) diff --git a/tests/configlet/util/files_create.py b/tests/configlet/util/files_create.py index c6cf1157fd..1eaec70998 100755 --- a/tests/configlet/util/files_create.py +++ b/tests/configlet/util/files_create.py @@ -10,11 +10,11 @@ import configlet -def do_run(is_mlnx): +def do_run(is_mlnx, is_storage_backend): init_global_data() strip.main(init_data["files_dir"]) - configlet.main(init_data["files_dir"], is_mlnx) + configlet.main(init_data["files_dir"], is_mlnx, is_storage_backend) log_info("Managed files: {}".format(json.dumps(managed_files, indent=4))) @@ -69,7 +69,7 @@ def main(): init_data["version"] = d["build_version"] is_mlnx = (d["asic_type"].lower() == "mellanox") - do_run(is_mlnx) + do_run(is_mlnx, False) if __name__ == "__main__": diff --git a/tests/conftest.py b/tests/conftest.py index f8201fc07e..7e97989fbb 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -46,7 +46,6 @@ pytest_plugins = ('tests.common.plugins.ptfadapter', 'tests.common.plugins.ansible_fixtures', 'tests.common.plugins.dut_monitor', - 'tests.common.plugins.tacacs', 'tests.common.plugins.loganalyzer', 'tests.common.plugins.pdu_controller', 'tests.common.plugins.sanity_check', @@ -58,7 +57,8 @@ 'tests.common.dualtor', 'tests.vxlan', 'tests.decap', - 'tests.common.plugins.allure_server') + 'tests.common.plugins.allure_server', + 'tests.common.plugins.conditional_mark') def pytest_addoption(parser): diff --git a/tests/console/socat b/tests/console/socat new file mode 100755 index 0000000000..ddb90978a0 Binary files /dev/null and b/tests/console/socat differ diff --git a/tests/console/test_console_availability.py b/tests/console/test_console_availability.py new file mode 100644 index 0000000000..c393a7b07e --- /dev/null +++ b/tests/console/test_console_availability.py @@ -0,0 +1,69 @@ +import getpass +import pexpect +import pytest + +from tests.common.helpers.assertions import pytest_assert + +pytestmark = [ + pytest.mark.topology("any"), + pytest.mark.device_type("vs") +] + +@pytest.mark.parametrize("target_line", ["1", "2", "3", "4"]) +def test_console_availability(duthost, creds, target_line): + """ + Test console are well functional. 
+ Verify console access is available after connecting from DUT + """ + dutip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] + dutuser, dutpass = creds['sonicadmin_user'], creds['sonicadmin_password'] + hostip, hostuser = "172.17.0.1", getpass.getuser() + + res = duthost.shell("which socat", module_ignore_errors=True) + if res["rc"] != 0: + # install socat to DUT host + duthost.copy(src="./console/socat", dest="/usr/local/bin/socat", mode=0755) + + out = pexpect.run("ssh {}@{} -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null 'which socat'".format( + hostuser, hostip)) + if not out: + # install socat to KVM host + pexpect.run("scp -q {} {}@{}:{}".format("./console/socat", hostuser, hostip, "/usr/local/bin/socat")) + + pytest_assert(duthost.shell("socat -V", module_ignore_errors=True)["rc"] == 0, + "Invalid socat installation on DUT host") + pytest_assert(int(pexpect.run("ssh {}@{} -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " + "'socat -V > /dev/null 2>&1; echo $?'".format(hostuser, hostip))) == 0, + "Invalid socat installation on KVM host") + + out = pexpect.run("ssh {0}@{1} -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " + "'sudo killall -q socat;" + "sudo lsof -i:{3} > /dev/null &&" + "sudo socat TCP-LISTEN:{2},fork,reuseaddr TCP:127.0.0.1:{3} & echo $?'".format( + hostuser, hostip, 2000 + int(target_line), 7000 + int(target_line) - 1)) + pytest_assert(int(out.strip()) == 0, "Failed to start socat on KVM host") + + try: + client = pexpect.spawn( + "ssh {2}@{3} -q -t -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " + "'sudo killall -q socat;" + "sudo killall -q picocom;" + "sudo socat PTY,link=/dev/ttyUSB{0} TCP:10.250.0.1:{1},forever &" + "while [ ! 
-e /dev/ttyUSB{0} ]; do sleep 1; done;" + "sudo config console del {0} > /dev/null 2>&1;" + "sudo config console add {0} --baud 9600 --devicename device{0};" + "sudo connect line {0}'".format( + target_line, 2000 + int(target_line), dutuser, dutip)) + client.expect('[Pp]assword:') + client.sendline(dutpass) + + i = client.expect(['Successful connection', 'Cannot connect'], timeout=10) + pytest_assert(i == 0, + "Failed to connect line {}".format(target_line)) + client.expect(['login:', '[:>~$]'], timeout=10) + except pexpect.exceptions.EOF: + pytest.fail("EOF reached") + except pexpect.exceptions.TIMEOUT: + pytest.fail("Timeout reached") + except Exception as e: + pytest.fail("Cannot connect to DUT host via SSH: {}".format(e)) diff --git a/tests/console/test_console_reversessh.py b/tests/console/test_console_reversessh.py index 19d4a986e0..16de235346 100644 --- a/tests/console/test_console_reversessh.py +++ b/tests/console/test_console_reversessh.py @@ -41,7 +41,7 @@ def test_console_reversessh_connectivity(duthost, creds, target_line): pytest.fail("Not able to do reverse SSH to remote host via DUT") pytest_assert( - wait_until(10, 1, check_target_line_status, duthost, target_line, "IDLE"), + wait_until(10, 1, 0, check_target_line_status, duthost, target_line, "IDLE"), "Target line {} is busy after exited reverse SSH session".format(target_line)) @pytest.mark.parametrize("target_line", ["1", "2"]) @@ -79,7 +79,7 @@ def test_console_reversessh_force_interrupt(duthost, creds, target_line): # Check the session ended within 5s and the line state is idle pytest_assert( - wait_until(5, 1, check_target_line_status, duthost, target_line, "IDLE"), + wait_until(5, 1, 0, check_target_line_status, duthost, target_line, "IDLE"), "Target line {} not toggle to IDLE state after force clear command sent") try: diff --git a/tests/container_checker/test_container_checker.py b/tests/container_checker/test_container_checker.py index 8017f2fccc..ca41778f1d 100755 --- a/tests/container_checker/test_container_checker.py +++ b/tests/container_checker/test_container_checker.py @@ -152,7 +152,7 @@ def postcheck_critical_processes_status(duthost, up_bgp_neighbors): for 3 minutes. It will return False after timeout """ logger.info("Post-checking status of critical processes and BGP sessions...") - return wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, + return wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, 0, post_test_check, duthost, up_bgp_neighbors) @@ -172,6 +172,7 @@ def stop_container(duthost, container_name): logger.info("Waiting until container '{}' is stopped...".format(container_name)) stopped = wait_until(CONTAINER_STOP_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, + 0, check_container_state, duthost, container_name, False) pytest_assert(stopped, "Failed to stop container '{}'".format(container_name)) logger.info("Container '{}' on DuT '{}' was stopped".format(container_name, duthost.hostname)) @@ -222,7 +223,9 @@ def test_container_checker(duthosts, enum_dut_feature_container, rand_selected_d skip_containers = disabled_containers[:] skip_containers.append("gbsyncd") - skip_containers.append("database") + skip_containers.append("database") + skip_containers.append("database-chassis") + # Skip 'radv' container on devices whose role is not T0. 
if tbinfo["topo"]["type"] != "t0": skip_containers.append("radv") diff --git a/tests/copp/copp_utils.py b/tests/copp/copp_utils.py index 599b2bf1ba..a0f42eaac0 100644 --- a/tests/copp/copp_utils.py +++ b/tests/copp/copp_utils.py @@ -10,6 +10,7 @@ _REMOVE_IP_SCRIPT = "scripts/remove_ip.sh" _ADD_IP_SCRIPT = "scripts/add_ip.sh" +_ADD_IP_BACKEND_SCRIPT = "scripts/add_ip_backend.sh" _UPDATE_COPP_SCRIPT = "copp/scripts/update_copp_config.py" _BASE_COPP_CONFIG = "/tmp/base_copp_config.json" @@ -89,19 +90,27 @@ def restore_policer(dut, nn_target_namespace): else: dut.command("cp {} {}".format(_BASE_COPP_CONFIG, _CONFIG_DB_COPP_CONFIG)) -def configure_ptf(ptf, nn_target_port): +def configure_ptf(ptf, nn_target_port, nn_target_vlanid, is_backend_topology=False): """ Configures the PTF to run the NN agent on the specified port. Args: ptf (PTFHost): The target PTF. nn_target_port (int): The port to run NN agent on. + nn_target_vlanid (str): The vlan id of the port to run NN agent on. + is_backend_topology (bool): Whether it's a backend topology testbed """ ptf.script(cmd=_REMOVE_IP_SCRIPT) - ptf.script(cmd=_ADD_IP_SCRIPT) + if is_backend_topology: + ptf.script(cmd=_ADD_IP_BACKEND_SCRIPT) + else: + ptf.script(cmd=_ADD_IP_SCRIPT) - facts = {"nn_target_port": nn_target_port} + facts = { + "nn_target_port": nn_target_port, + "nn_target_vlanid": nn_target_vlanid + } ptf.host.options["variable_manager"].extra_vars.update(facts) ptf.template(src=_PTF_NN_TEMPLATE, dest=_PTF_NN_DEST) @@ -124,7 +133,7 @@ def restore_ptf(ptf): ptf.supervisorctl(name="ptf_nn_agent", state="restarted") -def configure_syncd(dut, nn_target_port, nn_target_interface, nn_target_namespace, creds): +def configure_syncd(dut, nn_target_port, nn_target_interface, nn_target_namespace, nn_target_vlanid, creds): """ Configures syncd to run the NN agent on the specified port. @@ -137,10 +146,15 @@ def configure_syncd(dut, nn_target_port, nn_target_interface, nn_target_namespac nn_target_port (int): The port to run NN agent on. 
nn_target_interface (str): The Interface remote NN agents listen to nn_target_namespace (str): The namespace remote NN agents listens + nn_target_vlanid (str): The vlan id of the port to run NN agent on creds (dict): Credential information according to the dut inventory """ - facts = {"nn_target_port": nn_target_port, "nn_target_interface": nn_target_interface} + facts = { + "nn_target_port": nn_target_port, + "nn_target_interface": nn_target_interface, + "nn_target_vlanid": nn_target_vlanid + } dut.host.options["variable_manager"].extra_vars.update(facts) asichost = dut.asic_instance_from_namespace(nn_target_namespace) diff --git a/tests/copp/test_copp.py b/tests/copp/test_copp.py index 38d6f9b7c7..836ed52af6 100644 --- a/tests/copp/test_copp.py +++ b/tests/copp/test_copp.py @@ -27,7 +27,7 @@ from tests.copp import copp_utils from tests.ptf_runner import ptf_runner -from tests.common import config_reload +from tests.common import config_reload, constants from tests.common.system_utils import docker # Module-level fixtures @@ -35,7 +35,7 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] pytestmark = [ - pytest.mark.topology("t1") + pytest.mark.topology("t1", "t2") ] _COPPTestParameters = namedtuple("_COPPTestParameters", @@ -46,9 +46,11 @@ "peerip", "nn_target_interface", "nn_target_namespace", - "send_rate_limit"]) + "send_rate_limit", + "nn_target_vlanid"]) _SUPPORTED_PTF_TOPOS = ["ptf32", "ptf64"] -_SUPPORTED_T1_TOPOS = ["t1", "t1-lag", "t1-64-lag"] +_SUPPORTED_T1_TOPOS = ["t1", "t1-lag", "t1-64-lag", "t1-backend"] +_SUPPORTED_T2_TOPOS = ["t2"] _TOR_ONLY_PROTOCOL = ["DHCP"] _TEST_RATE_LIMIT = 600 @@ -62,14 +64,14 @@ class TestCOPP(object): "IP2ME", "SNMP", "SSH"]) - def test_policer(self, protocol, duthosts, rand_one_dut_hostname, ptfhost, copp_testbed, dut_type): + def test_policer(self, protocol, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, copp_testbed, dut_type): """ Validates that rate-limited COPP groups work as expected. Checks that the policer enforces the rate limit for protocols that have a set rate limit. """ - duthost = duthosts[rand_one_dut_hostname] + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] _copp_runner(duthost, ptfhost, protocol, @@ -81,14 +83,14 @@ def test_policer(self, protocol, duthosts, rand_one_dut_hostname, ptfhost, copp_ "LACP", "LLDP", "UDLD"]) - def test_no_policer(self, protocol, duthosts, rand_one_dut_hostname, ptfhost, copp_testbed, dut_type): + def test_no_policer(self, protocol, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, copp_testbed, dut_type): """ Validates that non-rate-limited COPP groups work as expected. Checks that the policer does not enforce a rate limit for protocols that do not have any set rate limit. 
""" - duthost = duthosts[rand_one_dut_hostname] + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] _copp_runner(duthost, ptfhost, protocol, @@ -96,8 +98,8 @@ def test_no_policer(self, protocol, duthosts, rand_one_dut_hostname, ptfhost, co dut_type) @pytest.fixture(scope="class") -def dut_type(duthosts, rand_one_dut_hostname): - duthost = duthosts[rand_one_dut_hostname] +def dut_type(duthosts, enum_rand_one_per_hwsku_frontend_hostname): + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] cfg_facts = json.loads(duthost.shell("sonic-cfggen -d --print-data")['stdout']) # return config db contents(running-config) dut_type = None @@ -111,7 +113,7 @@ def dut_type(duthosts, rand_one_dut_hostname): @pytest.fixture(scope="class") def copp_testbed( duthosts, - rand_one_dut_hostname, + enum_rand_one_per_hwsku_frontend_hostname, creds, ptfhost, tbinfo, @@ -120,10 +122,10 @@ def copp_testbed( """ Pytest fixture to handle setup and cleanup for the COPP tests. """ - duthost = duthosts[rand_one_dut_hostname] + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] test_params = _gather_test_params(tbinfo, duthost, request) - if test_params.topo not in (_SUPPORTED_PTF_TOPOS + _SUPPORTED_T1_TOPOS): + if test_params.topo not in (_SUPPORTED_PTF_TOPOS + _SUPPORTED_T1_TOPOS + _SUPPORTED_T2_TOPOS): pytest.skip("Topology not supported by COPP tests") try: @@ -135,7 +137,7 @@ def copp_testbed( _teardown_testbed(duthost, creds, ptfhost, test_params, tbinfo) @pytest.fixture(autouse=True) -def ignore_expected_loganalyzer_exceptions(rand_one_dut_hostname, loganalyzer): +def ignore_expected_loganalyzer_exceptions(enum_rand_one_per_hwsku_frontend_hostname, loganalyzer): """ Ignore expected failures logs during test execution. @@ -152,7 +154,7 @@ def ignore_expected_loganalyzer_exceptions(rand_one_dut_hostname, loganalyzer): ] if loganalyzer: # Skip if loganalyzer is disabled - loganalyzer[rand_one_dut_hostname].ignore_regex.extend(ignoreRegex) + loganalyzer[enum_rand_one_per_hwsku_frontend_hostname].ignore_regex.extend(ignoreRegex) def _copp_runner(dut, ptf, protocol, test_params, dut_type): """ @@ -193,13 +195,17 @@ def _gather_test_params(tbinfo, duthost, request): send_rate_limit = request.config.getoption("--send_rate_limit") topo = tbinfo["topo"]["name"] mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + is_backend_topology = mg_facts.get(constants.IS_BACKEND_TOPOLOGY_KEY, False) + # get the port_index_map using the ptf_indicies to support multi DUT topologies port_index_map = { k: v - for k, v in mg_facts["minigraph_port_indices"].items() + for k, v in mg_facts["minigraph_ptf_indices"].items() if k in mg_facts["minigraph_ports"] } - nn_target_port = port_index_map[random.choice(port_index_map.keys())] - nn_target_interface = copp_utils._map_port_number_to_interface(duthost, nn_target_port) + # use randam sonic interface for testing + nn_target_interface = random.choice(port_index_map.keys()) + #get the ptf port for choosen port + nn_target_port = port_index_map[nn_target_interface] myip = None peerip = None @@ -210,24 +216,33 @@ def _gather_test_params(tbinfo, duthost, request): break nn_target_namespace = mg_facts["minigraph_neighbors"][nn_target_interface]['namespace'] + if is_backend_topology and len(mg_facts["minigraph_vlan_sub_interfaces"]) > 0: + nn_target_vlanid = mg_facts["minigraph_vlan_sub_interfaces"][0]["vlan"] + else: + nn_target_vlanid = None + - logging.info("nn_target_port {} nn_target_interface {} nn_target_namespace {}".format(nn_target_port, 
nn_target_interface, nn_target_namespace)) + logging.info("nn_target_port {} nn_target_interface {} nn_target_namespace {} nn_target_vlanid {}".format(nn_target_port, nn_target_interface, nn_target_namespace, nn_target_vlanid)) return _COPPTestParameters(nn_target_port=nn_target_port, swap_syncd=swap_syncd, topo=topo, myip=myip, - peerip = peerip, + peerip=peerip, nn_target_interface=nn_target_interface, nn_target_namespace=nn_target_namespace, - send_rate_limit=send_rate_limit) + send_rate_limit=send_rate_limit, + nn_target_vlanid=nn_target_vlanid) def _setup_testbed(dut, creds, ptf, test_params, tbinfo): """ Sets up the testbed to run the COPP tests. """ + mg_facts = dut.get_extended_minigraph_facts(tbinfo) + is_backend_topology = mg_facts.get(constants.IS_BACKEND_TOPOLOGY_KEY, False) + logging.info("Set up the PTF for COPP tests") - copp_utils.configure_ptf(ptf, test_params.nn_target_port) + copp_utils.configure_ptf(ptf, test_params.nn_target_port, test_params.nn_target_vlanid, is_backend_topology) logging.info("Update the rate limit for the COPP policer") copp_utils.limit_policer(dut, _TEST_RATE_LIMIT, test_params.nn_target_namespace) @@ -250,7 +265,7 @@ def _setup_testbed(dut, creds, ptf, test_params, tbinfo): logging.info("Configure syncd RPC for testing") copp_utils.configure_syncd(dut, test_params.nn_target_port, test_params.nn_target_interface, - test_params.nn_target_namespace, creds) + test_params.nn_target_namespace, test_params.nn_target_vlanid, creds) def _teardown_testbed(dut, creds, ptf, test_params, tbinfo): """ diff --git a/tests/crm/test_crm.py b/tests/crm/test_crm.py index 9da7ef153b..73d387c862 100755 --- a/tests/crm/test_crm.py +++ b/tests/crm/test_crm.py @@ -23,7 +23,7 @@ logger = logging.getLogger(__name__) CRM_POLLING_INTERVAL = 1 -CRM_UPDATE_TIME = 4 +CRM_UPDATE_TIME = 10 SONIC_RES_UPDATE_TIME = 50 THR_VERIFY_CMDS = OrderedDict([ diff --git a/tests/decap/conftest.py b/tests/decap/conftest.py index f493b17af1..421dec02cd 100644 --- a/tests/decap/conftest.py +++ b/tests/decap/conftest.py @@ -18,4 +18,3 @@ def pytest_generate_tests(metafunc): if "supported_ttl_dscp_params" in metafunc.fixturenames: params = build_ttl_dscp_params({'ttl': ttl, 'dscp': dscp}) metafunc.parametrize("supported_ttl_dscp_params", params, ids=lambda p: "ttl=%s, dscp=%s" % (p['ttl'], p['dscp']), scope="module") - diff --git a/tests/decap/test_decap.py b/tests/decap/test_decap.py index 52b69083b3..5331a0a61f 100644 --- a/tests/decap/test_decap.py +++ b/tests/decap/test_decap.py @@ -22,55 +22,55 @@ logger = logging.getLogger(__name__) PTFRUNNER_QLEN = 1000 -FIB_INFO_DEST = "/root/fib_info.txt" pytestmark = [ pytest.mark.topology('any') ] -@pytest.fixture(scope='module') +@pytest.fixture def ttl_dscp_params(duthost, supported_ttl_dscp_params): if "uniform" in supported_ttl_dscp_params.values() and ("201811" in duthost.os_version or "201911" in duthost.os_version): pytest.skip('uniform ttl/dscp mode is available from 202012. 
Current version is %s' % duthost.os_version) - + return supported_ttl_dscp_params -@pytest.fixture(scope="module") -def setup_teardown(request, duthosts, fib_info_files, duts_running_config_facts, ttl_dscp_params): +def remove_default_decap_cfg(duthosts): + for duthost in duthosts: + logger.info('Remove default decap cfg on {}'.format(duthost.hostname)) + for asic_id in duthost.get_frontend_asic_ids(): + swss = 'swss{}'.format(asic_id if asic_id is not None else '') + cmds = [ + 'docker exec {} cp /etc/swss/config.d/ipinip.json /default_ipinip.json'.format(swss), + 'docker exec {} sed -i -e \'s/"OP": *"SET"/"OP": "DEL"/g\' /default_ipinip.json'.format(swss), + 'docker exec {} swssconfig /default_ipinip.json'.format(swss), + 'docker exec {} rm /default_ipinip.json'.format(swss) + ] + duthost.shell_cmds(cmds=cmds) - is_multi_asic = duthosts[0].sonichost.is_multi_asic - ecn_mode = "copy_from_outer" - dscp_mode = ttl_dscp_params['dscp'] - ttl_mode = ttl_dscp_params['ttl'] - - # The hostvars dict has definitions defined in ansible/group_vars/sonic/variables - hostvars = duthosts[0].host.options["variable_manager"]._hostvars[duthosts[0].hostname] - sonic_hwsku = duthosts[0].sonichost.facts["hwsku"] - mellanox_hwskus = hostvars.get("mellanox_hwskus", []) +def restore_default_decap_cfg(duthosts): + for duthost in duthosts: + logger.info('Restore default decap cfg on {}'.format(duthost.hostname)) + for asic_id in duthost.get_frontend_asic_ids(): + swss = 'swss{}'.format(asic_id if asic_id is not None else '') + cmd = 'docker exec {} swssconfig /etc/swss/config.d/ipinip.json'.format(swss) + duthost.shell(cmd) - if sonic_hwsku in mellanox_hwskus: - dscp_mode = "uniform" - ecn_mode = "standard" - setup_info = { +@pytest.fixture(scope='module') +def ip_ver(request): + return { "outer_ipv4": to_bool(request.config.getoption("outer_ipv4")), "outer_ipv6": to_bool(request.config.getoption("outer_ipv6")), "inner_ipv4": to_bool(request.config.getoption("inner_ipv4")), "inner_ipv6": to_bool(request.config.getoption("inner_ipv6")), - "dscp_mode": dscp_mode, - "ecn_mode": ecn_mode, - "ttl_mode": ttl_mode, - "fib_info_files": fib_info_files[:3], # Test at most 3 DUTs in case of multi-DUT - "ignore_ttl": True if is_multi_asic else False, - "max_internal_hops": 3 if is_multi_asic else 0, } - # config decap - decap_conf_template = Template(open("../ansible/roles/test/templates/decap_conf.j2").read()) +@pytest.fixture(scope='module') +def loopback_ips(duthosts, duts_running_config_facts): lo_ips = [] lo_ipv6s = [] for duthost in duthosts: @@ -86,42 +86,79 @@ def setup_teardown(request, duthosts, fib_info_files, duts_running_config_facts, lo_ipv6 = str(ip) lo_ips.append(lo_ip) lo_ipv6s.append(lo_ipv6) + return {'lo_ips': lo_ips, 'lo_ipv6s': lo_ipv6s} - decap_conf_vars = { - "lo_ip": lo_ip, - "lo_ipv6": lo_ipv6, - "op": "SET" - } - decap_conf_vars.update(setup_info) - - duthost.copy(content=decap_conf_template.render( - **decap_conf_vars), dest="/tmp/decap_conf.json") - decap_conf_vars["op"] = "DEL" - duthost.copy(content=decap_conf_template.render( - **decap_conf_vars), dest="/tmp/decap_conf_del.json") +@pytest.fixture(scope='module') +def setup_teardown(request, duthosts, duts_running_config_facts, ip_ver, loopback_ips, fib_info_files): - for asic_id in duthost.get_frontend_asic_ids(): - duthost.shell("docker cp /tmp/decap_conf.json swss{}:/decap_conf.json" - .format(asic_id if asic_id is not None else "")) - duthost.shell('docker exec swss{} sh -c "swssconfig /decap_conf.json"' - .format(asic_id if asic_id is not 
None else "")) + is_multi_asic = duthosts[0].sonichost.is_multi_asic - setup_info['lo_ips'] = lo_ips - setup_info['lo_ipv6s'] = lo_ipv6s - setup_info['router_macs'] = [duthost.facts['router_mac'] for duthost in duthosts] + setup_info = { + "fib_info_files": fib_info_files[:3], # Test at most 3 DUTs in case of multi-DUT + "ignore_ttl": True if is_multi_asic else False, + "max_internal_hops": 3 if is_multi_asic else 0, + 'router_macs': [duthost.facts['router_mac'] for duthost in duthosts] + } + setup_info.update(ip_ver) + setup_info.update(loopback_ips) logger.info(json.dumps(setup_info, indent=2)) + # Remove default tunnel + remove_default_decap_cfg(duthosts) + yield setup_info - # Remove decap configuration - for duthost in duthosts: + # Restore default tunnel + restore_default_decap_cfg(duthosts) + + +def apply_decap_cfg(duthosts, ip_ver, loopback_ips, ttl_mode, dscp_mode, ecn_mode, op): + + decap_conf_template = Template(open("../ansible/roles/test/templates/decap_conf.j2").read()) + + # apply test decap configuration (SET or DEL) + for idx, duthost in enumerate(duthosts): + decap_conf_vars = { + 'lo_ip': loopback_ips['lo_ips'][idx], + 'lo_ipv6': loopback_ips['lo_ipv6s'][idx], + 'ttl_mode': ttl_mode, + 'dscp_mode': dscp_mode, + 'ecn_mode': ecn_mode, + 'op': op, + } + decap_conf_vars.update(ip_ver) + duthost.copy( + content=decap_conf_template.render(**decap_conf_vars), + dest='/tmp/decap_conf_{}.json'.format(op)) + for asic_id in duthost.get_frontend_asic_ids(): - duthost.shell("docker cp /tmp/decap_conf_del.json swss{}:/decap_conf_del.json" - .format(asic_id if asic_id is not None else "")) - duthost.shell('docker exec swss{} sh -c "swssconfig /decap_conf_del.json"' - .format(asic_id if asic_id is not None else "")) + swss = 'swss{}'.format(asic_id if asic_id is not None else '') + cmds = [ + 'docker cp /tmp/decap_conf_{}.json {}:/decap_conf_{}.json'.format(op, swss, op), + 'docker exec {} swssconfig /decap_conf_{}.json'.format(swss, op), + 'docker exec {} rm /decap_conf_{}.json'.format(swss, op) + ] + duthost.shell_cmds(cmds=cmds) + duthost.shell('rm /tmp/decap_conf_{}.json'.format(op)) + + +@pytest.fixture +def decap_config(duthosts, ttl_dscp_params, ip_ver, loopback_ips): + ecn_mode = "copy_from_outer" + ttl_mode = ttl_dscp_params['ttl'] + dscp_mode = ttl_dscp_params['dscp'] + if duthosts[0].facts['asic_type'] in ['mellanox']: + ecn_mode = 'standard' + + # Add test decap configuration + apply_decap_cfg(duthosts, ip_ver, loopback_ips, ttl_mode, dscp_mode, ecn_mode, 'SET') + + yield ttl_mode, dscp_mode + + # Remove test decap configuration + apply_decap_cfg(duthosts, ip_ver, loopback_ips, ttl_mode, dscp_mode, ecn_mode, 'DEL') def set_mux_side(tbinfo, mux_server_url, side): @@ -137,10 +174,12 @@ def set_mux_random(tbinfo, mux_server_url): return set_mux_side(tbinfo, mux_server_url, 'random') -def test_decap(tbinfo, duthosts, mux_server_url, setup_teardown, ptfhost, set_mux_random, ttl_dscp_params): +def test_decap(tbinfo, duthosts, ptfhost, setup_teardown, decap_config, mux_server_url, set_mux_random): setup_info = setup_teardown + ttl_mode, dscp_mode = decap_config + if 'dualtor' in tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') @@ -156,8 +195,8 @@ def test_decap(tbinfo, duthosts, mux_server_url, setup_teardown, ptfhost, set_mu "lo_ips": setup_info["lo_ips"], "lo_ipv6s": setup_info["lo_ipv6s"], "router_macs": setup_info["router_macs"], - "dscp_mode": setup_info["dscp_mode"], - "ttl_mode": setup_info["ttl_mode"], + 
"ttl_mode": ttl_mode, + "dscp_mode": dscp_mode, "ignore_ttl": setup_info["ignore_ttl"], "max_internal_hops": setup_info["max_internal_hops"], "fib_info_files": setup_info["fib_info_files"], diff --git a/tests/dhcp_relay/test_dhcp_relay.py b/tests/dhcp_relay/test_dhcp_relay.py index 788cc1e1cc..823760cdbc 100644 --- a/tests/dhcp_relay/test_dhcp_relay.py +++ b/tests/dhcp_relay/test_dhcp_relay.py @@ -72,9 +72,14 @@ def dut_dhcp_relay_data(duthosts, rand_one_dut_hostname, ptfhost, tbinfo): downlink_vlan_iface['dhcp_server_addrs'] = mg_facts['dhcp_servers'] - # We choose the physical interface where our DHCP client resides to be index of first interface in the VLAN + # We choose the physical interface where our DHCP client resides to be index of first interface with alias (ignore PortChannel) in the VLAN client_iface = {} - client_iface['name'] = vlan_info_dict['members'][0] + for port in vlan_info_dict['members']: + if port in mg_facts['minigraph_port_name_to_alias_map']: + break + else: + continue + client_iface['name'] = port client_iface['alias'] = mg_facts['minigraph_port_name_to_alias_map'][client_iface['name']] client_iface['port_idx'] = mg_facts['minigraph_ptf_indices'][client_iface['name']] @@ -110,6 +115,7 @@ def dut_dhcp_relay_data(duthosts, rand_one_dut_hostname, ptfhost, tbinfo): # Obtain MAC address of an uplink interface because vlan mac may be different than that of physical interfaces res = duthost.shell('cat /sys/class/net/{}/address'.format(uplink_interfaces[0])) dhcp_relay_data['uplink_mac'] = res['stdout'] + dhcp_relay_data['default_gw_ip'] = mg_facts['minigraph_mgmt_interface']['gwaddr'] dhcp_relay_data_list.append(dhcp_relay_data) @@ -119,15 +125,25 @@ def dut_dhcp_relay_data(duthosts, rand_one_dut_hostname, ptfhost, tbinfo): def check_routes_to_dhcp_server(duthost, dut_dhcp_relay_data): """Validate there is route on DUT to each DHCP server """ + default_gw_ip = dut_dhcp_relay_data[0]['default_gw_ip'] dhcp_servers = set() for dhcp_relay in dut_dhcp_relay_data: dhcp_servers |= set(dhcp_relay['downlink_vlan_iface']['dhcp_server_addrs']) for dhcp_server in dhcp_servers: rtInfo = duthost.get_ip_route_info(ipaddress.ip_address(dhcp_server)) - if len(rtInfo["nexthops"]) == 0: + nexthops = rtInfo["nexthops"] + if len(nexthops) == 0: logger.info("Failed to find route to DHCP server '{0}'".format(dhcp_server)) return False + if len(nexthops) == 1: + # if only 1 route to dst available - check that it's not default route via MGMT iface + route_index_in_list = 0 + ip_dst_index = 0 + route_dst_ip = nexthops[route_index_in_list][ip_dst_index] + if route_dst_ip == ipaddress.ip_address(default_gw_ip): + logger.info("Found route to DHCP server via default GW(MGMT interface)") + return False return True @@ -170,7 +186,25 @@ def testing_config(request, duthosts, rand_one_dut_hostname, tbinfo): duthost = duthosts[rand_one_dut_hostname] subtype_exist, subtype_value = get_subtype_from_configdb(duthost) - if 'dualtor' not in tbinfo['topo']['name']: + if 'dualtor' in tbinfo['topo']['name']: + if testing_mode == SINGLE_TOR_MODE: + pytest.skip("skip SINGLE_TOR_MODE tests on Dual ToR testbeds") + + if testing_mode == DUAL_TOR_MODE: + if not subtype_exist or subtype_value != 'DualToR': + assert False, "Wrong DHCP setup on Dual ToR testbeds" + + yield testing_mode, duthost, 'dual_testbed' + elif tbinfo['topo']['name'] == 't0-56-po2vlan': + if testing_mode == SINGLE_TOR_MODE: + if subtype_exist and subtype_value == 'DualToR': + assert False, "Wrong DHCP setup on t0-56-vlan2po testbeds" + + yield 
testing_mode, duthost, 'single_testbed' + + if testing_mode == DUAL_TOR_MODE: + pytest.skip("skip DUAL_TOR_MODE tests on t0-56-vlan2po testbeds") + else: if testing_mode == SINGLE_TOR_MODE: if subtype_exist: duthost.shell('redis-cli -n 4 HDEL "DEVICE_METADATA|localhost" "subtype"') @@ -186,15 +220,6 @@ def testing_config(request, duthosts, rand_one_dut_hostname, tbinfo): if testing_mode == DUAL_TOR_MODE: duthost.shell('redis-cli -n 4 HDEL "DEVICE_METADATA|localhost" "subtype"') restart_dhcp_service(duthost) - else: - if testing_mode == SINGLE_TOR_MODE: - pytest.skip("skip SINGLE_TOR_MODE tests on Dual ToR testbeds") - - if testing_mode == DUAL_TOR_MODE: - if not subtype_exist or subtype_value != 'DualToR': - assert False, "Wrong DHCP setup on Dual ToR testbeds" - - yield testing_mode, duthost, 'dual_testbed' def test_dhcp_relay_default(ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist, testing_config, toggle_all_simulator_ports_to_rand_selected_tor): @@ -244,7 +269,7 @@ def test_dhcp_relay_after_link_flap(ptfhost, dut_dhcp_relay_data, validate_dut_r for iface in dhcp_relay['uplink_interfaces']: duthost.shell('ifconfig {} down'.format(iface)) - pytest_assert(wait_until(50, 5, check_link_status, duthost, dhcp_relay['uplink_interfaces'], "down"), + pytest_assert(wait_until(50, 5, 0, check_link_status, duthost, dhcp_relay['uplink_interfaces'], "down"), "Not all uplinks go down") # Bring all uplink interfaces back up @@ -252,7 +277,7 @@ def test_dhcp_relay_after_link_flap(ptfhost, dut_dhcp_relay_data, validate_dut_r duthost.shell('ifconfig {} up'.format(iface)) # Wait until uplinks are up and routes are recovered - pytest_assert(wait_until(50, 5, check_routes_to_dhcp_server, duthost, dut_dhcp_relay_data), + pytest_assert(wait_until(50, 5, 0, check_routes_to_dhcp_server, duthost, dut_dhcp_relay_data), "Not all DHCP servers are routed") # Run the DHCP relay test on the PTF host @@ -295,7 +320,7 @@ def test_dhcp_relay_start_with_uplinks_down(ptfhost, dut_dhcp_relay_data, valida for iface in dhcp_relay['uplink_interfaces']: duthost.shell('ifconfig {} down'.format(iface)) - pytest_assert(wait_until(50, 5, check_link_status, duthost, dhcp_relay['uplink_interfaces'], "down"), + pytest_assert(wait_until(50, 5, 0, check_link_status, duthost, dhcp_relay['uplink_interfaces'], "down"), "Not all uplinks go down") # Restart DHCP relay service on DUT @@ -310,7 +335,7 @@ def test_dhcp_relay_start_with_uplinks_down(ptfhost, dut_dhcp_relay_data, valida duthost.shell('ifconfig {} up'.format(iface)) # Wait until uplinks are up and routes are recovered - pytest_assert(wait_until(50, 5, check_routes_to_dhcp_server, duthost, dut_dhcp_relay_data), + pytest_assert(wait_until(50, 5, 0, check_routes_to_dhcp_server, duthost, dut_dhcp_relay_data), "Not all DHCP servers are routed") # Run the DHCP relay test on the PTF host diff --git a/tests/dhcp_relay/test_dhcpv6_relay.py b/tests/dhcp_relay/test_dhcpv6_relay.py index 18d04ed8fb..1b0f67da91 100644 --- a/tests/dhcp_relay/test_dhcpv6_relay.py +++ b/tests/dhcp_relay/test_dhcpv6_relay.py @@ -110,7 +110,7 @@ def test_dhcp_relay_default(ptfhost, duthosts, rand_one_dut_hostname, dut_dhcp_r For each DHCP relay agent running on the DuT, verify DHCP packets are relayed properly """ duthost = duthosts[rand_one_dut_hostname] - skip_release(duthost, ["201811", "201911", "202012"]) + skip_release(duthost, ["201811", "201911", "202106"]) for dhcp_relay in dut_dhcp_relay_data: # Run the DHCP relay test on the PTF host @@ -136,7 +136,7 @@ def test_dhcp_relay_after_link_flap(ptfhost, 
duthosts, rand_one_dut_hostname, du then test whether the DHCP relay agent relays packets properly. """ duthost = duthosts[rand_one_dut_hostname] - skip_release(duthost, ["201811", "201911", "202012"]) + skip_release(duthost, ["201811", "201911", "202106"]) for dhcp_relay in dut_dhcp_relay_data: # Bring all uplink interfaces down @@ -177,7 +177,7 @@ def test_dhcp_relay_start_with_uplinks_down(ptfhost, duthosts, rand_one_dut_host relays packets properly. """ duthost = duthosts[rand_one_dut_hostname] - skip_release(duthost, ["201811", "201911", "202012"]) + skip_release(duthost, ["201811", "201911", "202106"]) for dhcp_relay in dut_dhcp_relay_data: # Bring all uplink interfaces down diff --git a/tests/drop_packets/acl_templates/acltb_test_rule_egress.json b/tests/drop_packets/acl_templates/acltb_test_rule_egress.json new file mode 100755 index 0000000000..aa87373eda --- /dev/null +++ b/tests/drop_packets/acl_templates/acltb_test_rule_egress.json @@ -0,0 +1,29 @@ +{ + "acl": { + "acl-sets": { + "acl-set": { + "OUTDATAACL": { + "acl-entries": { + "acl-entry": { + "1": { + "actions": { + "config": { + "forwarding-action": "DROP" + } + }, + "config": { + "sequence-id": 1 + }, + "ip": { + "config": { + "destination-ip-address": "192.168.144.0/24" + } + } + } + } + } + } + } + } + } +} diff --git a/tests/drop_packets/drop_packets.py b/tests/drop_packets/drop_packets.py index 48c40e08de..2d62d80193 100644 --- a/tests/drop_packets/drop_packets.py +++ b/tests/drop_packets/drop_packets.py @@ -15,6 +15,7 @@ from tests.common.platform.device_utils import fanout_switch_port_lookup from tests.common.helpers.constants import DEFAULT_NAMESPACE from tests.common.utilities import get_inventory_files +from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzerError RX_DRP = "RX_DRP" RX_ERR = "RX_ERR" @@ -24,6 +25,10 @@ pytest.SKIP_COUNTERS_FOR_MLNX = False MELLANOX_MAC_UPDATE_SCRIPT = os.path.join(os.path.dirname(__file__), "fanout/mellanox/mlnx_update_mac.j2") +ACL_COUNTERS_UPDATE_INTERVAL = 10 +LOG_EXPECT_ACL_TABLE_CREATE_RE = ".*Created ACL table.*" +LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*" +LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*" LOG_EXPECT_PORT_OPER_DOWN_RE = ".*Port {} oper state set from up to down.*" LOG_EXPECT_PORT_OPER_UP_RE = ".*Port {} oper state set from down to up.*" @@ -258,6 +263,115 @@ def ports_info(ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports) return data +def acl_setup(duthosts, loganalyzer, template_dir, acl_rules_template, del_acl_rules_template, dut_tmp_dir, + dut_clear_conf_file_path): + for duthost in duthosts: + acl_facts = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"] + if 'DATAACL' not in acl_facts.keys(): + pytest.skip("Skipping test since DATAACL table is not present on DUT") + + duthost.command("mkdir -p {}".format(dut_tmp_dir)) + dut_conf_file_path = os.path.join(dut_tmp_dir, acl_rules_template) + + logger.info("Generating config for ACL rule, ACL table - DATAACL") + duthost.template(src=os.path.join(template_dir, acl_rules_template), dest=dut_conf_file_path) + logger.info("Generating clear config for ACL rule, ACL table - DATAACL") + duthost.template(src=os.path.join(template_dir, del_acl_rules_template), dest=dut_clear_conf_file_path) + + logger.info("Applying {}".format(dut_conf_file_path)) + + loganalyzer[duthost.hostname].expect_regex = [LOG_EXPECT_ACL_RULE_CREATE_RE] + with loganalyzer[duthost.hostname]: + duthost.command("config acl update full {}".format(dut_conf_file_path)) + 
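# Illustrative sketch, not the repository template: the ingress file
# "acltb_test_rule.json" consumed by acl_setup() above is not shown in this
# diff. Going by the egress example earlier in this patch and by
# test_acl_drop() further below (which expects packets with SRC IP in
# 20.0.0.0/24 to be dropped by the ingress ACL), the rendered payload is
# presumably an openconfig-style DROP entry for the existing DATAACL table,
# roughly shaped like this (sequence-id and match fields may differ):
_EXAMPLE_INGRESS_DROP_RULE = {
    "acl": {
        "acl-sets": {
            "acl-set": {
                "DATAACL": {
                    "acl-entries": {
                        "acl-entry": {
                            "1": {
                                "actions": {"config": {"forwarding-action": "DROP"}},
                                "config": {"sequence-id": 1},
                                "ip": {"config": {"source-ip-address": "20.0.0.0/24"}}
                            }
                        }
                    }
                }
            }
        }
    }
}
# acl_setup() renders such a file onto the DUT and activates it with
# "config acl update full <rendered-file>", while the log analyzer waits for
# LOG_EXPECT_ACL_RULE_CREATE_RE to confirm the rule was programmed.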
+ +def acl_teardown(duthosts, loganalyzer, dut_tmp_dir, dut_clear_conf_file_path): + for duthost in duthosts: + loganalyzer[duthost.hostname].expect_regex = [LOG_EXPECT_ACL_RULE_REMOVE_RE] + with loganalyzer[duthost.hostname]: + logger.info("Applying {}".format(dut_clear_conf_file_path)) + duthost.command("config acl update full {}".format(dut_clear_conf_file_path)) + logger.info("Removing {}".format(dut_tmp_dir)) + duthost.command("rm -rf {}".format(dut_tmp_dir)) + time.sleep(ACL_COUNTERS_UPDATE_INTERVAL) + + +@pytest.fixture +def acl_ingress(duthosts, loganalyzer): + """ Create acl rule defined in config file. Delete rule after test case finished """ + base_dir = os.path.dirname(os.path.realpath(__file__)) + template_dir = os.path.join(base_dir, 'acl_templates') + acl_rules_template = "acltb_test_rule.json" + del_acl_rules_template = "acl_rule_del.json" + dut_tmp_dir = os.path.join("tmp", os.path.basename(base_dir)) + dut_clear_conf_file_path = os.path.join(dut_tmp_dir, del_acl_rules_template) + + acl_setup(duthosts, loganalyzer, template_dir, acl_rules_template, del_acl_rules_template, dut_tmp_dir, + dut_clear_conf_file_path) + yield + acl_teardown(duthosts, loganalyzer, dut_tmp_dir, dut_clear_conf_file_path) + + +def create_or_remove_acl_egress_table(duthost, setup, op): + acl_table_config = { + "table_name": "OUTDATAACL", + "table_ports": ",".join(duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"]["DATAACL"]["ports"]), + "table_stage": "egress", + "table_type": "L3" + } + + for sonic_host_or_asic_inst in duthost.get_sonic_host_and_frontend_asic_instance(): + if op == "add": + logger.info("Creating ACL table: \"{}\" on device {}".format(acl_table_config["table_name"], duthost)) + sonic_host_or_asic_inst.command( + "config acl add table {} {} -s {} -p {}".format( + acl_table_config["table_name"], + acl_table_config["table_type"], + acl_table_config["table_stage"], + acl_table_config["table_ports"] + ) + ) + elif op == "remove": + logger.info("Removing ACL table \"{}\" on device {}".format(acl_table_config["table_name"], duthost)) + sonic_host_or_asic_inst.command("config acl remove table {}".format(acl_table_config["table_name"])) + else: + pytest.fail("Unvalid op {} should use add or remove".format(op)) + + +@pytest.fixture +def acl_egress(duthosts, loganalyzer, setup): + """ + Create acl table OUTDATAACL + Create acl rule defined in config file. 
+ Delete rule and table after test case finished + """ + base_dir = os.path.dirname(os.path.realpath(__file__)) + template_dir = os.path.join(base_dir, 'acl_templates') + acl_rules_template = "acltb_test_rule_egress.json" + del_acl_rules_template = "acl_rule_del.json" + dut_tmp_dir = os.path.join("tmp", os.path.basename(base_dir)) + dut_clear_conf_file_path = os.path.join(dut_tmp_dir, del_acl_rules_template) + + for duthost in duthosts: + try: + loganalyzer[duthost.hostname].expect_regex = [LOG_EXPECT_ACL_TABLE_CREATE_RE] + with loganalyzer[duthost.hostname]: + create_or_remove_acl_egress_table(duthost, setup, "add") + except LogAnalyzerError as err: + # Cleanup Config DB if table creation failed + logger.error("ACL table creation failed, attempting to clean-up...") + create_or_remove_acl_egress_table(duthost, setup, "remove") + raise err + + acl_setup(duthosts, loganalyzer, template_dir, acl_rules_template, del_acl_rules_template, dut_tmp_dir, + dut_clear_conf_file_path) + yield + acl_teardown(duthosts, loganalyzer, dut_tmp_dir, dut_clear_conf_file_path) + + for duthost in duthosts: + create_or_remove_acl_egress_table(duthost, setup, "remove") + + def log_pkt_params(dut_iface, mac_dst, mac_src, ip_dst, ip_src): """ Displays information about packet fields used in test case: mac_dst, mac_src, ip_dst, ip_src """ logger.info("Selected TX interface on DUT - {}".format(dut_iface)) @@ -575,12 +689,6 @@ def test_dst_ip_link_local(do_test, ptfadapter, duthosts, rand_one_dut_hostname, do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) -# Test case is skipped, because SONiC does not have a control to adjust loop-back filter settings. -# Default SONiC behaviour is to forward the traffic, so loop-back filter does not triggers for IP packets. -# All router interfaces has attribute "sx_interface_attributes_t.loopback_enable" - enabled. -# To enable loop-back filter drops - need to disable that attribute when create RIF. -# To do this can be used SAI attribute SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION, which is not exposed to SONiC -@pytest.mark.skip(reason="SONiC can't enable loop-back filter feature") def test_loopback_filter(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet drops by loopback-filter. 
Loop-back filter means that route to the host @@ -618,7 +726,7 @@ def test_ip_pkt_with_expired_ttl(duthost, do_test, ptfadapter, setup, tx_dut_por log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"]) - + pkt = testutils.simple_tcp_packet( eth_dst=ports_info["dst_mac"], # DUT port eth_src=ports_info["src_mac"], # PTF port @@ -772,3 +880,58 @@ def test_non_routable_igmp_pkts(do_test, ptfadapter, setup, fanouthost, tx_dut_p log_pkt_params(ports_info["dut_iface"], ethernet_dst, ports_info["src_mac"], pkt.getlayer("IP").dst, pkt_fields["ipv4_src"]) do_test("L3", pkt, ptfadapter, ports_info, setup["dut_to_ptf_port_map"].values(), tx_dut_ports) + + +def test_acl_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, acl_ingress, + ports_info): + """ + @summary: Verify that DUT drops packet with SRC IP 20.0.0.0/24 matched by ingress ACL + """ + duthost = duthosts[rand_one_dut_hostname] + if tx_dut_ports[ports_info["dut_iface"]] not in \ + duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"]["DATAACL"]["ports"]: + pytest.skip("RX DUT port absent in 'DATAACL' table") + + ip_src = "20.0.0.5" + + log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], + ip_src) + + pkt = testutils.simple_tcp_packet( + eth_dst=ports_info["dst_mac"], # DUT port + eth_src=ports_info["src_mac"], # PTF port + ip_src=ip_src, + ip_dst=pkt_fields["ipv4_dst"], + tcp_sport=pkt_fields["tcp_sport"], + tcp_dport=pkt_fields["tcp_dport"] + ) + + do_test("ACL", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) + + +def test_acl_egress_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, + acl_egress, ports_info): + """ + @summary: Verify that DUT drops packet with DST IP 192.168.144.1/24 matched by egress ACL and ACL drop counter incremented + """ + duthost = duthosts[rand_one_dut_hostname] + if tx_dut_ports[ports_info["dut_iface"]] not in \ + duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"]["DATAACL"]["ports"]: + pytest.skip("RX DUT port absent in 'DATAACL' table") + + ip_dst = "192.168.144.1" + + log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], ip_dst, + pkt_fields["ipv4_src"]) + + pkt = testutils.simple_tcp_packet( + eth_dst=ports_info["dst_mac"], # DUT port + eth_src=ports_info["src_mac"], # PTF port + ip_dst=ip_dst, + ip_src=pkt_fields["ipv4_src"], + tcp_sport=pkt_fields["tcp_sport"], + tcp_dport=pkt_fields["tcp_dport"], + ip_ttl=64 + ) + do_test(discard_group="ACL", pkt=pkt, ptfadapter=ptfadapter, ports_info=ports_info, + sniff_ports=setup["neighbor_sniff_ports"], tx_dut_ports=tx_dut_ports, drop_information="OUTDATAACL") diff --git a/tests/drop_packets/test_configurable_drop_counters.py b/tests/drop_packets/test_configurable_drop_counters.py index 1e72d9a5c1..0119fd3471 100644 --- a/tests/drop_packets/test_configurable_drop_counters.py +++ b/tests/drop_packets/test_configurable_drop_counters.py @@ -12,17 +12,24 @@ import random import time import json +import tempfile +import re from collections import defaultdict import pytest import ptf.testutils as testutils -from netaddr import IPNetwork +from netaddr import IPNetwork, EUI import configurable_drop_counters as cdc from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until from tests.common.platform.device_utils import fanout_switch_port_lookup +from 
tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # lgtm[py/unused-import] +from tests.common.utilities import is_ipv4_address +from tests.common import constants +from tests.common import config_reload + pytestmark = [ pytest.mark.topology('any') @@ -37,9 +44,74 @@ MOCK_DEST_IP = "2.2.2.2" LINK_LOCAL_IP = "169.254.0.1" + +def apply_fdb_config(duthost, vlan_id, iface, mac_address, op, type): + """ Generate FDB config file to apply it using 'swssconfig' tool. + Generated config file template: + [ + { + "FDB_TABLE:[vlan_id]:XX-XX-XX-XX-XX-XX": { + "port": "Ethernet0", + "type": "static" + }, + "OP": "SET" + } + ] + """ + dut_fdb_config = "/tmp/fdb.json" + fdb_config_json = [] + entry_key_template = "FDB_TABLE:{vid}:{mac}" + + fdb_entry_json = {entry_key_template.format(vid=vlan_id, mac=mac_address): + {"port": iface, "type": type}, + "OP": op + } + fdb_config_json.append(fdb_entry_json) + + with tempfile.NamedTemporaryFile(suffix=".json", prefix="fdb_config") as fp: + logging.info("Generating FDB config") + json.dump(fdb_config_json, fp) + fp.flush() + + # Copy FDB JSON config to switch + duthost.template(src=fp.name, dest=dut_fdb_config, force=True) + + # Copy FDB JSON config to SWSS container + cmd = "docker cp {} swss:/".format(dut_fdb_config) + duthost.command(cmd) + + # Set FDB entry + cmd = "docker exec -i swss swssconfig /fdb.json" + duthost.command(cmd) + time.sleep(3) + + cmd = "docker exec -i swss rm -f /fdb.json" + duthost.command(cmd) + time.sleep(5) + +def verifyFdbArp(duthost, dst_ip, dst_mac, dst_intf): + """ + Check if the ARP and FDB entry is present + """ + logging.info("Verify if the ARP and FDB entry is present for {}".format(dst_ip)) + result = duthost.command("show arp {}".format(dst_ip)) + pytest_assert("Total number of entries 1" in result['stdout'], + "ARP entry for {} missing in ASIC".format(dst_ip)) + result = duthost.shell("ip neigh show {}".format(dst_ip)) + pytest_assert(result['stdout_lines'], "{} not in arp table".format(dst_ip)) + match = re.match("{}.*lladdr\s+(.*)\s+[A-Z]+".format(dst_ip), + result['stdout_lines'][0]) + pytest_assert(match, + "Regex failed while retrieving arp entry for {}".format(dst_ip)) + pytest_assert(match.group(1).replace(":", "-") == dst_mac, + "ARP entry's lladdr is changed from {} to {}".format(dst_mac, match.group(1).replace(":", "-"))) + + fdb_count = int(duthost.shell("show mac | grep {} | grep {} | wc -l".format(match.group(1), dst_intf))["stdout"]) + pytest_assert(fdb_count == 1, "FDB entry doesn't exist for {}, fdb_count is {}".format(dst_mac, fdb_count)) + @pytest.mark.parametrize("drop_reason", ["L3_EGRESS_LINK_DOWN"]) -def test_neighbor_link_down(testbed_params, setup_counters, duthosts, rand_one_dut_hostname, mock_server, - send_dropped_traffic, drop_reason): +def test_neighbor_link_down(testbed_params, setup_counters, duthosts, rand_one_dut_hostname, toggle_all_simulator_ports_to_rand_selected_tor, mock_server, + send_dropped_traffic, drop_reason, generate_dropped_packet, tbinfo): """ Verifies counters that check for a neighbor link being down. 
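The helper apply_fdb_config() added above drives swssconfig with a generated FDB_TABLE payload. A minimal offline sketch of the JSON it produces, assuming a hypothetical Vlan1000/Ethernet4 static entry (the VLAN, port and MAC values are placeholders; only the structure follows the docstring template):

import json

def build_fdb_entry(vlan_id, iface, mac_address, op="SET", entry_type="static"):
    # Same shape as the payload assembled inside apply_fdb_config()
    key = "FDB_TABLE:{vid}:{mac}".format(vid=vlan_id, mac=mac_address)
    return [{key: {"port": iface, "type": entry_type}, "OP": op}]

if __name__ == "__main__":
    payload = build_fdb_entry("Vlan1000", "Ethernet4", "00-11-22-33-44-55")
    # apply_fdb_config() writes this to /tmp/fdb.json, copies it into the swss
    # container and applies it with "docker exec -i swss swssconfig /fdb.json"
    print(json.dumps(payload, indent=4))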
@@ -56,25 +128,37 @@ def test_neighbor_link_down(testbed_params, setup_counters, duthosts, rand_one_d rx_port = random.choice([port for port in testbed_params["physical_port_map"].keys() if port != mock_server["server_dst_port"]]) - rx_mac = duthost.get_dut_iface_mac(testbed_params["physical_port_map"][rx_port]) - logging.info("Selected port %s, mac = %s to send traffic", rx_port, rx_mac) + logging.info("Selected port %s to send traffic", rx_port) - src_mac = "DE:AD:BE:EF:12:34" src_ip = MOCK_DEST_IP - pkt = _get_simple_ip_packet(src_mac, rx_mac, src_ip, mock_server["server_dst_addr"]) + pkt = generate_dropped_packet(rx_port, src_ip, mock_server["server_dst_addr"]) try: + # Add a static fdb entry + apply_fdb_config(duthost, testbed_params['vlan_interface']['attachto'], + mock_server['server_dst_intf'], mock_server['server_dst_mac'], + "SET", "static") mock_server["fanout_neighbor"].shutdown(mock_server["fanout_intf"]) + time.sleep(3) + verifyFdbArp(duthost, mock_server['server_dst_addr'], mock_server['server_dst_mac'], mock_server['server_dst_intf']) send_dropped_traffic(counter_type, pkt, rx_port) finally: mock_server["fanout_neighbor"].no_shutdown(mock_server["fanout_intf"]) duthost.command("sonic-clear fdb all") duthost.command("sonic-clear arp") + # Delete the static fdb entry + apply_fdb_config(duthost, testbed_params['vlan_interface']['attachto'], + mock_server['server_dst_intf'], mock_server['server_dst_mac'], + "DEL", "static") + # FIXME: Add config reload on t0-backend as a workaround to keep DUT healthy because the following + # drop packet testcases will suffer from the brcm_sai_get_port_stats errors flooded in syslog + if "backend" in tbinfo["topo"]["name"]: + config_reload(duthost) @pytest.mark.parametrize("drop_reason", ["DIP_LINK_LOCAL"]) def test_dip_link_local(testbed_params, setup_counters, duthosts, rand_one_dut_hostname, - send_dropped_traffic, drop_reason): + send_dropped_traffic, drop_reason, add_default_route_to_dut, generate_dropped_packet): """ Verifies counters that check for link local dst IP. @@ -85,13 +169,10 @@ def test_dip_link_local(testbed_params, setup_counters, duthosts, rand_one_dut_h counter_type = setup_counters([drop_reason]) rx_port = random.choice(testbed_params["physical_port_map"].keys()) - rx_mac = duthost.get_dut_iface_mac(testbed_params["physical_port_map"][rx_port]) - logging.info("Selected port %s, mac = %s to send traffic", rx_port, rx_mac) + logging.info("Selected port %s to send traffic", rx_port) - src_mac = "DE:AD:BE:EF:12:34" src_ip = "10.10.10.10" - - pkt = _get_simple_ip_packet(src_mac, rx_mac, src_ip, LINK_LOCAL_IP) + pkt = generate_dropped_packet(rx_port, src_ip, LINK_LOCAL_IP) try: send_dropped_traffic(counter_type, pkt, rx_port) @@ -102,7 +183,7 @@ def test_dip_link_local(testbed_params, setup_counters, duthosts, rand_one_dut_h @pytest.mark.parametrize("drop_reason", ["SIP_LINK_LOCAL"]) def test_sip_link_local(testbed_params, setup_counters, duthosts, rand_one_dut_hostname, - send_dropped_traffic, drop_reason): + send_dropped_traffic, drop_reason, add_default_route_to_dut, generate_dropped_packet): """ Verifies counters that check for link local src IP. 
@@ -113,13 +194,10 @@ def test_sip_link_local(testbed_params, setup_counters, duthosts, rand_one_dut_h counter_type = setup_counters([drop_reason]) rx_port = random.choice(testbed_params["physical_port_map"].keys()) - rx_mac = duthost.get_dut_iface_mac(testbed_params["physical_port_map"][rx_port]) - logging.info("Selected port %s, mac = %s to send traffic", rx_port, rx_mac) + logging.info("Selected port %s to send traffic", rx_port) - src_mac = "DE:AD:BE:EF:12:34" dst_ip = "10.10.10.10" - - pkt = _get_simple_ip_packet(src_mac, rx_mac, LINK_LOCAL_IP, dst_ip) + pkt = generate_dropped_packet(rx_port, LINK_LOCAL_IP, dst_ip) try: send_dropped_traffic(counter_type, pkt, rx_port) @@ -128,6 +206,40 @@ def test_sip_link_local(testbed_params, setup_counters, duthosts, rand_one_dut_h duthost.command("sonic-clear arp") +@pytest.fixture +def add_default_route_to_dut(duts_running_config_facts, duthosts, tbinfo): + """ + Add a default route to the device for storage backend testbed. + This is to ensure the packet sent in test_sip_link_local and test_dip_link_local + are routable on the device. + """ + if "backend" in tbinfo["topo"]["name"]: + logging.info("Add default route on the DUT.") + try: + for duthost in duthosts: + cfg_facts = duts_running_config_facts[duthost.hostname] + for asic_index, asic_cfg_facts in enumerate(cfg_facts): + asic = duthost.asic_instance(asic_index) + bgp_neighbors = asic_cfg_facts["BGP_NEIGHBOR"] + ipv4_cmd_parts = ["ip route add default"] + for neighbor in bgp_neighbors.keys(): + if is_ipv4_address(neighbor): + ipv4_cmd_parts.append("nexthop via %s" % neighbor) + ipv4_cmd_parts.sort() + ipv4_cmd = " ".join(ipv4_cmd_parts) + asic.shell(ipv4_cmd) + yield + finally: + logging.info("Remove default route on the DUT.") + for duthost in duthosts: + for asic in duthost.asics: + if asic.is_it_backend(): + continue + asic.shell("ip route del default", module_ignore_errors=True) + else: + yield + + @pytest.fixture(scope="module") def testbed_params(duthosts, rand_one_dut_hostname, tbinfo): """ @@ -150,9 +262,12 @@ def testbed_params(duthosts, rand_one_dut_hostname, tbinfo): for ifname in mgFacts["minigraph_vlans"].values()[VLAN_INDEX]["members"]] + vlan_interface = mgFacts["minigraph_vlan_interfaces"][VLAN_INDEX].copy() + vlan_interface["type"] = mgFacts["minigraph_vlans"].values()[VLAN_INDEX].get("type", "untagged").lower() + return {"physical_port_map": physical_port_map, "vlan_ports": vlan_ports, - "vlan_interface": mgFacts["minigraph_vlan_interfaces"][VLAN_INDEX]} + "vlan_interface": vlan_interface} @pytest.fixture(scope="module") @@ -241,23 +356,26 @@ def _check_drops(): recv_count, dst_port, PACKET_COUNT) return recv_count == PACKET_COUNT - pytest_assert(wait_until(5, 1, _check_drops), "Expected {} drops".format(PACKET_COUNT)) + pytest_assert(wait_until(10, 2, 0, _check_drops), "Expected {} drops".format(PACKET_COUNT)) return _runner @pytest.fixture -def arp_responder(ptfhost, testbed_params): +def arp_responder(ptfhost, testbed_params, tbinfo): """Set up the ARP responder utility in the PTF container.""" vlan_network = testbed_params["vlan_interface"]["subnet"] + is_storage_backend = "backend" in tbinfo["topo"]["name"] logging.info("Generating simulated servers under VLAN network %s", vlan_network) - arp_responder_conf = {} vlan_host_map = _generate_vlan_servers(vlan_network, testbed_params["vlan_ports"]) logging.info("Generating ARP responder topology") - for port in vlan_host_map: - arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port] + if is_storage_backend: + 
vlan_id = testbed_params["vlan_interface"]["attachto"].lstrip("Vlan") + arp_responder_conf = {"eth%s%s%s" % (k, constants.VLAN_SUB_INTERFACE_SEPARATOR, vlan_id): v for k, v in vlan_host_map.items()} + else: + arp_responder_conf = {"eth%s" % k: v for k, v in vlan_host_map.items()} logging.info("Copying ARP responder topology to PTF") with open("/tmp/from_t1.json", "w") as ar_config: @@ -294,6 +412,7 @@ def mock_server(fanouthosts, testbed_params, arp_responder, ptfadapter, duthosts duthost = duthosts[rand_one_dut_hostname] server_dst_port = random.choice(arp_responder.keys()) server_dst_addr = random.choice(arp_responder[server_dst_port].keys()) + server_dst_mac = str(EUI(arp_responder[server_dst_port].get(server_dst_addr))) server_dst_intf = testbed_params["physical_port_map"][server_dst_port] logging.info("Creating mock server with IP %s; dut port = %s, dut intf = %s", server_dst_addr, server_dst_port, server_dst_intf) @@ -306,15 +425,47 @@ def mock_server(fanouthosts, testbed_params, arp_responder, ptfadapter, duthosts logging.info("Populating FDB and ARP entry for mock server under VLAN") # Issue a ping to populate ARP table on DUT duthost.command('ping %s -c 3' % server_dst_addr, module_ignore_errors=True) + + time.sleep(5) fanout_neighbor, fanout_intf = fanout_switch_port_lookup(fanouthosts, duthost.hostname, server_dst_intf) return {"server_dst_port": server_dst_port, "server_dst_addr": server_dst_addr, + "server_dst_mac": server_dst_mac, "server_dst_intf": server_dst_intf, "fanout_neighbor": fanout_neighbor, "fanout_intf": fanout_intf} +@pytest.fixture +def generate_dropped_packet(duthosts, rand_one_dut_hostname, testbed_params): + + def _get_simple_ip_packet(rx_port, src_ip, dst_ip): + dst_mac = duthost.get_dut_iface_mac(testbed_params["physical_port_map"][rx_port]) + src_mac = "DE:AD:BE:EF:12:34" + # send tagged packet for t0-backend whose vlan mode is tagged + enable_vlan = rx_port in testbed_params["vlan_ports"] and testbed_params["vlan_interface"]["type"] == "tagged" + packet_params = dict( + eth_src=src_mac, + eth_dst=dst_mac, + ip_src=src_ip, + ip_dst=dst_ip + ) + if enable_vlan: + packet_params["dl_vlan_enable"] = enable_vlan + packet_params["vlan_vid"] = int(testbed_params["vlan_interface"]["attachto"].lstrip("Vlan")) + pkt = testutils.simple_ip_packet(**packet_params) + + logging.info("Generated simple IP packet (SMAC=%s, DMAC=%s, SIP=%s, DIP=%s)", + src_mac, dst_mac, src_ip, dst_ip) + + return pkt + + duthost = duthosts[rand_one_dut_hostname] + + return _get_simple_ip_packet + + def _generate_vlan_servers(vlan_network, vlan_ports): vlan_host_map = defaultdict(dict) @@ -335,20 +486,6 @@ def _generate_vlan_servers(vlan_network, vlan_ports): return vlan_host_map -def _get_simple_ip_packet(src_mac, dst_mac, src_ip, dst_ip): - pkt = testutils.simple_ip_packet( - eth_src=src_mac, - eth_dst=dst_mac, - ip_src=src_ip, - ip_dst=dst_ip - ) - - logging.info("Generated simple IP packet (SMAC=%s, DMAC=%s, SIP=%s, DIP=%s)", - src_mac, dst_mac, src_ip, dst_ip) - - return pkt - - def _send_packets(duthost, ptfadapter, pkt, ptf_tx_port_id, count=PACKET_COUNT): duthost.command("sonic-clear dropcounters") @@ -358,4 +495,3 @@ def _send_packets(duthost, ptfadapter, pkt, ptf_tx_port_id, testutils.send(ptfadapter, ptf_tx_port_id, pkt, count=count) time.sleep(1) - diff --git a/tests/drop_packets/test_drop_counters.py b/tests/drop_packets/test_drop_counters.py index ec5b9a9f38..5a2754ffaf 100755 --- a/tests/drop_packets/test_drop_counters.py +++ b/tests/drop_packets/test_drop_counters.py @@ -5,7 
+5,6 @@ import yaml import re -import ptf.packet as packet import ptf.testutils as testutils from collections import defaultdict @@ -28,9 +27,6 @@ NAMESPACE_SUFFIX = "-n {} " GET_L2_COUNTERS = "portstat -j " GET_L3_COUNTERS = "intfstat -j " -ACL_COUNTERS_UPDATE_INTERVAL = 10 -LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*" -LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*" LOG_EXPECT_PORT_ADMIN_DOWN_RE = ".*Configure {} admin status to down.*" LOG_EXPECT_PORT_ADMIN_UP_RE = ".*Port {} oper state set from down to up.*" @@ -92,48 +88,8 @@ def parse_combined_counters(duthosts, rand_one_dut_hostname): break -@pytest.fixture -def acl_setup(duthosts, loganalyzer): - """ Create acl rule defined in config file. Delete rule after test case finished """ - for duthost in duthosts: - acl_facts = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"] - if 'DATAACL' not in acl_facts.keys(): - pytest.skip("Skipping test since DATAACL table is not supported on this platform") - - base_dir = os.path.dirname(os.path.realpath(__file__)) - template_dir = os.path.join(base_dir, 'acl_templates') - acl_rules_template = "acltb_test_rule.json" - del_acl_rules_template = "acl_rule_del.json" - dut_tmp_dir = os.path.join("tmp", os.path.basename(base_dir)) - - duthost.command("mkdir -p {}".format(dut_tmp_dir)) - dut_conf_file_path = os.path.join(dut_tmp_dir, acl_rules_template) - dut_clear_conf_file_path = os.path.join(dut_tmp_dir, del_acl_rules_template) - - logger.info("Generating config for ACL rule, ACL table - DATAACL") - duthost.template(src=os.path.join(template_dir, acl_rules_template), dest=dut_conf_file_path) - logger.info("Generating clear config for ACL rule, ACL table - DATAACL") - duthost.template(src=os.path.join(template_dir, del_acl_rules_template), dest=dut_clear_conf_file_path) - - logger.info("Applying {}".format(dut_conf_file_path)) - - loganalyzer[duthost.hostname].expect_regex = [LOG_EXPECT_ACL_RULE_CREATE_RE] - with loganalyzer[duthost.hostname]: - duthost.command("config acl update full {}".format(dut_conf_file_path)) - - yield - - for duthost in duthosts: - loganalyzer[duthost.hostname].expect_regex = [LOG_EXPECT_ACL_RULE_REMOVE_RE] - with loganalyzer[duthost.hostname]: - logger.info("Applying {}".format(dut_clear_conf_file_path)) - duthost.command("config acl update full {}".format(dut_clear_conf_file_path)) - logger.info("Removing {}".format(dut_tmp_dir)) - duthost.command("rm -rf {}".format(dut_tmp_dir)) - time.sleep(ACL_COUNTERS_UPDATE_INTERVAL) - - -def base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, ports_info, tx_dut_ports=None, skip_counter_check=False): +def base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, ports_info, tx_dut_ports=None, + skip_counter_check=False, drop_information=None): """ Base test function for verification of L2 or L3 packet drops. Verification type depends on 'discard_group' value. 
Supported 'discard_group' values: 'L2', 'L3', 'ACL', 'NO_DROPS' @@ -177,7 +133,8 @@ def base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, port time.sleep(ACL_COUNTERS_UPDATE_INTERVAL) acl_drops = 0 for duthost in duthosts: - acl_drops += duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"]["DATAACL"]["rules"]["RULE_1"]["packets_count"] + acl_drops += duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][ + drop_information if drop_information else "DATAACL"]["rules"]["RULE_1"]["packets_count"] if acl_drops != PKT_NUMBER: fail_msg = "ACL drop counter was not incremented on iface {}. DUT ACL counter == {}; Sent pkts == {}".format( tx_dut_ports[ports_info["dut_iface"]], acl_drops, PKT_NUMBER @@ -204,42 +161,60 @@ def get_intf_mtu(duthost, intf, asic_index): @pytest.fixture -def mtu_config(duthosts): +def mtu_config(duthosts, rand_one_dut_hostname): """ Fixture which prepare port MTU configuration for 'test_ip_pkt_with_exceeded_mtu' test case """ class MTUConfig(object): iface = None mtu = None default_mtu = 9100 + key = None + asic_index = None @classmethod def set_mtu(cls, mtu, iface, asic_index): - for duthost in duthosts: - namespace = duthost.get_namespace_from_asic_id(asic_index) if duthost.is_multi_asic else '' - cls.mtu = duthost.command("sonic-db-cli -n '{}' CONFIG_DB hget \"PORTCHANNEL|{}\" mtu".format(namespace, iface))["stdout"] - if not cls.mtu: - cls.mtu = cls.default_mtu - if "PortChannel" in iface: - duthost.command("sonic-db-cli -n '{}' CONFIG_DB hset \"PORTCHANNEL|{}\" mtu {}".format(namespace, iface, mtu))["stdout"] - elif "Ethernet" in iface: - duthost.command("sonic-db-cli -n '{}' CONFIG_DB hset \"PORT|{}\" mtu {}".format(namespace, iface, mtu))["stdout"] - else: - raise Exception("Unsupported interface parameter - {}".format(iface)) - cls.iface = iface - check_mtu = lambda: get_intf_mtu(duthost, iface, asic_index) == mtu # lgtm[py/loop-variable-capture] - pytest_assert(wait_until(5, 1, check_mtu), "MTU on interface {} not updated".format(iface)) - cls.asic_index = asic_index + duthost = duthosts[rand_one_dut_hostname] + namespace = duthost.get_namespace_from_asic_id(asic_index) if duthost.is_multi_asic else '' + if "PortChannel" in iface: + cls.key = "PORTCHANNEL" + elif "Ethernet" in iface: + cls.key = "PORT" + else: + raise Exception("Unsupported interface parameter - {}".format(iface)) + + cls.mtu = duthost.command( + "sonic-db-cli -n '{}' CONFIG_DB hget \"{}|{}\" mtu".format( + namespace, cls.key, iface + ) + )["stdout"] + + if not cls.mtu: + cls.mtu = cls.default_mtu + + duthost.command( + "sonic-db-cli -n '{}' CONFIG_DB hset \"{}|{}\" mtu {}".format( + namespace, cls.key, iface, mtu + ) + )["stdout"] + + cls.asic_index = asic_index + cls.iface = iface + check_mtu = lambda: get_intf_mtu(duthost, iface, asic_index) == mtu # lgtm[py/loop-variable-capture] + pytest_assert( + wait_until(5, 1, 0, check_mtu), + "MTU on interface {} not updated".format(iface) + ) @classmethod def restore_mtu(cls): - for duthost in duthosts: - if cls.iface: - namespace = duthost.get_namespace_from_asic_id(cls.asic_index) if duthost.is_multi_asic else '' - if "PortChannel" in cls.iface: - duthost.command("sonic-db-cli -n '{}' CONFIG_DB hset \"PORTCHANNEL|{}\" mtu {}".format(namespace, cls.iface, cls.mtu))["stdout"] - elif "Ethernet" in cls.iface: - duthost.command("sonic-db-cli -n '{}' CONFIG_DB hset \"PORT|{}\" mtu {}".format(namespace, cls.iface, cls.mtu))["stdout"] - else: - raise Exception("Trying to restore MTU on unsupported interface - 
{}".format(cls.iface)) + duthost = duthosts[rand_one_dut_hostname] + namespace = duthost.get_namespace_from_asic_id( + cls.asic_index + ) if duthost.is_multi_asic else '' + duthost.command( + "sonic-db-cli -n '{}' CONFIG_DB hset \"{}|{}\" mtu {}".format( + namespace, cls.key, cls.iface, cls.mtu + ) + )["stdout"] yield MTUConfig @@ -254,7 +229,8 @@ def check_if_skip(): @pytest.fixture(scope='module') def do_test(duthosts): - def do_counters_test(discard_group, pkt, ptfadapter, ports_info, sniff_ports, tx_dut_ports=None, comparable_pkt=None, skip_counter_check=False): + def do_counters_test(discard_group, pkt, ptfadapter, ports_info, sniff_ports, tx_dut_ports=None, + comparable_pkt=None, skip_counter_check=False, drop_information=None): """ Execute test - send packet, check that expected discard counters were incremented and packet was dropped @param discard_group: Supported 'discard_group' values: 'L2', 'L3', 'ACL', 'NO_DROPS' @@ -266,7 +242,8 @@ def do_counters_test(discard_group, pkt, ptfadapter, ports_info, sniff_ports, tx """ check_if_skip() asic_index = ports_info["asic_index"] - base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, ports_info, tx_dut_ports, skip_counter_check=skip_counter_check) + base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, ports_info, tx_dut_ports, + skip_counter_check=skip_counter_check, drop_information=drop_information) # Verify packets were not egresed the DUT if discard_group != "NO_DROPS": @@ -310,39 +287,6 @@ def test_reserved_dmac_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname do_test("L2", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"]) -def test_acl_drop(do_test, ptfadapter, duthosts, rand_one_dut_hostname, setup, tx_dut_ports, pkt_fields, acl_setup, ports_info): - """ - @summary: Verify that DUT drops packet with SRC IP 20.0.0.0/24 matched by ingress ACL and ACL drop counter incremented - """ - duthost = duthosts[rand_one_dut_hostname] - acl_facts = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"] - if 'DATAACL' not in acl_facts.keys(): - pytest.skip("Skipping test since DATAACL table is not supported on this platform") - - if tx_dut_ports[ports_info["dut_iface"]] not in acl_facts["DATAACL"]["ports"]: - pytest.skip("RX DUT port absent in 'DATAACL' table") - - ip_src = "20.0.0.5" - - log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], ip_src) - - pkt = testutils.simple_tcp_packet( - eth_dst=ports_info["dst_mac"], # DUT port - eth_src=ports_info["src_mac"], # PTF port - ip_src=ip_src, - ip_dst=pkt_fields["ipv4_dst"], - tcp_sport=pkt_fields["tcp_sport"], - tcp_dport=pkt_fields["tcp_dport"] - ) - asic_index = ports_info["asic_index"] - base_verification("ACL", pkt, ptfadapter, duthosts, asic_index, ports_info, tx_dut_ports) - - # Verify packets were not egresed the DUT - exp_pkt = expected_packet_mask(pkt) - exp_pkt.set_do_not_care_scapy(packet.IP, 'ip_src') - testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=setup["neighbor_sniff_ports"]) - - def test_no_egress_drop_on_down_link(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, rif_port_down, ports_info): """ @summary: Verify that packets on ingress port are not dropped when egress RIF link is down and check that drop counters not incremented diff --git a/tests/everflow/everflow_test_utilities.py b/tests/everflow/everflow_test_utilities.py index d685499e5f..09b3ad74aa 100644 --- a/tests/everflow/everflow_test_utilities.py +++ 
b/tests/everflow/everflow_test_utilities.py @@ -32,6 +32,8 @@ STABILITY_BUFFER = 0.05 #50msec +OUTER_HEADER_SIZE = 38 + @pytest.fixture(scope="module") def setup_info(duthosts, rand_one_dut_hostname, tbinfo): """ @@ -190,7 +192,6 @@ def get_port_info(in_port_list, out_port_list, out_port_ptf_id_list, out_port_la if k in mg_facts["minigraph_ports"] } } - # Disable BGP so that we don't keep on bouncing back mirror packets # If we send TTL=1 packet we don't need this but in multi-asic TTL > 1 duthost.command("sudo config bgp shutdown all") @@ -278,8 +279,6 @@ class BaseEverflowTest(object): mirror and ACL stage for the tests. """ - OUTER_HEADER_SIZE = 38 - @pytest.fixture(scope="class", params=[CONFIG_MODE_CLI]) def config_method(self, request): """Get the configuration method for this set of test cases. @@ -304,13 +303,13 @@ def setup_mirror_session(self, duthosts, rand_one_dut_hostname, config_method): dict: Information about the mirror session configuration. """ duthost = duthosts[rand_one_dut_hostname] - session_info = self._mirror_session_info("test_session_1", duthost.facts["asic_type"]) + session_info = BaseEverflowTest.mirror_session_info("test_session_1", duthost.facts["asic_type"]) - self.apply_mirror_config(duthost, session_info, config_method) + BaseEverflowTest.apply_mirror_config(duthost, session_info, config_method) yield session_info - self.remove_mirror_config(duthost, session_info["session_name"], config_method) + BaseEverflowTest.remove_mirror_config(duthost, session_info["session_name"], config_method) @pytest.fixture(scope="class") def policer_mirror_session(self, duthosts, rand_one_dut_hostname, config_method): @@ -330,16 +329,17 @@ def policer_mirror_session(self, duthosts, rand_one_dut_hostname, config_method) self.apply_policer_config(duthost, policer, config_method) # Create a mirror session with the TEST_POLICER attached - session_info = self._mirror_session_info("TEST_POLICER_SESSION", duthost.facts["asic_type"]) - self.apply_mirror_config(duthost, session_info, config_method, policer=policer) + session_info = BaseEverflowTest.mirror_session_info("TEST_POLICER_SESSION", duthost.facts["asic_type"]) + BaseEverflowTest.apply_mirror_config(duthost, session_info, config_method, policer=policer) yield session_info # Clean up mirror session and policer - self.remove_mirror_config(duthost, session_info["session_name"], config_method) + BaseEverflowTest.remove_mirror_config(duthost, session_info["session_name"], config_method) self.remove_policer_config(duthost, policer, config_method) - def apply_mirror_config(self, duthost, session_info, config_method, policer=None): + @staticmethod + def apply_mirror_config(duthost, session_info, config_method=CONFIG_MODE_CLI, policer=None): if config_method == CONFIG_MODE_CLI: command = "config mirror_session add {} {} {} {} {} {}" \ .format(session_info["session_name"], @@ -357,7 +357,8 @@ def apply_mirror_config(self, duthost, session_info, config_method, policer=None duthost.command(command) - def remove_mirror_config(self, duthost, session_name, config_method): + @staticmethod + def remove_mirror_config(duthost, session_name, config_method=CONFIG_MODE_CLI): if config_method == CONFIG_MODE_CLI: command = "config mirror_session remove {}".format(session_name) elif config_method == CONFIG_MODE_CONFIGLET: @@ -411,7 +412,7 @@ def setup_acl_table(self, duthosts, rand_one_dut_hostname, setup_info, setup_mir yield - self.remove_acl_rule_config(duthost, table_name, config_method) + BaseEverflowTest.remove_acl_rule_config(duthost, table_name, 
config_method) if self.acl_stage() == "egress": self.remove_acl_table_config(duthost, "EVERFLOW_EGRESS", config_method) @@ -472,7 +473,8 @@ def apply_acl_rule_config( duthost.command(command) time.sleep(2) - def remove_acl_rule_config(self, duthost, table_name, config_method): + @staticmethod + def remove_acl_rule_config(duthost, table_name, config_method=CONFIG_MODE_CLI): if config_method == CONFIG_MODE_CLI: duthost.copy(src=os.path.join(FILE_DIR, EVERFLOW_RULE_DELETE_FILE), dest=DUT_RUN_DIR) @@ -536,12 +538,12 @@ def send_and_check_mirror_packets(self, if src_port_namespace != dest_ports_namespace: src_port_set.add(dest_ports[0]) - expected_mirror_packet_with_ttl = self._get_expected_mirror_packet(mirror_session, + expected_mirror_packet_with_ttl = BaseEverflowTest.get_expected_mirror_packet(mirror_session, setup, duthost, mirror_packet, True) - expected_mirror_packet_without_ttl = self._get_expected_mirror_packet(mirror_session, + expected_mirror_packet_without_ttl = BaseEverflowTest.get_expected_mirror_packet(mirror_session, setup, duthost, mirror_packet, @@ -589,8 +591,9 @@ def send_and_check_mirror_packets(self, pytest_assert(inner_packet.pkt_match(mirror_packet), "Mirror payload does not match received packet") else: testutils.verify_no_packet_any(ptfadapter, expected_mirror_packet, dest_ports) - - def _get_expected_mirror_packet(self, mirror_session, setup, duthost, mirror_packet, check_ttl): + + @staticmethod + def get_expected_mirror_packet(mirror_session, setup, duthost, mirror_packet, check_ttl): payload = mirror_packet.copy() # Add vendor specific padding to the packet @@ -626,18 +629,19 @@ def _get_expected_mirror_packet(self, mirror_session, setup, duthost, mirror_pac expected_packet.set_do_not_care_scapy(packet.IP, "tos") # Mask off the payload (we check it later) - expected_packet.set_do_not_care(self.OUTER_HEADER_SIZE * 8, len(payload) * 8) + expected_packet.set_do_not_care(OUTER_HEADER_SIZE * 8, len(payload) * 8) return expected_packet def _extract_mirror_payload(self, encapsulated_packet, payload_size): - pytest_assert(len(encapsulated_packet) >= self.OUTER_HEADER_SIZE, - "Incomplete packet, expected at least {} header bytes".format(self.OUTER_HEADER_SIZE)) + pytest_assert(len(encapsulated_packet) >= OUTER_HEADER_SIZE, + "Incomplete packet, expected at least {} header bytes".format(OUTER_HEADER_SIZE)) inner_frame = encapsulated_packet[-payload_size:] return packet.Ether(inner_frame) - def _mirror_session_info(self, session_name, asic_type): + @staticmethod + def mirror_session_info(session_name, asic_type): session_src_ip = "1.1.1.1" session_dst_ip = "2.2.2.2" session_dscp = "8" diff --git a/tests/everflow/templates/acl-erspan.json.j2 b/tests/everflow/templates/acl-erspan.json.j2 index 93be3b27c1..280b0d70bc 100644 --- a/tests/everflow/templates/acl-erspan.json.j2 +++ b/tests/everflow/templates/acl-erspan.json.j2 @@ -17,11 +17,19 @@ }, {% for qset in rule["qualifiers"].keys() %} "{{ qset }}": { + {% if qset == "input_interface" %} + "interface_ref": { + "config": { + "interface": "{{ rule["qualifiers"]["input_interface"] }}" + } + } + {% else %} "config": { {% for qualifier, value in rule["qualifiers"][qset].items() %} "{{ qualifier }}": {{ value|to_nice_json }}{% if not loop.last %},{% endif %} {% endfor %} } + {% endif %} }{% if not loop.last %},{% endif %} {% endfor %} }{% if not loop.last %},{% endif %} diff --git a/tests/everflow/test_everflow_per_interface.py b/tests/everflow/test_everflow_per_interface.py new file mode 100644 index 0000000000..26f093d883 --- 
/dev/null +++ b/tests/everflow/test_everflow_per_interface.py @@ -0,0 +1,181 @@ +"""Test cases to support the Everflow Mirroring feature in SONiC.""" +import logging +import os +import time +import pytest + +import ptf.testutils as testutils +import everflow_test_utilities as everflow_utils + +from everflow_test_utilities import BaseEverflowTest +from everflow_test_utilities import TEMPLATE_DIR, EVERFLOW_RULE_CREATE_TEMPLATE, DUT_RUN_DIR, EVERFLOW_RULE_CREATE_FILE +from tests.common.helpers.assertions import pytest_require, pytest_assert + +from everflow_test_utilities import setup_info, EVERFLOW_DSCP_RULES # noqa: F401, E501 lgtm[py/unused-import] pylint: disable=import-error + +pytestmark = [ + pytest.mark.topology("any") +] + +EVERFLOW_TABLE_NAME = { + "ipv4": "EVERFLOW", + "ipv6": "EVERFLOWV6" +} + +EVERFLOW_SESSION_NAME = "everflow_session_per_interface" + +logger = logging.getLogger(__file__) + +@pytest.fixture(scope="module", autouse=True) +def skip_on_dualtor_testbed(tbinfo): + if 'dualtor' in tbinfo['topo']['name']: + pytest.skip("Skip running on dualtor testbed") + + +def build_candidate_ports(duthost, tbinfo): + """ + Build candidate ports for testing + """ + candidate_ports = {} + unselected_ports = {} + if tbinfo['topo']['type'] == 't0': + candidate_neigh_name = 'Server' + else: + candidate_neigh_name = 'T0' + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + + for dut_port, neigh in mg_facts["minigraph_neighbors"].items(): + ptf_idx = mg_facts["minigraph_ptf_indices"][dut_port] + if candidate_neigh_name in neigh['name'] and len(candidate_ports) < 4: + candidate_ports.update({dut_port: ptf_idx}) + if len(unselected_ports) < 4 and dut_port not in candidate_ports: + unselected_ports.update({dut_port: ptf_idx}) + + logger.info("Candidate testing ports are {}".format(candidate_ports)) + return candidate_ports, unselected_ports + + +def build_acl_rule_vars(candidate_ports, ip_ver): + """ + Build vars for generating ACL rule + """ + config_vars = {} + config_vars['acl_table_name'] = EVERFLOW_TABLE_NAME[ip_ver] + config_vars['rules'] = [{'qualifiers': {'input_interface': ','.join(candidate_ports.keys())}}] + return config_vars + + +@pytest.fixture(scope='module') +def apply_mirror_session(rand_selected_dut): + mirror_session_info = BaseEverflowTest.mirror_session_info(EVERFLOW_SESSION_NAME, rand_selected_dut.facts["asic_type"]) + logger.info("Applying mirror session to DUT") + BaseEverflowTest.apply_mirror_config(rand_selected_dut, mirror_session_info) + time.sleep(10) + cmd = 'sonic-db-cli STATE_DB hget \"MIRROR_SESSION_TABLE|{}\" \"monitor_port\"'.format(EVERFLOW_SESSION_NAME) + monitor_port = rand_selected_dut.shell(cmd=cmd)['stdout'] + pytest_assert(monitor_port != "", "Failed to retrieve monitor_port") + + yield mirror_session_info, monitor_port + + logger.info("Removing mirror session from DUT") + BaseEverflowTest.remove_mirror_config(rand_selected_dut, EVERFLOW_SESSION_NAME) + + +@pytest.fixture(scope='module', params=['ipv4', 'ipv6'], autouse=True) +def apply_acl_rule(request, rand_selected_dut, tbinfo, apply_mirror_session): + """ + Apply ACL rule for matching input_ports + """ + # Skip ipv6 test on Mellanox platform + ip_ver = request.param + if "mellanox" == rand_selected_dut.facts["asic_type"] and ip_ver == "ipv6": + pytest.skip("Match 'IN_PORTS' in EVERFLOWV6 is not supported on Mellanox platform") + # Check existence of EVERFLOW + table_name = EVERFLOW_TABLE_NAME[ip_ver] + output = rand_selected_dut.shell('show acl table {}'.format(table_name))['stdout_lines'] + # Skip if 
EVERFLOW table doesn't exist + pytest_require(len(output) > 2, "Skip test since {} doesn't exist".format(table_name)) + mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) + mirror_session_info, monitor_port = apply_mirror_session + # Build testing port list + candidate_ports, unselected_ports = build_candidate_ports(rand_selected_dut, tbinfo) + pytest_require(len(candidate_ports) >= 1, "Insufficient ports for testing") + + # Copy and apply ACL rule + config_vars = build_acl_rule_vars(candidate_ports, ip_ver) + rand_selected_dut.host.options["variable_manager"].extra_vars.update(config_vars) + rand_selected_dut.command("mkdir -p {}".format(DUT_RUN_DIR)) + rand_selected_dut.template(src=os.path.join(TEMPLATE_DIR, EVERFLOW_RULE_CREATE_TEMPLATE), + dest=os.path.join(DUT_RUN_DIR, EVERFLOW_RULE_CREATE_FILE)) + logger.info("Applying acl rule config to DUT") + command = "acl-loader update full {} --table_name {} --session_name {}" \ .format(os.path.join(DUT_RUN_DIR, EVERFLOW_RULE_CREATE_FILE), table_name, EVERFLOW_SESSION_NAME) + rand_selected_dut.shell(cmd=command) + ret = { + "candidate_ports": candidate_ports, + "unselected_ports": unselected_ports, + "mirror_session_info": mirror_session_info, + "monitor_port": {monitor_port: mg_facts["minigraph_ptf_indices"][monitor_port]} + } + + yield ret + + logger.info("Removing acl rule config from DUT") + BaseEverflowTest.remove_acl_rule_config(rand_selected_dut, table_name) + + +def generate_testing_packet(ptfadapter, duthost, mirror_session_info, router_mac): + packet = testutils.simple_tcp_packet( + eth_src=ptfadapter.dataplane.get_mac(0, 0), + eth_dst=router_mac + ) + setup = {} + setup["router_mac"] = router_mac + exp_packet = BaseEverflowTest.get_expected_mirror_packet(mirror_session_info, setup, duthost, packet, False) + return packet, exp_packet + + +def get_uplink_ports(duthost, tbinfo): + """The collector IP is a destination reachable via the default route. 
+ So we need to collect the uplink ports to do a packet capture + """ + uplink_ports = [] + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + if 't0' == tbinfo['topo']['type']: + neigh_name = 'T1' + else: + neigh_name = 'T2' + for dut_port, neigh in mg_facts["minigraph_neighbors"].items(): + ptf_idx = mg_facts["minigraph_ptf_indices"][dut_port] + if neigh_name in neigh['name']: + uplink_ports.append(ptf_idx) + return uplink_ports + + +def send_and_verify_packet(ptfadapter, packet, expected_packet, tx_port, rx_ports, exp_recv): + ptfadapter.dataplane.flush() + testutils.send(ptfadapter, pkt=packet, port_id=tx_port) + if exp_recv: + testutils.verify_packet_any_port(ptfadapter, pkt=expected_packet, ports=rx_ports, timeout=5) + else: + testutils.verify_no_packet_any(ptfadapter, pkt=expected_packet, ports=rx_ports) + + +def test_everflow_per_interface(ptfadapter, rand_selected_dut, apply_acl_rule, tbinfo): + """Verify packets ingressing from candidate ports are captured by EVERFLOW, while packets + ingressing from unselected ports are not captured + """ + everflow_config = apply_acl_rule + packet, exp_packet = generate_testing_packet(ptfadapter, rand_selected_dut, everflow_config['mirror_session_info'], rand_selected_dut.facts["router_mac"]) + uplink_ports = get_uplink_ports(rand_selected_dut, tbinfo) + # Verify that packets ingressing from INPUT_PORTS (candidate ports) are mirrored + for port, ptf_idx in everflow_config['candidate_ports'].items(): + logger.info("Verifying packet ingress from {} is mirrored".format(port)) + send_and_verify_packet(ptfadapter, packet, exp_packet, ptf_idx, uplink_ports, True) + + # Verify that packets ingressing from unselected ports are not mirrored + for port, ptf_idx in everflow_config['unselected_ports'].items(): + logger.info("Verifying packet ingress from {} is not mirrored".format(port)) + send_and_verify_packet(ptfadapter, packet, exp_packet, ptf_idx, uplink_ports, False) + + \ No newline at end of file diff --git a/tests/everflow/test_everflow_testbed.py b/tests/everflow/test_everflow_testbed.py index 9f77cc9503..eac068d38f 100644 --- a/tests/everflow/test_everflow_testbed.py +++ b/tests/everflow/test_everflow_testbed.py @@ -474,7 +474,7 @@ def test_everflow_dscp_with_policer( tolerance="10") finally: # Clean up ACL rules and routes - self.remove_acl_rule_config(duthost, table_name, config_method) + BaseEverflowTest.remove_acl_rule_config(duthost, table_name, config_method) self.remove_acl_table_config(duthost, table_name, config_method) if bind_interface_namespace: self.remove_acl_table_config(duthost, table_name, config_method, bind_interface_namespace) diff --git a/tests/fdb/files/fdb.j2 b/tests/fdb/files/fdb.j2 index e917f1c81c..795e6d133d 100644 --- a/tests/fdb/files/fdb.j2 +++ b/tests/fdb/files/fdb.j2 @@ -1,4 +1,14 @@ +{# TODO: tagged ports should be supported in the future #} {% for vlan in minigraph_vlan_interfaces %} -{{ vlan['subnet'] }} {% for ifname in minigraph_vlans[vlan['attachto']]['members'] %} {{ minigraph_port_indices[ifname] }} {% endfor %} +{{ vlan['subnet'] }} +{%- for ifname in minigraph_vlans[vlan['attachto']]['members'] -%} +{%- if ifname in minigraph_portchannels -%} +{%- if minigraph_portchannels[ifname]['members'] -%} +{{ ' ' }}{{ minigraph_port_indices[minigraph_portchannels[ifname]['members'][0]] }} +{%- endif -%} +{%- else -%} +{{ ' ' }}{{ minigraph_port_indices[ifname] }} +{%- endif -%} +{%- endfor %} {% endfor %} diff --git a/tests/fdb/test_fdb.py b/tests/fdb/test_fdb.py index 952772ccfb..1e72364fa3 100644 --- 
a/tests/fdb/test_fdb.py +++ b/tests/fdb/test_fdb.py @@ -1,11 +1,14 @@ import pytest import ptf.testutils as testutils +import ptf.packet as scapy +from ptf.mask import Mask import time import itertools import logging import pprint +import re from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import] @@ -15,7 +18,7 @@ from tests.common.dualtor.mux_simulator_control import mux_server_url, toggle_all_simulator_ports_to_rand_selected_tor pytestmark = [ - pytest.mark.topology('t0'), + pytest.mark.topology('t0', 't0-56-po2vlan'), pytest.mark.usefixtures('disable_fdb_aging') ] @@ -29,37 +32,57 @@ logger = logging.getLogger(__name__) -def send_eth(ptfadapter, source_port, source_mac, dest_mac): +def simple_eth_packet( + pktlen=60, + eth_dst="00:01:02:03:04:05", + eth_src="00:06:07:08:09:0a", + vlan_vid=0, + vlan_pcp=0 +): + pkt = scapy.Ether(dst=eth_dst, src=eth_src) + if vlan_vid or vlan_pcp: + pktlen += 4 + pkt /= scapy.Dot1Q(vlan=vlan_vid, prio=vlan_pcp) + pkt[scapy.Dot1Q : 1].type = DEFAULT_FDB_ETHERNET_TYPE + else: + pkt.type = DEFAULT_FDB_ETHERNET_TYPE + pkt = pkt / ("0" * (pktlen - len(pkt))) + + return pkt + +def send_eth(ptfadapter, source_port, source_mac, dest_mac, vlan_id): """ send ethernet packet :param ptfadapter: PTF adapter object :param source_port: source port :param source_mac: source MAC :param dest_mac: destination MAC + :param vlan_id: VLAN id :return: """ - pkt = testutils.simple_eth_packet( + pkt = simple_eth_packet( eth_dst=dest_mac, eth_src=source_mac, - eth_type=DEFAULT_FDB_ETHERNET_TYPE + vlan_vid=vlan_id ) - logger.debug('send packet source port id {} smac: {} dmac: {}'.format(source_port, source_mac, dest_mac)) + logger.debug('send packet source port id {} smac: {} dmac: {} vlan: {}'.format(source_port, source_mac, dest_mac, vlan_id)) testutils.send(ptfadapter, source_port, pkt) -def send_arp_request(ptfadapter, source_port, source_mac, dest_mac): +def send_arp_request(ptfadapter, source_port, source_mac, dest_mac, vlan_id): """ send arp request packet :param ptfadapter: PTF adapter object :param source_port: source port :param source_mac: source MAC :param dest_mac: destination MAC + :param vlan_id: VLAN id :return: """ pkt = testutils.simple_arp_packet(pktlen=60, eth_dst=dest_mac, eth_src=source_mac, - vlan_vid=0, + vlan_vid=vlan_id, vlan_pcp=0, arp_op=1, ip_snd='10.10.1.3', @@ -67,32 +90,35 @@ def send_arp_request(ptfadapter, source_port, source_mac, dest_mac): hw_snd=source_mac, hw_tgt='ff:ff:ff:ff:ff:ff', ) - logger.debug('send ARP request packet source port id {} smac: {} dmac: {}'.format(source_port, source_mac, dest_mac)) + logger.debug('send ARP request packet source port id {} smac: {} dmac: {} vlan: {}'.format(source_port, source_mac, dest_mac, vlan_id)) testutils.send(ptfadapter, source_port, pkt) -def send_arp_reply(ptfadapter, source_port, source_mac, dest_mac): +def send_arp_reply(ptfadapter, source_port, source_mac, dest_mac, vlan_id): """ send arp reply packet :param ptfadapter: PTF adapter object :param source_port: source port :param source_mac: source MAC :param dest_mac: destination MAC + :param vlan_id: VLAN id :return: """ pkt = testutils.simple_arp_packet(eth_dst=dest_mac, eth_src=source_mac, + vlan_vid=vlan_id, + vlan_pcp=0, arp_op=2, ip_snd='10.10.1.2', ip_tgt='10.10.1.3', hw_tgt=dest_mac, hw_snd=source_mac, ) - logger.debug('send ARP reply packet source port id {} smac: {} dmac: {}'.format(source_port, source_mac, 
dest_mac)) + logger.debug('send ARP reply packet source port id {} smac: {} dmac: {} vlan: {}'.format(source_port, source_mac, dest_mac, vlan_id)) testutils.send(ptfadapter, source_port, pkt) -def send_recv_eth(ptfadapter, source_port, source_mac, dest_port, dest_mac): +def send_recv_eth(ptfadapter, source_ports, source_mac, dest_ports, dest_mac, src_vlan, dst_vlan): """ send ethernet packet and verify it on dest_port :param ptfadapter: PTF adapter object @@ -100,17 +126,29 @@ def send_recv_eth(ptfadapter, source_port, source_mac, dest_port, dest_mac): :param source_mac: source MAC :param dest_port: destination port to receive packet on :param dest_mac: destination MAC + :param vlan_id: VLAN id :return: """ - pkt = testutils.simple_eth_packet( + pkt = simple_eth_packet( eth_dst=dest_mac, eth_src=source_mac, - eth_type=DEFAULT_FDB_ETHERNET_TYPE + vlan_vid=src_vlan ) - logger.debug('send packet src port {} smac: {} dmac: {} verifying on dst port {}'.format( - source_port, source_mac, dest_mac, dest_port)) - testutils.send(ptfadapter, source_port, pkt) - testutils.verify_packet_any_port(ptfadapter, pkt, [dest_port], timeout=FDB_WAIT_EXPECTED_PACKET_TIMEOUT) + exp_pkt = simple_eth_packet( + eth_dst=dest_mac, + eth_src=source_mac, + vlan_vid=dst_vlan + ) + if dst_vlan: + # expect to receive tagged packet: + # sonic device might modify the 802.1p field, + # need to use Mask to ignore the priority field. + exp_pkt = Mask(exp_pkt) + exp_pkt.set_do_not_care_scapy(scapy.Dot1Q, "prio") + logger.debug('send packet src port {} smac: {} dmac: {} vlan: {} verifying on dst port {}'.format( + source_ports, source_mac, dest_mac, src_vlan, dest_ports)) + testutils.send(ptfadapter, source_ports[0], pkt) + testutils.verify_packet_any_port(ptfadapter, exp_pkt, dest_ports, timeout=FDB_WAIT_EXPECTED_PACKET_TIMEOUT) def setup_fdb(ptfadapter, vlan_table, router_mac, pkt_type): @@ -126,30 +164,39 @@ def setup_fdb(ptfadapter, vlan_table, router_mac, pkt_type): for vlan in vlan_table: for member in vlan_table[vlan]: - mac = ptfadapter.dataplane.get_mac(0, member) + if 'port_index' not in member or 'tagging_mode' not in member: + continue + # member['port_index'] is a list, + # front panel port only has one member, and portchannel might have 0, 1 and multiple member ports, + # portchannel might have no member ports or all member ports are down, so skip empty list + if not member['port_index']: + continue + port_index = member['port_index'][0] + vlan_id = vlan if member['tagging_mode'] == 'tagged' else 0 + mac = ptfadapter.dataplane.get_mac(0, port_index) # send a packet to switch to populate layer 2 table with MAC of PTF interface - send_eth(ptfadapter, member, mac, router_mac) + send_eth(ptfadapter, port_index, mac, router_mac, vlan_id) # put in learned MAC - fdb[member] = { mac } + fdb[port_index] = { mac } # Send packets to switch to populate the layer 2 table with dummy MACs for each port # Totally 10 dummy MACs for each port, send 1 packet for each dummy MAC - dummy_macs = ['{}:{:02x}:{:02x}'.format(DUMMY_MAC_PREFIX, member, i) + dummy_macs = ['{}:{:02x}:{:02x}'.format(DUMMY_MAC_PREFIX, port_index, i) for i in range(DUMMY_MAC_COUNT)] for dummy_mac in dummy_macs: if pkt_type == "ethernet": - send_eth(ptfadapter, member, dummy_mac, router_mac) + send_eth(ptfadapter, port_index, dummy_mac, router_mac, vlan_id) elif pkt_type == "arp_request": - send_arp_request(ptfadapter, member, dummy_mac, router_mac) + send_arp_request(ptfadapter, port_index, dummy_mac, router_mac, vlan_id) elif pkt_type == "arp_reply": - 
send_arp_reply(ptfadapter, member, dummy_mac, router_mac) + send_arp_reply(ptfadapter, port_index, dummy_mac, router_mac, vlan_id) else: pytest.fail("Unknown option '{}'".format(pkt_type)) # put in set learned dummy MACs - fdb[member].update(dummy_macs) + fdb[port_index].update(dummy_macs) time.sleep(FDB_POPULATE_SLEEP_TIMEOUT) # Flush dataplane @@ -179,7 +226,15 @@ def fdb_cleanup(duthosts, rand_one_dut_hostname): return else: duthost.command('sonic-clear fdb all') - pytest_assert(wait_until(20, 2, fdb_table_has_no_dynamic_macs, duthost), "FDB Table Cleanup failed") + pytest_assert(wait_until(20, 2, 0, fdb_table_has_no_dynamic_macs, duthost), "FDB Table Cleanup failed") + + +def validate_mac(mac): + if mac.find(':') != -1: + pattern = re.compile(r"^([0-9a-fA-F]{2,2}:){5,5}[0-9a-fA-F]{2,2}$") + if pattern.match(mac): + return True + return False @pytest.fixture @@ -224,20 +279,45 @@ def test_fdb(ansible_adhoc, ptfadapter, duthosts, rand_one_dut_hostname, ptfhost available_ports_idx.append(idx) vlan_table = {} + interface_table = defaultdict(set) + config_portchannels = conf_facts.get('PORTCHANNEL', {}) for name, vlan in conf_facts['VLAN'].items(): - vlan_table[name] = [] - ifnames = conf_facts['VLAN_MEMBER'][name].keys() - vlan_table[name] = [ conf_facts['port_index_map'][ifname] for ifname in ifnames - if conf_facts['port_index_map'][ifname] in available_ports_idx ] - - vlan_member_count = sum([ len(members) for name, members in vlan_table.items() ]) + vlan_id = int(vlan['vlanid']) + vlan_table[vlan_id] = [] + + for ifname in conf_facts['VLAN_MEMBER'][name].keys(): + if 'tagging_mode' not in conf_facts['VLAN_MEMBER'][name][ifname]: + continue + tagging_mode = conf_facts['VLAN_MEMBER'][name][ifname]['tagging_mode'] + port_index = [] + if ifname in config_portchannels: + for member in config_portchannels[ifname]['members']: + if conf_facts['port_index_map'][member] in available_ports_idx: + port_index.append(conf_facts['port_index_map'][member]) + if port_index: + interface_table[ifname].add(vlan_id) + elif conf_facts['port_index_map'][ifname] in available_ports_idx: + port_index.append(conf_facts['port_index_map'][ifname]) + interface_table[ifname].add(vlan_id) + if port_index: + vlan_table[vlan_id].append({'port_index':port_index, 'tagging_mode':tagging_mode}) + + vlan_member_count = sum([ len(members) for members in vlan_table.values() ]) fdb = setup_fdb(ptfadapter, vlan_table, router_mac, pkt_type) for vlan in vlan_table: for src, dst in itertools.combinations(vlan_table[vlan], 2): - for src_mac, dst_mac in itertools.product(fdb[src], fdb[dst]): - send_recv_eth(ptfadapter, src, src_mac, dst, dst_mac) + if 'port_index' not in src or 'tagging_mode' not in src: + continue + if 'port_index' not in dst or 'tagging_mode' not in dst: + continue + src_vlan = vlan if src['tagging_mode'] == 'tagged' else 0 + dst_vlan = vlan if dst['tagging_mode'] == 'tagged' else 0 + src_ports = src['port_index'] + dst_ports = dst['port_index'] + for src_mac, dst_mac in itertools.product(fdb[src_ports[0]], fdb[dst_ports[0]]): + send_recv_eth(ptfadapter, src_ports, src_mac, dst_ports, dst_mac, src_vlan, dst_vlan) # Should we have fdb_facts ansible module for this test? res = duthost.command('show mac') @@ -246,6 +326,21 @@ def test_fdb(ansible_adhoc, ptfadapter, duthosts, rand_one_dut_hostname, ptfhost dummy_mac_count = 0 total_mac_count = 0 for l in res['stdout_lines']: + # No. 
Vlan MacAddress Port Type + items = l.split() + if len(items) != 5: + continue + # First item must be number + if not items[0].isdigit(): + continue + vlan_id = int(items[1]) + mac = items[2] + ifname = items[3] + fdb_type = items[4] + assert ifname in interface_table + assert vlan_id in interface_table[ifname] + assert validate_mac(mac) == True + assert fdb_type in ['Dynamic', 'Static'] if DUMMY_MAC_PREFIX in l.lower(): dummy_mac_count += 1 if "dynamic" in l.lower(): diff --git a/tests/fdb/test_fdb_mac_expire.py b/tests/fdb/test_fdb_mac_expire.py index a9dea3bcef..33d3d36d3f 100644 --- a/tests/fdb/test_fdb_mac_expire.py +++ b/tests/fdb/test_fdb_mac_expire.py @@ -6,7 +6,7 @@ from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm [py/unused-import] pytestmark = [ - pytest.mark.topology('t0') + pytest.mark.topology('t0', 't0-56-po2vlan') ] logger = logging.getLogger(__name__) @@ -125,6 +125,7 @@ def copyFdbInfo(self, duthosts, rand_one_dut_hostname, ptfhost, tbinfo): ptfhost.host.options['variable_manager'].extra_vars.update({ "minigraph_vlan_interfaces": mgFacts["minigraph_vlan_interfaces"], "minigraph_port_indices": mgFacts["minigraph_ptf_indices"], + "minigraph_portchannels": mgFacts["minigraph_portchannels"], "minigraph_vlans": mgFacts["minigraph_vlans"], }) diff --git a/tests/iface_namingmode/test_iface_namingmode.py b/tests/iface_namingmode/test_iface_namingmode.py index a346cd9f08..f9787dc4c3 100644 --- a/tests/iface_namingmode/test_iface_namingmode.py +++ b/tests/iface_namingmode/test_iface_namingmode.py @@ -688,14 +688,14 @@ def _port_status(expected_state): ifmode, cli_ns_option, test_intf)) if out['rc'] != 0: pytest.fail() - pytest_assert(wait_until(PORT_TOGGLE_TIMEOUT, 2, _port_status, 'down'), + pytest_assert(wait_until(PORT_TOGGLE_TIMEOUT, 2, 0, _port_status, 'down'), "Interface {} should be admin down".format(test_intf)) out = dutHostGuest.shell('SONIC_CLI_IFACE_MODE={} sudo config interface {} startup {}'.format( ifmode, cli_ns_option, test_intf)) if out['rc'] != 0: pytest.fail() - pytest_assert(wait_until(PORT_TOGGLE_TIMEOUT, 2, _port_status, 'up'), + pytest_assert(wait_until(PORT_TOGGLE_TIMEOUT, 2, 0, _port_status, 'up'), "Interface {} should be admin up".format(test_intf)) diff --git a/tests/ipfwd/test_dir_bcast.py b/tests/ipfwd/test_dir_bcast.py index 0a33a6ebd9..6a45c07a4e 100644 --- a/tests/ipfwd/test_dir_bcast.py +++ b/tests/ipfwd/test_dir_bcast.py @@ -20,7 +20,8 @@ def test_dir_bcast(duthosts, rand_one_dut_hostname, ptfhost, tbinfo): extra_vars = { 'minigraph_vlan_interfaces': mg_facts['minigraph_vlan_interfaces'], 'minigraph_vlans': mg_facts['minigraph_vlans'], - 'minigraph_port_indices': mg_facts['minigraph_ptf_indices'] + 'minigraph_port_indices': mg_facts['minigraph_ptf_indices'], + 'minigraph_portchannels': mg_facts['minigraph_portchannels'] } ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars) ptfhost.template(src="../ansible/roles/test/templates/fdb.j2", dest="/root/vlan_info.txt") diff --git a/tests/ixia/pfc/test_pfc_pause_lossless.py b/tests/ixia/pfc/test_pfc_pause_lossless.py index 46d0b46493..21fbf98e8c 100644 --- a/tests/ixia/pfc/test_pfc_pause_lossless.py +++ b/tests/ixia/pfc/test_pfc_pause_lossless.py @@ -181,7 +181,7 @@ def test_pfc_pause_single_lossless_prio_reboot(ixia_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 
duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfc_test(api=ixia_api, @@ -248,7 +248,7 @@ def test_pfc_pause_multi_lossless_prio_reboot(ixia_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfc_test(api=ixia_api, diff --git a/tests/ixia/pfc/test_pfc_pause_lossy.py b/tests/ixia/pfc/test_pfc_pause_lossy.py index 136f300c94..cd3b5a19c5 100644 --- a/tests/ixia/pfc/test_pfc_pause_lossy.py +++ b/tests/ixia/pfc/test_pfc_pause_lossy.py @@ -181,7 +181,7 @@ def test_pfc_pause_single_lossy_prio_reboot(ixia_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfc_test(api=ixia_api, @@ -248,7 +248,7 @@ def test_pfc_pause_multi_lossy_prio_reboot(ixia_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfc_test(api=ixia_api, diff --git a/tests/ixia/pfcwd/test_pfcwd_basic.py b/tests/ixia/pfcwd/test_pfcwd_basic.py index 284a06c53f..bf5c603625 100644 --- a/tests/ixia/pfcwd/test_pfcwd_basic.py +++ b/tests/ixia/pfcwd/test_pfcwd_basic.py @@ -16,6 +16,8 @@ pytestmark = [ pytest.mark.topology('tgen') ] +DEPENDENT_SERVICES = ['teamd', 'snmp', 'dhcp_relay', 'radv'] + @pytest.mark.parametrize("trigger_pfcwd", [True, False]) def test_pfcwd_basic_single_lossless_prio(ixia_api, ixia_testbed_config, @@ -167,7 +169,7 @@ def test_pfcwd_basic_single_lossless_prio_reboot(ixia_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfcwd_basic_test(api=ixia_api, @@ -230,7 +232,7 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(ixia_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfcwd_basic_test(api=ixia_api, @@ -289,10 +291,12 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(ixia_api, lossless_prio = int(lossless_prio) logger.info("Issuing a restart of service {} on the 
dut {}".format(restart_service, duthost.hostname)) - duthost.command("systemctl reset-failed {}".format(restart_service)) + services_to_reset = DEPENDENT_SERVICES + [restart_service] + for service in services_to_reset: + duthost.command("systemctl reset-failed {}".format(service)) duthost.command("systemctl restart {}".format(restart_service)) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfcwd_basic_test(api=ixia_api, @@ -350,10 +354,12 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(ixia_api, testbed_config, port_config_list = ixia_testbed_config logger.info("Issuing a restart of service {} on the dut {}".format(restart_service, duthost.hostname)) - duthost.command("systemctl reset-failed {}".format(restart_service)) + services_to_reset = DEPENDENT_SERVICES + [restart_service] + for service in services_to_reset: + duthost.command("systemctl reset-failed {}".format(service)) duthost.command("systemctl restart {}".format(restart_service)) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfcwd_basic_test(api=ixia_api, diff --git a/tests/kvmtest.sh b/tests/kvmtest.sh index 5fc8bc52c5..1d672a667d 100755 --- a/tests/kvmtest.sh +++ b/tests/kvmtest.sh @@ -135,7 +135,8 @@ test_t0() { platform_tests/test_cpu_memory_usage.py \ bgp/test_bgpmon.py \ container_checker/test_container_checker.py \ - process_monitoring/test_critical_process_monitoring.py" + process_monitoring/test_critical_process_monitoring.py \ + system_health/test_system_status.py" pushd $SONIC_MGMT_DIR/tests ./run_tests.sh $RUNTEST_CLI_COMMON_OPTS -c "$tests" -p logs/$tgname @@ -201,7 +202,8 @@ test_t1_lag() { bgp/test_bgpmon.py \ container_checker/test_container_checker.py \ process_monitoring/test_critical_process_monitoring.py \ - scp/test_scp_copy.py" + scp/test_scp_copy.py \ + pc/test_lag_2.py" pushd $SONIC_MGMT_DIR/tests ./run_tests.sh $RUNTEST_CLI_COMMON_OPTS -c "$tests" -p logs/$tgname diff --git a/tests/memory_checker/test_memory_checker.py b/tests/memory_checker/test_memory_checker.py index 8219440354..573437910c 100644 --- a/tests/memory_checker/test_memory_checker.py +++ b/tests/memory_checker/test_memory_checker.py @@ -159,6 +159,7 @@ def consume_memory_and_restart_container(duthost, container_name, vm_workers, lo logger.info("Waiting for '{}' container to be restarted ...".format(container_name)) restarted = wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, + 0, check_container_state, duthost, container_name, True) pytest_assert(restarted, "Failed to restart '{}' container!".format(container_name)) logger.info("'{}' container is restarted.".format(container_name)) @@ -193,7 +194,7 @@ def postcheck_critical_processes(duthost, container_name): """ logger.info("Checking the running status of critical processes in '{}' container ..." 
.format(container_name)) - is_succeeded = wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, + is_succeeded = wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS, 0, check_critical_processes, duthost, container_name) if not is_succeeded: pytest.fail("Not all critical processes in '{}' container are running!" diff --git a/tests/monit/test_monit_status.py b/tests/monit/test_monit_status.py index 60c3dcb407..50a3e16ff3 100644 --- a/tests/monit/test_monit_status.py +++ b/tests/monit/test_monit_status.py @@ -105,7 +105,7 @@ def _monit_status(): monit_status_result = duthost.shell("sudo monit status", module_ignore_errors=True) return monit_status_result["rc"] == 0 # Monit is configured with start delay = 300s, hence we wait up to 320s here - pytest_assert(wait_until(320, 20, _monit_status), + pytest_assert(wait_until(320, 20, 0, _monit_status), "Monit is either not running or not configured correctly") logger.info("Checking the running status of Monit was done!") @@ -133,7 +133,7 @@ def test_monit_reporting_message(duthosts, enum_rand_one_per_hwsku_frontend_host logger.info("Checking the format of Monit alerting message ...") - pytest_assert(wait_until(180, 60, check_monit_last_output, duthost), + pytest_assert(wait_until(180, 60, 0, check_monit_last_output, duthost), "Expected Monit reporting message not found") logger.info("Checking the format of Monit alerting message was done!") diff --git a/tests/mvrf/test_mgmtvrf.py b/tests/mvrf/test_mgmtvrf.py index 4f93281b13..b111649e7c 100644 --- a/tests/mvrf/test_mgmtvrf.py +++ b/tests/mvrf/test_mgmtvrf.py @@ -138,7 +138,7 @@ def setup_ntp(ptfhost, duthost, ntp_servers): ptfhost.lineinfile(path="/etc/ntp.conf", line="server 127.127.1.0 prefer") # restart ntp server ntp_en_res = ptfhost.service(name="ntp", state="restarted") - pytest_assert(wait_until(120, 5, check_ntp_status, ptfhost), \ + pytest_assert(wait_until(120, 5, 0, check_ntp_status, ptfhost), \ "NTP server was not started in PTF container {}; NTP service start result {}".format(ptfhost.hostname, ntp_en_res)) # setup ntp on dut to sync with ntp server for ntp_server in ntp_servers: @@ -207,7 +207,7 @@ def test_ntp(self, duthosts, rand_one_dut_hostname, ptfhost, check_ntp_sync, ntp logger.info("Ntp restart in mgmt vrf") execute_dut_command(duthost, force_ntp) duthost.service(name="ntp", state="restarted") - pytest_assert(wait_until(400, 10, check_ntp_status, duthost), "Ntp not started") + pytest_assert(wait_until(400, 10, 0, check_ntp_status, duthost), "Ntp not started") def test_service_acl(self, duthosts, rand_one_dut_hostname, localhost): duthost = duthosts[rand_one_dut_hostname] @@ -247,7 +247,7 @@ def test_warmboot(self, duthosts, rand_one_dut_hostname, localhost, ptfhost, cre duthost = duthosts[rand_one_dut_hostname] duthost.command("sudo config save -y") # This will override config_db.json with mgmt vrf config reboot(duthost, localhost, reboot_type="warm") - pytest_assert(wait_until(120, 20, duthost.critical_services_fully_started), "Not all critical services are fully started") + pytest_assert(wait_until(120, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") self.basic_check_after_reboot(duthost, localhost, ptfhost, creds) @pytest.mark.disable_loganalyzer @@ -255,7 +255,7 @@ def test_reboot(self, duthosts, rand_one_dut_hostname, localhost, ptfhost, creds duthost = duthosts[rand_one_dut_hostname] duthost.command("sudo config save -y") # This will override config_db.json with mgmt vrf config 
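Many of the hunks above make the same mechanical change: a literal 0 is inserted as the third positional argument of wait_until, between the poll interval and the condition callable. A minimal sketch of the calling convention these call sites assume is below; the real helper lives in tests/common/utilities.py, and the delay-before-first-poll semantics and body shown here are an illustration, not the project's actual implementation.

    import time

    def wait_until(timeout, interval, delay, condition, *args, **kwargs):
        # Assumed semantics: sleep 'delay' seconds before the first poll, then
        # call 'condition' every 'interval' seconds until it returns True or
        # 'timeout' seconds have elapsed.
        time.sleep(delay)
        deadline = time.time() + timeout
        while time.time() < deadline:
            if condition(*args, **kwargs):
                return True
            time.sleep(interval)
        return False

    # Updated call sites in this diff then read, for example:
    # pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started),
    #               "Not all critical services are fully started")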
reboot(duthost, localhost) - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started") + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") self.basic_check_after_reboot(duthost, localhost, ptfhost, creds) @pytest.mark.disable_loganalyzer @@ -263,5 +263,5 @@ def test_fastboot(self, duthosts, rand_one_dut_hostname, localhost, ptfhost, cre duthost = duthosts[rand_one_dut_hostname] duthost.command("sudo config save -y") # This will override config_db.json with mgmt vrf config reboot(duthost, localhost, reboot_type="fast") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started") + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") self.basic_check_after_reboot(duthost, localhost, ptfhost, creds) diff --git a/tests/ntp/test_ntp.py b/tests/ntp/test_ntp.py index 60f4b7ea45..9e3cc39220 100644 --- a/tests/ntp/test_ntp.py +++ b/tests/ntp/test_ntp.py @@ -22,7 +22,7 @@ def setup_ntp(ptfhost, duthosts, rand_one_dut_hostname, creds): # restart ntp server ntp_en_res = ptfhost.service(name="ntp", state="restarted") - pytest_assert(wait_until(120, 5, check_ntp_status, ptfhost), \ + pytest_assert(wait_until(120, 5, 0, check_ntp_status, ptfhost), \ "NTP server was not started in PTF container {}; NTP service start result {}".format(ptfhost.hostname, ntp_en_res)) # setup ntp on dut to sync with ntp server @@ -57,5 +57,5 @@ def test_ntp(duthosts, rand_one_dut_hostname, setup_ntp): duthost.service(name='ntp', state='stopped') duthost.command("ntpd -gq") duthost.service(name='ntp', state='restarted') - pytest_assert(wait_until(720, 10, check_ntp_status, duthost), + pytest_assert(wait_until(720, 10, 0, check_ntp_status, duthost), "NTP not in sync") diff --git a/tests/pc/test_lag_2.py b/tests/pc/test_lag_2.py index 0940d7a506..3e4b04953e 100644 --- a/tests/pc/test_lag_2.py +++ b/tests/pc/test_lag_2.py @@ -38,6 +38,9 @@ def common_setup_teardown(ptfhost): ptfhost.file(path=TEST_DIR, state="absent") +def is_vtestbed(duthost): + return duthost.facts['asic_type'].lower() == "vs" + class LagTest: def __init__(self, duthost, tbinfo, ptfhost, nbrhosts, fanouthosts, conn_graph_facts): self.duthost = duthost @@ -48,7 +51,10 @@ def __init__(self, duthost, tbinfo, ptfhost, nbrhosts, fanouthosts, conn_graph_f self.mg_facts = duthost.get_extended_minigraph_facts(tbinfo) self.conn_graph_facts = conn_graph_facts self.vm_neighbors = self.mg_facts['minigraph_neighbors'] - self.fanout_neighbors = self.conn_graph_facts['device_conn'][duthost.hostname] if 'device_conn' in self.conn_graph_facts else {} + if is_vtestbed(duthost): + self.fanout_neighbors = None + else: + self.fanout_neighbors = self.conn_graph_facts['device_conn'][duthost.hostname] if 'device_conn' in self.conn_graph_facts else {} def __get_lag_facts(self): return self.duthost.lag_facts(host = self.duthost.hostname)['ansible_facts']['lag_facts'] @@ -112,7 +118,7 @@ def __verify_lag_minlink( if po_intf != intf: command = 'bash -c "teamdctl %s %s state dump" | python -c "import sys, json; print json.load(sys.stdin)[\'ports\'][\'%s\'][\'runner\'][\'selected\']"' \ % (namespace_prefix, lag_name, po_intf) - wait_until(wait_timeout, delay, self.__check_shell_output, self.duthost, command) + wait_until(wait_timeout, delay, 0, self.__check_shell_output, self.duthost, command) # Refresh lag facts 
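The test_lag_2.py hunks above introduce a guard for virtual testbeds, where no physical leaf-fanout exists; the skip that uses it appears a little further down in the same file. A short usage sketch, with the helper copied from the diff (the asic_type value "vs" for the SONiC virtual switch is taken from the diff itself):

    def is_vtestbed(duthost):
        # SONiC virtual-switch DUTs report asic_type "vs"
        return duthost.facts['asic_type'].lower() == "vs"

    # Skip tests that require a physical leaf-fanout, as done in test_lag:
    # if testcase == "single_lag" and is_vtestbed(duthosts[0]):
    #     pytest.skip("Skip single_lag test on vtestbed")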
lag_facts = self.__get_lag_facts() @@ -136,7 +142,7 @@ def __verify_lag_minlink( if po_intf != intf: command = 'bash -c "teamdctl %s %s state dump" | python -c "import sys, json; print json.load(sys.stdin)[\'ports\'][\'%s\'][\'link\'][\'up\']"'\ % (namespace_prefix, lag_name, po_intf) - wait_until(wait_timeout, delay, self.__check_shell_output, self.duthost, command) + wait_until(wait_timeout, delay, 0, self.__check_shell_output, self.duthost, command) def run_single_lag_lacp_rate_test(self, lag_name, lag_facts): logger.info("Start checking single lag lacp rate for: %s" % lag_name) @@ -216,7 +222,7 @@ def run_lag_fallback_test(self, lag_name, lag_facts): try: # Shut down neighbor interface vm_host.shutdown(neighbor_intf) - wait_until(wait_timeout, delay, self.__check_intf_state, vm_host, neighbor_intf, False) + wait_until(wait_timeout, delay, 0, self.__check_intf_state, vm_host, neighbor_intf, False) # Refresh lag facts lag_facts = self.__get_lag_facts() @@ -242,7 +248,7 @@ def run_lag_fallback_test(self, lag_name, lag_facts): finally: # Bring up neighbor interface vm_host.no_shutdown(neighbor_intf) - wait_until(wait_timeout, delay, self.__check_intf_state, vm_host, neighbor_intf, True) + wait_until(wait_timeout, delay, 0, self.__check_intf_state, vm_host, neighbor_intf, True) @pytest.fixture(autouse=True, scope='module') def skip_if_no_lags(duthosts): @@ -256,6 +262,10 @@ def has_lags(dut): "lacp_rate", "fallback"]) def test_lag(common_setup_teardown, duthosts, tbinfo, nbrhosts, fanouthosts, conn_graph_facts, enum_dut_portchannel, testcase): + # We can't run single_lag test on vtestbed since there is no leaffanout + if testcase == "single_lag" and is_vtestbed(duthosts[0]): + pytest.skip("Skip single_lag test on vtestbed") + ptfhost = common_setup_teardown dut_name, dut_lag = decode_dut_port_name(enum_dut_portchannel) diff --git a/tests/pc/test_po_cleanup.py b/tests/pc/test_po_cleanup.py index ed710741ed..6da34ad629 100644 --- a/tests/pc/test_po_cleanup.py +++ b/tests/pc/test_po_cleanup.py @@ -59,6 +59,6 @@ def test_po_cleanup(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_as logging.info("Disable swss/teamd Feature") duthost.asic_instance(enum_asic_index).stop_service("swss") # Check if Linux Kernel Portchannel Interface teamdev are clean up - if not wait_until(10, 1, check_kernel_po_interface_cleaned, duthost, enum_asic_index): + if not wait_until(10, 1, 0, check_kernel_po_interface_cleaned, duthost, enum_asic_index): fail_msg = "PortChannel interface still exists in kernel" pytest.fail(fail_msg) diff --git a/tests/pc/test_po_update.py b/tests/pc/test_po_update.py index 2c9e398259..dbad98e1b2 100644 --- a/tests/pc/test_po_update.py +++ b/tests/pc/test_po_update.py @@ -76,7 +76,7 @@ def test_po_update(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_fro time.sleep(30) int_facts = asichost.interface_facts()['ansible_facts'] pytest_assert(not int_facts['ansible_interface_facts'][portchannel]['link']) - pytest_assert(wait_until(120, 10, asichost.check_bgp_statistic, 'ipv4_idle', 1)) + pytest_assert(wait_until(120, 10, 0, asichost.check_bgp_statistic, 'ipv4_idle', 1)) # Step 3: Create tmp portchannel asichost.config_portchannel(tmp_portchannel, "add") @@ -96,7 +96,7 @@ def test_po_update(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_fro time.sleep(30) int_facts = asichost.interface_facts()['ansible_facts'] pytest_assert(int_facts['ansible_interface_facts'][tmp_portchannel]['link']) - pytest_assert(wait_until(120, 10, asichost.check_bgp_statistic, 'ipv4_idle', 
0)) + pytest_assert(wait_until(120, 10, 0, asichost.check_bgp_statistic, 'ipv4_idle', 0)) finally: # Recover all states if add_tmp_portchannel_ip: @@ -115,4 +115,4 @@ def test_po_update(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_fro if remove_portchannel_members: for member in portchannel_members: asichost.config_portchannel_member(portchannel, member, "add") - pytest_assert(wait_until(120, 10, asichost.check_bgp_statistic, 'ipv4_idle', 0)) + pytest_assert(wait_until(120, 10, 0, asichost.check_bgp_statistic, 'ipv4_idle', 0)) diff --git a/tests/pfc/test_unknown_mac.py b/tests/pfc/test_unknown_mac.py index 4a0a91c532..40e96378ce 100644 --- a/tests/pfc/test_unknown_mac.py +++ b/tests/pfc/test_unknown_mac.py @@ -14,7 +14,7 @@ from tests.common import constants from tests.common.fixtures.ptfhost_utils import change_mac_addresses from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py -from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.assertions import pytest_assert, pytest_require from tests.common.dualtor.dual_tor_utils import mux_cable_server_ip from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor from tests.common.utilities import get_intf_by_sub_intf @@ -53,6 +53,11 @@ def unknownMacSetup(duthosts, rand_one_dut_hostname, tbinfo): """ duthost = duthosts[rand_one_dut_hostname] + # The behavior on Mellanox for unknown MAC is flooding rather than DROP, + # so we need to skip this test on Mellanox platform + asic_type = duthost.facts["asic_type"] + pytest_require(asic_type != "mellanox", "Skip on Mellanox platform") + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) is_backend_topology = mg_facts.get(constants.IS_BACKEND_TOPOLOGY_KEY, False) server_ips = [] @@ -164,8 +169,6 @@ def populateArp(unknownMacSetup, flushArpFdb, ptfhost, duthosts, rand_one_dut_ho # Wait 5 seconds for secondary ARP before proceeding to clear FDB time.sleep(5) - yield - logger.info("Clean up all ips on the PTF") ptfhost.script("./scripts/remove_ip.sh") diff --git a/tests/pfcwd/test_pfcwd_function.py b/tests/pfcwd/test_pfcwd_function.py index e496f20000..9d11100fe7 100644 --- a/tests/pfcwd/test_pfcwd_function.py +++ b/tests/pfcwd/test_pfcwd_function.py @@ -24,6 +24,9 @@ "forward": "Verify proper function of forward action" } MMU_ACTIONS = ['change', 'noop', 'restore', 'noop'] +DB_SEPARATORS = {'0': ':', '4': '|'} +BF_PROFILE = "BUFFER_PROFILE|{}" +BF_PROFILE_TABLE = "BUFFER_PROFILE_TABLE:{}" pytestmark = [ pytest.mark.disable_loganalyzer, @@ -128,10 +131,11 @@ def update_alpha(dut, port, profile, value): db = "0" else: db = "4" + table_template = BF_PROFILE if db == "4" else BF_PROFILE_TABLE asic.run_redis_cmd( argv = [ - "redis-cli", "-n", db, "HSET", profile, "dynamic_th", value + "redis-cli", "-n", db, "HSET", table_template.format(profile), "dynamic_th", value ] ) @@ -162,11 +166,15 @@ def get_mmu_params(dut, port): "redis-cli", "-n", db, "HGET", pg_pattern.format(port), "profile" ] - )[0].encode("utf-8")[1:-1] + )[0].encode("utf-8") + + if BF_PROFILE[:-2] in pg_profile or BF_PROFILE_TABLE[:-2] in pg_profile: + pg_profile = pg_profile.split(DB_SEPARATORS[db])[-1][:-1] + table_template = BF_PROFILE if db == "4" else BF_PROFILE_TABLE alpha = asic.run_redis_cmd( argv = [ - "redis-cli", "-n", db, "HGET", pg_profile, "dynamic_th" + "redis-cli", "-n", db, "HGET", table_template.format(pg_profile), "dynamic_th" ] )[0].encode("utf-8") diff --git a/tests/pfcwd/test_pfcwd_timer_accuracy.py 
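The test_pfcwd_function.py hunks above stop treating the buffer-profile name returned by redis as a full key and instead rebuild the key from a per-database template. A minimal sketch of that selection, reusing the constants added by the diff (the CONFIG_DB=4 / APPL_DB=0 numbering is standard SONiC; the helper name here is illustrative):

    DB_SEPARATORS = {'0': ':', '4': '|'}          # APPL_DB uses ':', CONFIG_DB uses '|'
    BF_PROFILE = "BUFFER_PROFILE|{}"              # CONFIG_DB (redis db 4) key form
    BF_PROFILE_TABLE = "BUFFER_PROFILE_TABLE:{}"  # APPL_DB (redis db 0) key form

    def profile_key(db, profile):
        # Pick the key template that matches the database being queried
        template = BF_PROFILE if db == "4" else BF_PROFILE_TABLE
        return template.format(profile)

    # profile_key("4", "pg_lossless_100000_300m_profile")
    #   -> "BUFFER_PROFILE|pg_lossless_100000_300m_profile"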
b/tests/pfcwd/test_pfcwd_timer_accuracy.py index 2005401478..2210b4969c 100644 --- a/tests/pfcwd/test_pfcwd_timer_accuracy.py +++ b/tests/pfcwd/test_pfcwd_timer_accuracy.py @@ -170,17 +170,19 @@ def verify_pfcwd_timers(self): config_detect_time)) pytest_assert(self.all_detect_time[9] < config_detect_time, err_msg) - logger.info("Verify that real detection time is not less than configured") - err_msg = ("Real detection time is less than configured: Real detect time: {} " - "Expected: {} (wd_detect_time)".format(self.all_detect_time[9], - self.timers['pfc_wd_detect_time'])) - pytest_assert(self.all_detect_time[9] > self.timers['pfc_wd_detect_time'], err_msg) - - logger.info("Verify that real restoration time is not less than configured") - err_msg = ("Real restoration time is less than configured: Real restore time: {} " - "Expected: {} (wd_restore_time)".format(self.all_restore_time[9], - self.timers['pfc_wd_restore_time'])) - pytest_assert(self.all_restore_time[9] > self.timers['pfc_wd_restore_time'], err_msg) + if self.timers['pfc_wd_poll_time'] < self.timers['pfc_wd_detect_time']: + logger.info("Verify that real detection time is not less than configured") + err_msg = ("Real detection time is less than configured: Real detect time: {} " + "Expected: {} (wd_detect_time)".format(self.all_detect_time[9], + self.timers['pfc_wd_detect_time'])) + pytest_assert(self.all_detect_time[9] > self.timers['pfc_wd_detect_time'], err_msg) + + if self.timers['pfc_wd_poll_time'] < self.timers['pfc_wd_restore_time']: + logger.info("Verify that real restoration time is not less than configured") + err_msg = ("Real restoration time is less than configured: Real restore time: {} " + "Expected: {} (wd_restore_time)".format(self.all_restore_time[9], + self.timers['pfc_wd_restore_time'])) + pytest_assert(self.all_restore_time[9] > self.timers['pfc_wd_restore_time'], err_msg) logger.info("Verify that real restoration time is less than configured") config_restore_time = self.timers['pfc_wd_restore_time'] + self.timers['pfc_wd_poll_time'] diff --git a/tests/platform_tests/api/test_chassis.py b/tests/platform_tests/api/test_chassis.py index c3644fe42e..86ea4c41cf 100644 --- a/tests/platform_tests/api/test_chassis.py +++ b/tests/platform_tests/api/test_chassis.py @@ -216,6 +216,8 @@ def test_get_system_eeprom_info(self, duthosts, enum_rand_one_per_hwsku_hostname duthost = duthosts[enum_rand_one_per_hwsku_hostname] syseeprom_info_dict = chassis.get_system_eeprom_info(platform_api_conn) + # Convert all keys of syseeprom_info_dict into lower case + syseeprom_info_dict = {k.lower() : v for k, v in syseeprom_info_dict.items()} pytest_assert(syseeprom_info_dict is not None, "Failed to retrieve system EEPROM data") pytest_assert(isinstance(syseeprom_info_dict, dict), "System EEPROM data is not in the expected format") @@ -241,6 +243,8 @@ def test_get_system_eeprom_info(self, duthosts, enum_rand_one_per_hwsku_hostname pytest_assert(re.match(REGEX_SERIAL_NUMBER, serial), "Serial number appears to be incorrect") host_vars = get_host_visible_vars(self.inv_files, duthost.hostname) expected_syseeprom_info_dict = host_vars.get('syseeprom_info') + # Ignore case of keys in syseeprom_info + expected_syseeprom_info_dict = {k.lower(): v for k, v in expected_syseeprom_info_dict.items()} for field in expected_syseeprom_info_dict: pytest_assert(field in syseeprom_info_dict, "Expected field '{}' not present in syseeprom on '{}'".format(field, duthost.hostname)) @@ -396,7 +400,7 @@ def test_sfps(self, duthosts, 
enum_rand_one_per_hwsku_hostname, localhost, platf except: pytest.fail("num_sfps is not an integer") - list_sfps = list(set(physical_port_indices)) + list_sfps = physical_port_indices logging.info("Physical port indices = {}".format(list_sfps)) @@ -413,7 +417,7 @@ def test_sfps(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platf for i in range(len(list_sfps)): index = list_sfps[i] sfp = chassis.get_sfp(platform_api_conn, index) - self.expect(sfp and sfp == sfp_list[i], "SFP number {} object is incorrect index {}".format(i, index)) + self.expect(sfp and sfp in sfp_list, "SFP object for PORT{} NOT found".format(index)) self.assert_expectations() def test_status_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): @@ -447,19 +451,42 @@ def test_status_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, 2: "off" } - for index, led_type in enumerate(LED_COLOR_TYPES): - led_type_result = False - for color in led_type: - result = chassis.set_status_led(platform_api_conn, color) - if self.expect(result is not None, "Failed to perform set_status_led"): - led_type_result = result or led_type_result - if ((result is None) or (not result)): - continue - color_actual = chassis.get_status_led(platform_api_conn) - if self.expect(color_actual is not None, "Failed to retrieve status_led"): - if self.expect(isinstance(color_actual, STRING_TYPE), "Status LED color appears incorrect"): - self.expect(color == color_actual, "Status LED color incorrect (expected: {}, actual: {})".format(color, color_actual)) - self.expect(led_type_result is True, "Failed to set status_led to {}".format(LED_COLOR_TYPES_DICT[index])) + led_controllable = True + led_supported_colors = [] + if duthost.facts.get("chassis"): + status_led = duthost.facts.get("chassis").get("status_led") + if status_led: + led_controllable = status_led.get("controllable", True) + led_supported_colors = status_led.get("colors") + + if led_controllable: + led_type_skipped = 0 + for index, led_type in enumerate(LED_COLOR_TYPES): + if led_supported_colors: + led_type = set(led_type) & set(led_supported_colors) + if not led_type: + logger.warning("test_status_led: Skipping set status_led to {} (No supported colors)".format(LED_COLOR_TYPES_DICT[index])) + led_type_skipped += 1 + continue + + led_type_result = False + for color in led_type: + result = chassis.set_status_led(platform_api_conn, color) + if self.expect(result is not None, "Failed to perform set_status_led"): + led_type_result = result or led_type_result + if ((result is None) or (not result)): + continue + color_actual = chassis.get_status_led(platform_api_conn) + if self.expect(color_actual is not None, "Failed to retrieve status_led"): + if self.expect(isinstance(color_actual, STRING_TYPE), "Status LED color appears incorrect"): + self.expect(color == color_actual, "Status LED color incorrect (expected: {}, actual: {})".format(color, color_actual)) + self.expect(led_type_result is True, "Failed to set status_led to {}".format(LED_COLOR_TYPES_DICT[index])) + + if led_type_skipped == len(LED_COLOR_TYPES): + pytest.skip("skipped as no supported colors for all types") + + else: + pytest.skip("skipped as chassis's status led is not controllable") self.assert_expectations() diff --git a/tests/platform_tests/api/test_chassis_fans.py b/tests/platform_tests/api/test_chassis_fans.py index 89937e20ea..4942d6a97a 100644 --- a/tests/platform_tests/api/test_chassis_fans.py +++ b/tests/platform_tests/api/test_chassis_fans.py @@ -71,6 +71,19 @@ def 
compare_value_with_platform_facts(self, duthost, key, value, fan_idx): self.expect(value == expected_value, "'{}' value is incorrect. Got '{}', expected '{}' for fan {}".format(key, value, expected_value, fan_idx)) + def get_fan_facts(self, duthost, fan_idx, def_value, *keys): + if duthost.facts.get("chassis"): + fans = duthost.facts.get("chassis").get("fans") + if fans: + value = fans[fan_idx] + for key in keys: + value = value.get(key) + if value is None: + return def_value + + return value + + return def_value # # Functions to test methods inherited from DeviceBase class @@ -168,8 +181,22 @@ def test_get_direction(self, duthosts, enum_rand_one_per_hwsku_hostname, localho def test_get_fans_target_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + fans_skipped = 0 + for i in range(self.num_fans): speed_target_val = 25 + speed_controllable = self.get_fan_facts(duthost, i, True, "speed", "controllable") + if not speed_controllable: + logger.info("test_get_fans_target_speed: Skipping chassis fan {} (speed not controllable)".format(i)) + fans_skipped += 1 + continue + + speed_minimum = self.get_fan_facts(duthost, i, 25, "speed", "minimum") + speed_maximum = self.get_fan_facts(duthost, i, 100, "speed", "maximum") + if speed_minimum > speed_target_val or speed_maximum < speed_target_val: + speed_target_val = random.randint(speed_minimum, speed_maximum) + speed_set = fan.set_speed(platform_api_conn, i, speed_target_val) target_speed = fan.get_target_speed(platform_api_conn, i) if self.expect(target_speed is not None, "Unable to retrieve Fan {} target speed".format(i)): @@ -177,6 +204,9 @@ def test_get_fans_target_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, self.expect(target_speed == speed_target_val, "Fan {} target speed setting is not correct, speed_target_val {} target_speed = {}".format( i, speed_target_val, target_speed)) + if fans_skipped == self.num_fans: + pytest.skip("skipped as all chassis fans' speed is not controllable") + self.assert_expectations() def test_get_fans_speed_tolerance(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): @@ -191,9 +221,25 @@ def test_get_fans_speed_tolerance(self, duthosts, enum_rand_one_per_hwsku_hostna def test_set_fans_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - target_speed = random.randint(1, 100) + fans_skipped = 0 + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + if duthost.facts["asic_type"] in ["cisco-8000"]: + target_speed = random.randint(40, 60) + else: + target_speed = random.randint(1, 100) for i in range(self.num_fans): + speed_controllable = self.get_fan_facts(duthost, i, True, "speed", "controllable") + if not speed_controllable: + logger.info("test_set_fans_speed: Skipping chassis fan {} (speed not controllable)".format(i)) + fans_skipped += 1 + continue + + speed_minimum = self.get_fan_facts(duthost, i, 1, "speed", "minimum") + speed_maximum = self.get_fan_facts(duthost, i, 100, "speed", "maximum") + if speed_minimum > target_speed or speed_maximum < target_speed: + target_speed = random.randint(speed_minimum, speed_maximum) + speed = fan.get_speed(platform_api_conn, i) speed_tol = fan.get_speed_tolerance(platform_api_conn, i) @@ -204,6 +250,9 @@ def test_set_fans_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localh self.expect(abs(act_speed - target_speed) <= speed_tol, "Fan {} speed change from {} to {} is not within tolerance, actual speed 
{}".format(i, speed, target_speed, act_speed)) + if fans_skipped == self.num_fans: + pytest.skip("skipped as all chassis fans' speed is not controllable") + self.assert_expectations() def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): @@ -213,8 +262,17 @@ def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos "amber", "green", ] + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + fans_skipped = 0 for i in range(self.num_fans): + led_controllable = self.get_fan_facts(duthost, i, True, "status_led", "controllable") + if not led_controllable: + logger.info("test_set_fans_led: Skipping chassis fan {} (LED not controllable)".format(i)) + fans_skipped += 1 + continue + + LED_COLOR_LIST = self.get_fan_facts(duthost, i, LED_COLOR_LIST, "status_led", "colors") for color in LED_COLOR_LIST: result = fan.set_status_led(platform_api_conn, i, color) @@ -228,4 +286,7 @@ def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos self.expect(color == color_actual, "Status LED color incorrect (expected: {}, actual: {} for fan {})".format( color, color_actual, i)) + if fans_skipped == self.num_fans: + pytest.skip("skipped as all chassis fans' LED is not controllable") + self.assert_expectations() diff --git a/tests/platform_tests/api/test_component.py b/tests/platform_tests/api/test_component.py index 7253b57de3..89a66b6fa3 100644 --- a/tests/platform_tests/api/test_component.py +++ b/tests/platform_tests/api/test_component.py @@ -8,6 +8,7 @@ from tests.common.helpers.platform_api import chassis, component from platform_api_test_base import PlatformApiTestBase +from tests.common.utilities import skip_release_for_platform ################################################### # TODO: Remove this after we transition to Python 3 @@ -164,6 +165,9 @@ def test_get_firmware_version(self, duthosts, enum_rand_one_per_hwsku_hostname, self.assert_expectations() def test_get_available_firmware_version(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["nokia"]) + if self.num_components == 0: pytest.skip("No components found on device") @@ -175,6 +179,9 @@ def test_get_available_firmware_version(self, duthosts, enum_rand_one_per_hwsku_ self.assert_expectations() def test_get_firmware_update_notification(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["nokia"]) + if self.num_components == 0: pytest.skip("No components found on device") @@ -185,6 +192,9 @@ def test_get_firmware_update_notification(self, duthosts, enum_rand_one_per_hwsk pytest_assert(isinstance(notif, STRING_TYPE), "Component {}: Firmware update notification appears to be incorrect from image {}".format(i, image)) def test_install_firmware(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["nokia"]) + if self.num_components == 0: pytest.skip("No components found on device") @@ -197,6 +207,9 @@ def test_install_firmware(self, duthosts, enum_rand_one_per_hwsku_hostname, loca def test_update_firmware(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + duthost = 
duthosts[enum_rand_one_per_hwsku_hostname] + skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["nokia"]) + if self.num_components == 0: pytest.skip("No components found on device") diff --git a/tests/platform_tests/api/test_fan_drawer.py b/tests/platform_tests/api/test_fan_drawer.py index fac9d50f45..805e99788c 100644 --- a/tests/platform_tests/api/test_fan_drawer.py +++ b/tests/platform_tests/api/test_fan_drawer.py @@ -69,6 +69,20 @@ def compare_value_with_platform_facts(self, duthost, key, value, fan_drawer_idx) self.expect(value == expected_value, "'{}' value is incorrect. Got '{}', expected '{}' for fan drawer {}".format(key, value, expected_value, fan_drawer_idx)) + def get_fan_drawer_facts(self, duthost, fan_drawer_idx, def_value, *keys): + if duthost.facts.get("chassis"): + fan_drawers = duthost.facts.get("chassis").get("fan_drawers") + if fan_drawers: + value = fan_drawers[fan_drawer_idx] + for key in keys: + value = value.get(key) + if value is None: + return def_value + + return value + + return def_value + # # Functions to test methods inherited from DeviceBase class # @@ -185,28 +199,65 @@ def test_set_fan_drawers_led(self, duthosts, enum_rand_one_per_hwsku_hostname, l 2: "off" } + fan_drawers_skipped = 0 for i in range(self.num_fan_drawers): - for index, led_type in enumerate(LED_COLOR_TYPES): - led_type_result = False - for color in led_type: - result = fan_drawer.set_status_led(platform_api_conn, i, color) - if self.expect(result is not None, "Failed to perform set_status_led"): - led_type_result = result or led_type_result - if ((result is None) or (not result)): - continue - color_actual = fan_drawer.get_status_led(platform_api_conn, i) - if self.expect(color_actual is not None, "Failed to retrieve status_led"): - if self.expect(isinstance(color_actual, STRING_TYPE), "Status LED color appears incorrect"): - self.expect(color == color_actual, "Status LED color incorrect (expected: {}, actual: {} for fan_drawer {})".format( - color, color_actual, i)) - self.expect(led_type_result is True, "Failed to set status_led for fan_drawer {} to {}".format(i, LED_COLOR_TYPES_DICT[index])) + led_controllable = self.get_fan_drawer_facts(duthost, i, True, "status_led", "controllable") + led_supported_colors = self.get_fan_drawer_facts(duthost, i, None, "status_led", "colors") + + if led_controllable: + led_type_skipped = 0 + for index, led_type in enumerate(LED_COLOR_TYPES): + if led_supported_colors: + led_type = set(led_type) & set(led_supported_colors) + if not led_type: + logger.warning("test_status_led: Skipping fandrawer {} set status_led to {} (No supported colors)".format(i, LED_COLOR_TYPES_DICT[index])) + led_type_skipped += 1 + continue + + led_type_result = False + for color in led_type: + result = fan_drawer.set_status_led(platform_api_conn, i, color) + if self.expect(result is not None, "Failed to perform set_status_led"): + led_type_result = result or led_type_result + if ((result is None) or (not result)): + continue + color_actual = fan_drawer.get_status_led(platform_api_conn, i) + if self.expect(color_actual is not None, "Failed to retrieve status_led"): + if self.expect(isinstance(color_actual, STRING_TYPE), "Status LED color appears incorrect"): + self.expect(color == color_actual, "Status LED color incorrect (expected: {}, actual: {} for fan_drawer {})".format( + color, color_actual, i)) + self.expect(led_type_result is True, "Failed to set status_led for fan_drawer {} to {}".format(i, LED_COLOR_TYPES_DICT[index])) + + if led_type_skipped == 
len(LED_COLOR_TYPES): + logger.info("test_status_led: Skipping fandrawer {} (no supported colors for all types)".format(i)) + fan_drawers_skipped += 1 + + else: + logger.info("test_status_led: Skipping fandrawer {} (LED is not controllable)".format(i)) + fan_drawers_skipped += 1 + + if fan_drawers_skipped == self.num_fan_drawers: + pytest.skip("skipped as all fandrawers' LED is not controllable/no supported colors") + self.assert_expectations() def test_get_maximum_consumed_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + max_power_skipped = 0 + for i in range(self.num_fan_drawers): + max_power_supported = self.get_fan_drawer_facts(duthost, i, True, "max_consumed_power") + if not max_power_supported: + logger.info("test_get_maximum_consumed_power: Skipping drawer {} (max power not supported)".format(i)) + max_power_skipped += 1 + continue + fan_drawer_max_con_power = fan_drawer.get_maximum_consumed_power(platform_api_conn, i) if self.expect(fan_drawer_max_con_power is not None, "Unable to retrieve module {} slot id".format(i)): self.expect(isinstance(fan_drawer_max_con_power, float), "Module {} max consumed power format appears incorrect ".format(i)) + if max_power_skipped == self.num_fan_drawers: + pytest.skip("skipped as all chassis fan drawers' max consumed power is not supported") + self.assert_expectations() diff --git a/tests/platform_tests/api/test_fan_drawer_fans.py b/tests/platform_tests/api/test_fan_drawer_fans.py index a12b72c73f..5773cb8cbf 100644 --- a/tests/platform_tests/api/test_fan_drawer_fans.py +++ b/tests/platform_tests/api/test_fan_drawer_fans.py @@ -73,6 +73,21 @@ def compare_value_with_platform_facts(self, duthost, key, value, fan_drawer_idx, self.expect(value == expected_value, "'{}' value is incorrect. 
Got '{}', expected '{}' for fan {} within fan_drawer {}".format(key, value, expected_value, fan_idx, fan_drawer_idx)) + def get_fan_facts(self, duthost, fan_drawer_idx, fan_idx, def_value, *keys): + if duthost.facts.get("chassis"): + fan_drawers = duthost.facts.get("chassis").get("fan_drawers") + if fan_drawers: + fans = fan_drawers[fan_drawer_idx].get("fans") + if fans: + value = fans[fan_idx] + for key in keys: + value = value.get(key) + if value is None: + return def_value + + return value + + return def_value # # Functions to test methods inherited from DeviceBase class @@ -204,12 +219,26 @@ def test_get_direction(self, duthosts, enum_rand_one_per_hwsku_hostname, localho def test_get_fans_target_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + fan_drawers_skipped = 0 + for j in range(self.num_fan_drawers): num_fans = fan_drawer.get_num_fans(platform_api_conn, j) + fans_skipped = 0 for i in range(num_fans): - speed_target_val = 25 + speed_controllable = self.get_fan_facts(duthost, j, i, True, "speed", "controllable") + if not speed_controllable: + logger.info("test_get_fans_target_speed: Skipping fandrawer {} fan {} (speed not controllable)".format(j, i)) + fans_skipped += 1 + continue + + speed_minimum = self.get_fan_facts(duthost, j, i, 25, "speed", "minimum") + speed_maximum = self.get_fan_facts(duthost, j, i, 100, "speed", "maximum") + if speed_minimum > speed_target_val or speed_maximum < speed_target_val: + speed_target_val = random.randint(speed_minimum, speed_maximum) + speed_set = fan_drawer_fan.set_speed(platform_api_conn, j, i, speed_target_val) target_speed = fan_drawer_fan.get_target_speed(platform_api_conn, j, i) if self.expect(target_speed is not None, "Unable to retrieve Fan drawer {} fan {} target speed".format(j, i)): @@ -217,6 +246,12 @@ def test_get_fans_target_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, self.expect(target_speed == speed_target_val, "Fan drawer {} fan {} target speed setting is not correct, speed_target_val {} target_speed = {}".format( j, i, speed_target_val, target_speed)) + if fans_skipped == num_fans: + fan_drawers_skipped += 1 + + if fan_drawers_skipped == self.num_fan_drawers: + pytest.skip("skipped as all fandrawer fans' speed is not controllable") + self.assert_expectations() def test_get_fans_speed_tolerance(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): @@ -234,12 +269,26 @@ def test_get_fans_speed_tolerance(self, duthosts, enum_rand_one_per_hwsku_hostna def test_set_fans_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - for j in range(self.num_fan_drawers): - num_fans = fan_drawer.get_num_fans(platform_api_conn, j) + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + fan_drawers_skipped = 0 + for j in range(self.num_fan_drawers): target_speed = random.randint(1, 100) + num_fans = fan_drawer.get_num_fans(platform_api_conn, j) + fans_skipped = 0 for i in range(num_fans): + speed_controllable = self.get_fan_facts(duthost, j, i, True, "speed", "controllable") + if not speed_controllable: + logger.info("test_set_fans_speed: Skipping fandrawer {} fan {} (speed not controllable)".format(j, i)) + fans_skipped += 1 + continue + + speed_minimum = self.get_fan_facts(duthost, j, i, 1, "speed", "minimum") + speed_maximum = self.get_fan_facts(duthost, j, i, 100, "speed", "maximum") + if speed_minimum > target_speed or speed_maximum < target_speed: + target_speed = 
random.randint(speed_minimum, speed_maximum) + speed = fan_drawer_fan.get_speed(platform_api_conn, j, i) speed_tol = fan_drawer_fan.get_speed_tolerance(platform_api_conn, j, i) @@ -250,6 +299,12 @@ def test_set_fans_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localh self.expect(abs(act_speed - target_speed) <= speed_tol, "Fan drawer {} fan {} speed change from {} to {} is not within tolerance, actual speed {}".format(j, i, speed, target_speed, act_speed)) + if fans_skipped == num_fans: + fan_drawers_skipped += 1 + + if fan_drawers_skipped == self.num_fan_drawers: + pytest.skip("skipped as all fandrawer fans' speed is not controllable") + self.assert_expectations() def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): @@ -281,23 +336,51 @@ def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos 2: "off" } + fan_drawers_skipped = 0 for j in range(self.num_fan_drawers): num_fans = fan_drawer.get_num_fans(platform_api_conn, j) + fans_skipped = 0 for i in range(num_fans): - for index, led_type in enumerate(LED_COLOR_TYPES): - led_type_result = False - for color in led_type: - result = fan_drawer_fan.set_status_led(platform_api_conn, j, i, color) - if self.expect(result is not None, "Failed to perform set_status_led"): - led_type_result = result or led_type_result - if ((result is None) or (not result)): - continue - color_actual = fan_drawer_fan.get_status_led(platform_api_conn, j, i) - if self.expect(color_actual is not None, "Failed to retrieve status_led"): - if self.expect(isinstance(color_actual, STRING_TYPE), "Status LED color appears incorrect"): - self.expect(color == color_actual, "Status LED color incorrect (expected: {}, actual: {} for fan {})".format( - color, color_actual, i)) - self.expect(result is True, "Failed to set status_led for fan drawer {} fan {} to {}".format(j , i, LED_COLOR_TYPES_DICT[index])) + led_controllable = self.get_fan_facts(duthost, j, i, True, "status_led", "controllable") + led_supported_colors = self.get_fan_facts(duthost, j, i, None, "status_led", "colors") + + if led_controllable: + led_type_skipped = 0 + for index, led_type in enumerate(LED_COLOR_TYPES): + if led_supported_colors: + led_type = set(led_type) & set(led_supported_colors) + if not led_type: + logger.warning("test_status_led: Skipping fandrawer {} fan {} set status_led to {} (No supported colors)".format(j, i, LED_COLOR_TYPES_DICT[index])) + led_type_skipped += 1 + continue + + led_type_result = False + for color in led_type: + result = fan_drawer_fan.set_status_led(platform_api_conn, j, i, color) + if self.expect(result is not None, "Failed to perform set_status_led"): + led_type_result = result or led_type_result + if ((result is None) or (not result)): + continue + color_actual = fan_drawer_fan.get_status_led(platform_api_conn, j, i) + if self.expect(color_actual is not None, "Failed to retrieve status_led"): + if self.expect(isinstance(color_actual, STRING_TYPE), "Status LED color appears incorrect"): + self.expect(color == color_actual, "Status LED color incorrect (expected: {}, actual: {} for fan {})".format( + color, color_actual, i)) + self.expect(result is True, "Failed to set status_led for fan drawer {} fan {} to {}".format(j , i, LED_COLOR_TYPES_DICT[index])) + + if led_type_skipped == len(LED_COLOR_TYPES): + logger.info("test_status_led: Skipping fandrawer {} fan {} (no supported colors for all types)".format(j, i)) + fans_skipped += 1 + + else: + logger.info("test_status_led: Skipping fandrawer 
{} fan {} (LED is not controllable)".format(j, i)) + fans_skipped += 1 + + if fans_skipped == num_fans: + fan_drawers_skipped += 1 + + if fan_drawers_skipped == self.num_fan_drawers: + pytest.skip("skipped as all fandrawer fans' LED is not controllable/no supported colors") self.assert_expectations() diff --git a/tests/platform_tests/api/test_psu.py b/tests/platform_tests/api/test_psu.py index 53e21d5692..a6abfde990 100644 --- a/tests/platform_tests/api/test_psu.py +++ b/tests/platform_tests/api/test_psu.py @@ -1,6 +1,5 @@ import logging import re -import random import pytest import yaml @@ -65,6 +64,20 @@ def compare_value_with_platform_facts(self, duthost, key, value, psu_idx): self.expect(value == expected_value, "'{}' value is incorrect. Got '{}', expected '{}' for PSU {}".format(key, value, expected_value, psu_idx)) + def get_psu_facts(self, duthost, psu_idx, def_value, *keys): + if duthost.facts.get("chassis"): + psus = duthost.facts.get("chassis").get("psus") + if psus: + value = psus[psu_idx] + for key in keys: + value = value.get(key) + if value is None: + return def_value + + return value + + return def_value + # # Functions to test methods inherited from DeviceBase class # @@ -157,7 +170,6 @@ def test_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platf self.expect(fan and fan == fan_list[i], "Fan {} of PSU {} is incorrect".format(i, psu_id)) self.assert_expectations() - def test_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): ''' PSU power test ''' duthost = duthosts[enum_rand_one_per_hwsku_hostname] @@ -171,16 +183,25 @@ def test_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, plat voltage = psu.get_voltage(platform_api_conn, psu_id) if self.expect(voltage is not None, "Failed to retrieve voltage of PSU {}".format(psu_id)): self.expect(isinstance(voltage, float), "PSU {} voltage appears incorrect".format(psu_id)) - current = psu.get_current(platform_api_conn, psu_id) - if self.expect(current is not None, "Failed to retrieve current of PSU {}".format(psu_id)): - self.expect(isinstance(current, float), "PSU {} current appears incorrect".format(psu_id)) - power = psu.get_power(platform_api_conn, psu_id) - if self.expect(power is not None, "Failed to retrieve power of PSU {}".format(psu_id)): - self.expect(isinstance(power, float), "PSU {} power appears incorrect".format(psu_id)) - max_supp_power = psu.get_maximum_supplied_power(platform_api_conn, psu_id) - if self.expect(max_supp_power is not None, - "Failed to retrieve maximum supplied power power of PSU {}".format(psu_id)): - self.expect(isinstance(power, float), "PSU {} power appears incorrect".format(psu_id)) + current = None + current_supported = self.get_psu_facts(duthost, psu_id, True, "current") + if current_supported: + current = psu.get_current(platform_api_conn, psu_id) + if self.expect(current is not None, "Failed to retrieve current of PSU {}".format(psu_id)): + self.expect(isinstance(current, float), "PSU {} current appears incorrect".format(psu_id)) + power = None + power_supported = self.get_psu_facts(duthost, psu_id, True, "power") + if power_supported: + power = psu.get_power(platform_api_conn, psu_id) + if self.expect(power is not None, "Failed to retrieve power of PSU {}".format(psu_id)): + self.expect(isinstance(power, float), "PSU {} power appears incorrect".format(psu_id)) + max_supp_power = None + max_power_supported = self.get_psu_facts(duthost, psu_id, True, "max_power") + if max_power_supported: + max_supp_power = 
psu.get_maximum_supplied_power(platform_api_conn, psu_id) + if self.expect(max_supp_power is not None, + "Failed to retrieve maximum supplied power power of PSU {}".format(psu_id)): + self.expect(isinstance(power, float), "PSU {} power appears incorrect".format(psu_id)) if current is not None and voltage is not None and power is not None: self.expect(abs(power - (voltage*current)) < power*0.1, "PSU {} reading does not make sense \ @@ -190,12 +211,18 @@ def test_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, plat if self.expect(powergood_status is not None, "Failed to retrieve operational status of PSU {}".format(psu_id)): self.expect(powergood_status is True, "PSU {} is not operational".format(psu_id)) - high_threshold = psu.get_voltage_high_threshold(platform_api_conn, psu_id) - if self.expect(high_threshold is not None, "Failed to retrieve the high voltage threshold of PSU {}".format(psu_id)): - self.expect(isinstance(high_threshold, float), "PSU {} voltage high threshold appears incorrect".format(psu_id)) - low_threshold = psu.get_voltage_low_threshold(platform_api_conn, psu_id) - if self.expect(low_threshold is not None, "Failed to retrieve the low voltage threshold of PSU {}".format(psu_id)): - self.expect(isinstance(low_threshold, float), "PSU {} voltage low threshold appears incorrect".format(psu_id)) + high_threshold = None + voltage_high_threshold_supported = self.get_psu_facts(duthost, psu_id, True, "voltage_high_threshold") + if voltage_high_threshold_supported: + high_threshold = psu.get_voltage_high_threshold(platform_api_conn, psu_id) + if self.expect(high_threshold is not None, "Failed to retrieve the high voltage threshold of PSU {}".format(psu_id)): + self.expect(isinstance(high_threshold, float), "PSU {} voltage high threshold appears incorrect".format(psu_id)) + low_threshold = None + voltage_low_threshold_supported = self.get_psu_facts(duthost, psu_id, True, "voltage_low_threshold") + if voltage_low_threshold_supported: + low_threshold = psu.get_voltage_low_threshold(platform_api_conn, psu_id) + if self.expect(low_threshold is not None, "Failed to retrieve the low voltage threshold of PSU {}".format(psu_id)): + self.expect(isinstance(low_threshold, float), "PSU {} voltage low threshold appears incorrect".format(psu_id)) if high_threshold is not None and low_threshold is not None: self.expect(voltage < high_threshold and voltage > low_threshold, "Voltage {} of PSU {} is not in between {} and {}".format(voltage, psu_id, @@ -203,17 +230,23 @@ def test_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, plat high_threshold)) self.assert_expectations() - def test_temperature(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): ''' PSU temperature test ''' duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["arista"]) + psus_skipped = 0 for psu_id in range(self.num_psus): name = psu.get_name(platform_api_conn, psu_id) if name in self.psu_skip_list: logger.info("skipping check for {}".format(name)) else: + temperature_supported = self.get_psu_facts(duthost, psu_id, True, "temperature") + if not temperature_supported: + logger.info("test_set_fans_speed: Skipping chassis fan {} (speed not controllable)".format(psu_id)) + psus_skipped += 1 + continue + temperature = psu.get_temperature(platform_api_conn, psu_id) if self.expect(temperature is not None, "Failed to retrieve temperature of PSU {}".format(psu_id)): self.expect(isinstance(temperature, 
float), "PSU {} temperature appears incorrect".format(psu_id)) @@ -226,8 +259,11 @@ def test_temperature(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost self.expect(temperature < temp_threshold, "Temperature {} of PSU {} is over the threshold {}".format(temperature, psu_id, temp_threshold)) - self.assert_expectations() + if psus_skipped == self.num_psus: + pytest.skip("skipped as all chassis psus' temperature sensor is not supported") + + self.assert_expectations() def test_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): ''' PSU status led test ''' @@ -257,31 +293,56 @@ def test_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platfo 0: "fault", 1: "normal", 2: "off" - } + } + psus_skipped = 0 for psu_id in range(self.num_psus): name = psu.get_name(platform_api_conn, psu_id) if name in self.psu_skip_list: logger.info("skipping check for {}".format(name)) + psus_skipped += 1 else: - for index, led_type in enumerate(LED_COLOR_TYPES): - led_type_result = False - for color in led_type: - result = psu.set_status_led(platform_api_conn, psu_id, color) - if self.expect(result is not None, "Failed to perform set_status_led of PSU {}".format(psu_id)): - led_type_result = result or led_type_result - if ((result is None) or (not result)): - continue - color_actual = psu.get_status_led(platform_api_conn, psu_id) - if self.expect(color_actual is not None, - "Failed to retrieve status_led of PSU {}".format(psu_id)): - if self.expect(isinstance(color_actual, STRING_TYPE), - "PSU {} status LED color appears incorrect".format(psu_id)): - self.expect(color == color_actual, - "Status LED color incorrect (expected: {}, actual: {}) from PSU {}".format( - color, color_actual, psu_id)) - self.expect(led_type_result is True, - "Failed to set status_led of PSU {} to {}".format(psu_id, LED_COLOR_TYPES_DICT[index])) + led_controllable = self.get_psu_facts(duthost, psu_id, True, "status_led", "controllable") + led_supported_colors = self.get_psu_facts(duthost, psu_id, None, "status_led", "colors") + + if led_controllable: + led_type_skipped = 0 + for index, led_type in enumerate(LED_COLOR_TYPES): + if led_supported_colors: + led_type = set(led_type) & set(led_supported_colors) + if not led_type: + logger.warning("test_status_led: Skipping PSU {} set status_led to {} (No supported colors)".format(psu_id, LED_COLOR_TYPES_DICT[index])) + led_type_skipped += 1 + continue + + led_type_result = False + for color in led_type: + result = psu.set_status_led(platform_api_conn, psu_id, color) + if self.expect(result is not None, "Failed to perform set_status_led of PSU {}".format(psu_id)): + led_type_result = result or led_type_result + if ((result is None) or (not result)): + continue + color_actual = psu.get_status_led(platform_api_conn, psu_id) + if self.expect(color_actual is not None, + "Failed to retrieve status_led of PSU {}".format(psu_id)): + if self.expect(isinstance(color_actual, STRING_TYPE), + "PSU {} status LED color appears incorrect".format(psu_id)): + self.expect(color == color_actual, + "Status LED color incorrect (expected: {}, actual: {}) from PSU {}".format( + color, color_actual, psu_id)) + self.expect(led_type_result is True, + "Failed to set status_led of PSU {} to {}".format(psu_id, LED_COLOR_TYPES_DICT[index])) + + if led_type_skipped == len(LED_COLOR_TYPES): + logger.info("test_status_led: Skipping PSU {} (no supported colors for all types)".format(psu_id)) + psus_skipped += 1 + + else: + logger.info("test_status_led: Skipping PSU {} (LED 
is not controllable)".format(psu_id)) + psus_skipped += 1 + + if psus_skipped == self.num_psus: + pytest.skip("skipped as all PSUs' LED is not controllable/no supported colors/in skip list") self.assert_expectations() @@ -330,9 +391,8 @@ def test_master_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, if self.num_psus == 0: pytest.skip("No psus found on device skipping for device {}".format(duthost)) - else: - psu_id = random.randint(0, self.num_psus) + for psu_id in range(self.num_psus): for index, led_type in enumerate(LED_COLOR_TYPES): led_type_result = False for color in led_type: diff --git a/tests/platform_tests/api/test_psu_fans.py b/tests/platform_tests/api/test_psu_fans.py index 94acb3915a..ce3e77400a 100644 --- a/tests/platform_tests/api/test_psu_fans.py +++ b/tests/platform_tests/api/test_psu_fans.py @@ -75,6 +75,22 @@ def compare_value_with_platform_facts(self, duthost, key, value, psu_idx, fan_id "'{}' value is incorrect. Got '{}', expected '{}' for fan {} within psu {}".format(key, value, expected_value, fan_idx, psu_idx)) + def get_fan_facts(self, duthost, psu_idx, fan_idx, def_value, *keys): + if duthost.facts.get("chassis"): + psus = duthost.facts.get("chassis").get("psus") + if psus: + fans = psus[psu_idx].get("fans") + if fans: + value = fans[fan_idx] + for key in keys: + value = value.get(key) + if value is None: + return def_value + + return value + + return def_value + # # Functions to test methods inherited from DeviceBase class # @@ -205,12 +221,26 @@ def test_get_direction(self, duthosts, enum_rand_one_per_hwsku_hostname, localho def test_get_fans_target_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + psus_skipped = 0 + for j in range(self.num_psus): num_fans = psu.get_num_fans(platform_api_conn, j) + fans_skipped = 0 for i in range(num_fans): - speed_target_val = 25 + speed_controllable = self.get_fan_facts(duthost, j, i, True, "speed", "controllable") + if not speed_controllable: + logger.info("test_get_fans_target_speed: Skipping PSU {} fan {} (speed not controllable)".format(j, i)) + fans_skipped += 1 + continue + + speed_minimum = self.get_fan_facts(duthost, j, i, 25, "speed", "minimum") + speed_maximum = self.get_fan_facts(duthost, j, i, 100, "speed", "maximum") + if speed_minimum > speed_target_val or speed_maximum < speed_target_val: + speed_target_val = random.randint(speed_minimum, speed_maximum) + speed_set = psu_fan.set_speed(platform_api_conn, j, i, speed_target_val) target_speed = psu_fan.get_target_speed(platform_api_conn, j, i) if self.expect(target_speed is not None, "Unable to retrieve psu {} fan {} target speed".format(j, i)): @@ -218,6 +248,12 @@ def test_get_fans_target_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, self.expect(target_speed == speed_target_val, "psu {} fan {} target speed setting is not correct, speed_target_val {} target_speed = {}".format( j, i, speed_target_val, target_speed)) + if num_fans != 0 and fans_skipped == num_fans: + psus_skipped += 1 + + if psus_skipped == self.num_psus: + pytest.skip("skipped as all PSU fans' speed is not controllable") + self.assert_expectations() def test_get_fans_speed_tolerance(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): @@ -235,12 +271,26 @@ def test_get_fans_speed_tolerance(self, duthosts, enum_rand_one_per_hwsku_hostna def test_set_fans_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - for j in 
range(self.num_psus): - num_fans = psu.get_num_fans(platform_api_conn, j) + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + psus_skipped = 0 + for j in range(self.num_psus): target_speed = random.randint(1, 100) + num_fans = psu.get_num_fans(platform_api_conn, j) + fans_skipped = 0 for i in range(num_fans): + speed_controllable = self.get_fan_facts(duthost, j, i, True, "speed", "controllable") + if not speed_controllable: + logger.info("test_set_fans_speed: Skipping PSU {} fan {} (speed not controllable)".format(j, i)) + fans_skipped += 1 + continue + + speed_minimum = self.get_fan_facts(duthost, j, i, 1, "speed", "minimum") + speed_maximum = self.get_fan_facts(duthost, j, i, 100, "speed", "maximum") + if speed_minimum > target_speed or speed_maximum < target_speed: + target_speed = random.randint(speed_minimum, speed_maximum) + speed = psu_fan.get_speed(platform_api_conn, j, i) speed_tol = psu_fan.get_speed_tolerance(platform_api_conn, j, i) @@ -251,6 +301,12 @@ def test_set_fans_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localh self.expect(abs(act_speed - target_speed) <= speed_tol, "psu {} fan {} speed change from {} to {} is not within tolerance, actual speed {}".format(j, i, speed, target_speed, act_speed)) + if num_fans != 0 and fans_skipped == num_fans: + psus_skipped += 1 + + if psus_skipped == self.num_psus: + pytest.skip("skipped as all PSU fans' speed is not controllable") + self.assert_expectations() def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): @@ -260,13 +316,21 @@ def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos "amber", "green", ] - + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + psus_skipped = 0 for j in range(self.num_psus): num_fans = psu.get_num_fans(platform_api_conn, j) + fans_skipped = 0 for i in range(num_fans): + led_controllable = self.get_fan_facts(duthost, j, i, True, "status_led", "controllable") + if not led_controllable: + logger.info("test_set_fans_led: Skipping PSU {} fan {} (LED not controllable)".format(j, i)) + fans_skipped += 1 + continue + LED_COLOR_LIST = self.get_fan_facts(duthost, j, i, LED_COLOR_LIST, "status_led", "colors") for color in LED_COLOR_LIST: result = psu_fan.set_status_led(platform_api_conn, j, i, color) @@ -280,4 +344,10 @@ def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos self.expect(color == color_actual, "Status LED color incorrect (expected: {}, actual: {} for fan {})".format( color, color_actual, i)) + if num_fans != 0 and fans_skipped == num_fans: + psus_skipped += 1 + + if psus_skipped == self.num_psus: + pytest.skip("skipped as all PSU fans' LED is not controllable") + self.assert_expectations() diff --git a/tests/platform_tests/api/test_sfp.py b/tests/platform_tests/api/test_sfp.py index d699bddc51..a3797487c6 100644 --- a/tests/platform_tests/api/test_sfp.py +++ b/tests/platform_tests/api/test_sfp.py @@ -53,6 +53,14 @@ def setup(request, duthosts, enum_rand_one_per_hwsku_hostname, xcvr_skip_list, c physical_port_index_map.keys() \ if intf not in xcvr_skip_list[duthost.hostname]]) sfp_setup["sfp_test_port_indices"] = sorted(sfp_port_indices) + + # Fetch SFP names from platform.json + sfp_fact_names = [] + sfp_fact_list = duthost.facts.get("chassis").get("sfps") + for sfp in sfp_fact_list: + sfp_fact_names.append(sfp.get('name')) + sfp_setup["sfp_fact_names"] = sfp_fact_names + if request.cls is not None: request.cls.sfp_setup = sfp_setup @@ -137,24 +145,10 @@ class 
TestSfpApi(PlatformApiTestBase): # Helper functions # - def compare_value_with_platform_facts(self, key, value, sfp_idx, duthost): - expected_value = None - sfp_id = self.sfp_setup["sfp_port_indices"].index(sfp_idx) - if duthost.facts.get("chassis"): - expected_sfps = duthost.facts.get("chassis").get("sfps") - if expected_sfps: - expected_value = expected_sfps[sfp_id].get(key) - - if self.expect(expected_value is not None, - "Unable to get expected value for '{}' from platform.json file for SFP {}".format(key, sfp_idx)): - self.expect(value == expected_value, - "'{}' value is incorrect. Got '{}', expected '{}' for SFP {}".format(key, value, expected_value, - sfp_idx)) - def is_xcvr_optical(self, xcvr_info_dict): """Returns True if transceiver is optical, False if copper (DAC)""" #For QSFP-DD specification compliance will return type as passive or active - if xcvr_info_dict["type_abbrv_name"] == "QSFP-DD": + if xcvr_info_dict["type_abbrv_name"] == "QSFP-DD" or xcvr_info_dict["type_abbrv_name"] == "OSFP-8X": if xcvr_info_dict["specification_compliance"] == "passive_copper_media_interface": return False else: @@ -188,11 +182,13 @@ def is_xcvr_support_lpmode(self, xcvr_info_dict): # def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + expected_sfp_names = self.sfp_setup["sfp_fact_names"] for i in self.sfp_setup["sfp_test_port_indices"]: name = sfp.get_name(platform_api_conn, i) if self.expect(name is not None, "Unable to retrieve transceiver {} name".format(i)): self.expect(isinstance(name, STRING_TYPE), "Transceiver {} name appears incorrect".format(i)) - self.compare_value_with_platform_facts('name', name, i, duthosts[enum_rand_one_per_hwsku_hostname]) + self.expect(name in expected_sfp_names, + "Transceiver name '{}' for PORT{} NOT found in platform.json".format(name, i)) self.assert_expectations() def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): @@ -464,7 +460,7 @@ def test_tx_disable(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, def test_tx_disable_channel(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): """This function tests both the get_tx_disable_channel() and tx_disable_channel() APIs""" duthost = duthosts[enum_rand_one_per_hwsku_hostname] - skip_release_for_platform(duthost, ["202012"], ["arista", "mlnx"]) + skip_release_for_platform(duthost, ["202012"], ["arista", "mlnx", "nokia"]) for i in self.sfp_setup["sfp_test_port_indices"]: # First ensure that the transceiver type supports setting TX disable on individual channels @@ -513,14 +509,14 @@ def test_lpmode(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, pla logger.warning("test_lpmode: Skipping transceiver {} (not supported on this platform)".format(i)) break self.expect(ret is True, "Failed to {} low-power mode for transceiver {}".format("enable" if state is True else "disable", i)) - self.expect(wait_until(5, 1, self._check_lpmode_status, sfp, platform_api_conn, i, state), + self.expect(wait_until(5, 1, 0, self._check_lpmode_status, sfp, platform_api_conn, i, state), "Transceiver {} expected low-power state {} is not aligned with the real state".format(i, "enable" if state is True else "disable")) self.assert_expectations() def test_power_override(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): """This function tests both the get_power_override() and set_power_override() APIs""" duthost = duthosts[enum_rand_one_per_hwsku_hostname] - 
skip_release_for_platform(duthost, ["202012"], ["arista", "mlnx"]) + skip_release_for_platform(duthost, ["202012"], ["arista", "mlnx", "nokia"]) for i in self.sfp_setup["sfp_test_port_indices"]: info_dict = sfp.get_transceiver_info(platform_api_conn, i) diff --git a/tests/platform_tests/api/test_thermal.py b/tests/platform_tests/api/test_thermal.py index c7399d0cc2..cd7aacf240 100644 --- a/tests/platform_tests/api/test_thermal.py +++ b/tests/platform_tests/api/test_thermal.py @@ -65,6 +65,21 @@ def compare_value_with_platform_facts(self, duthost, key, value): "Unable to get thermal name list containing thermal '{}' from platform.json file".format(value)): self.expect(value in expected_values, "Thermal name '{}' is not included in {}".format(value, expected_values)) + + def get_thermal_facts(self, duthost, thermal_idx, def_value, *keys): + if duthost.facts.get("chassis"): + thermals = duthost.facts.get("chassis").get("thermals") + if thermals: + value = thermals[thermal_idx] + for key in keys: + value = value.get(key) + if value is None: + return def_value + + return value + + return def_value + # # Functions to test methods inherited from DeviceBase class # @@ -166,47 +181,106 @@ def test_get_maximum_recorded(self, duthosts, enum_rand_one_per_hwsku_hostname, self.assert_expectations() def test_get_low_threshold(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - # Ensure the thermal low threshold temperature is sane + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + thermals_skipped = 0 + for i in range(self.num_thermals): + threshold_supported = self.get_thermal_facts(duthost, i, True, "low-threshold") + logger.info("threshold_supported: {}".format(threshold_supported)) + if not threshold_supported: + logger.info("test_get_low_threshold: Skipping thermal {} (threshold not supported)".format(i)) + thermals_skipped += 1 + continue + low_threshold = thermal.get_low_threshold(platform_api_conn, i) + # Ensure the thermal low threshold temperature is sane if self.expect(low_threshold is not None, "Unable to retrieve Thermal {} low threshold".format(i)): self.expect(isinstance(low_threshold, float), "Thermal {} low threshold appears incorrect".format(i)) + + if thermals_skipped == self.num_thermals: + pytest.skip("skipped as all chassis thermals' low-threshold is not supported") + self.assert_expectations() def test_get_high_threshold(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - # Ensure the thermal high threshold temperature is sane + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + thermals_skipped = 0 + for i in range(self.num_thermals): + threshold_supported = self.get_thermal_facts(duthost, i, True, "high-threshold") + if not threshold_supported: + logger.info("test_get_high_threshold: Skipping thermal {} (threshold not supported)".format(i)) + thermals_skipped += 1 + continue + high_threshold = thermal.get_high_threshold(platform_api_conn, i) + # Ensure the thermal high threshold temperature is sane if self.expect(high_threshold is not None, "Unable to retrieve Thermal {} high threshold".format(i)): self.expect(isinstance(high_threshold, float), "Thermal {} high threshold appears incorrect".format(i)) + + if thermals_skipped == self.num_thermals: + pytest.skip("skipped as all chassis thermals' high-threshold is not supported") + self.assert_expectations() def test_get_low_critical_threshold(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - # Ensure the thermal low critical 
threshold temperature is sane + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + thermals_skipped = 0 + for i in range(self.num_thermals): + threshold_supported = self.get_thermal_facts(duthost, i, True, "low-crit-threshold") + if not threshold_supported: + logger.info("test_get_low_critical_threshold: Skipping thermal {} (threshold not supported)".format(i)) + thermals_skipped += 1 + continue + low_critical_threshold = thermal.get_low_critical_threshold(platform_api_conn, i) + # Ensure the thermal low critical threshold temperature is sane if self.expect(low_critical_threshold is not None, "Unable to retrieve Thermal {} low critical threshold".format(i)): self.expect(isinstance(low_critical_threshold, float), "Thermal {} low threshold appears incorrect".format(i)) + if thermals_skipped == self.num_thermals: + pytest.skip("skipped as all chassis thermals' low-critical-threshold is not supported") + self.assert_expectations() def test_get_high_critical_threshold(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - # Ensure the thermal high threshold temperature is sane + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + thermals_skipped = 0 + for i in range(self.num_thermals): + threshold_supported = self.get_thermal_facts(duthost, i, True, "high-crit-threshold") + if not threshold_supported: + logger.info("test_get_high_critical_threshold: Skipping thermal {} (threshold not supported)".format(i)) + thermals_skipped += 1 + continue + high_critical_threshold = thermal.get_high_critical_threshold(platform_api_conn, i) + # Ensure the thermal high threshold temperature is sane if self.expect(high_critical_threshold is not None, "Unable to retrieve Thermal {} high critical threshold".format(i)): self.expect(isinstance(high_critical_threshold, float), "Thermal {} high threshold appears incorrect".format(i)) + if thermals_skipped == self.num_thermals: + pytest.skip("skipped as all chassis thermals' high-critical-threshold is not supported") + self.assert_expectations() def test_set_low_threshold(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): duthost = duthosts[enum_rand_one_per_hwsku_hostname] + thermals_skipped = 0 skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["arista"]) # Ensure the thermal temperature is sane for i in range(self.num_thermals): + threshold_supported = self.get_thermal_facts(duthost, i, True, "low-threshold") + threshold_controllable = self.get_thermal_facts(duthost, i, True, "controllable") + if not threshold_supported or not threshold_controllable: + logger.info("test_set_low_threshold: Skipping thermal {} (threshold not supported or controllable)".format(i)) + thermals_skipped += 1 + continue + low_temperature = 20 result = thermal.set_low_threshold(platform_api_conn, i, low_temperature) if self.expect(result is not None, "Failed to perform set_low_threshold"): @@ -218,14 +292,25 @@ def test_set_low_threshold(self, duthosts, enum_rand_one_per_hwsku_hostname, loc self.expect(temperature == 20, "Thermal {} low threshold {} is not matching the set value {}".format(i, temperature, low_temperature)) + if thermals_skipped == self.num_thermals: + pytest.skip("skipped as all chassis thermals' low-threshold is not controllable") + self.assert_expectations() def test_set_high_threshold(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): duthost = duthosts[enum_rand_one_per_hwsku_hostname] + thermals_skipped = 0 skip_release_for_platform(duthost, ["202012", 
"201911", "201811"], ["arista"]) # Ensure the thermal temperature is sane for i in range(self.num_thermals): + threshold_supported = self.get_thermal_facts(duthost, i, True, "high-threshold") + threshold_controllable = self.get_thermal_facts(duthost, i, True, "controllable") + if not threshold_supported or not threshold_controllable: + logger.info("test_set_high_threshold: Skipping thermal {} (threshold not controllable)".format(i)) + thermals_skipped += 1 + continue + high_temperature = 80 result = thermal.set_high_threshold(platform_api_conn, i, high_temperature) if self.expect(result is not None, "Failed to perform set_high_threshold"): @@ -236,4 +321,8 @@ def test_set_high_threshold(self, duthosts, enum_rand_one_per_hwsku_hostname, lo if self.expect(isinstance(temperature, float), "Thermal {} high threshold appears incorrect".format(i)): self.expect(temperature == 80, "Thermal {} high threshold {} is not matching the set value {}".format(i, temperature, high_temperature)) + + if thermals_skipped == self.num_thermals: + pytest.skip("skipped as all chassis thermals' high-threshold is not controllable") + self.assert_expectations() diff --git a/tests/platform_tests/args/advanced_reboot_args.py b/tests/platform_tests/args/advanced_reboot_args.py index 7f9d7a739e..0b6f31c581 100644 --- a/tests/platform_tests/args/advanced_reboot_args.py +++ b/tests/platform_tests/args/advanced_reboot_args.py @@ -56,7 +56,7 @@ def add_advanced_reboot_args(parser): "--sniff_time_incr", action="store", type=int, - default=60, + default=300, help="Sniff time increment", ) diff --git a/tests/platform_tests/cli/test_show_chassis_module.py b/tests/platform_tests/cli/test_show_chassis_module.py index 226b776511..9e395c6609 100644 --- a/tests/platform_tests/cli/test_show_chassis_module.py +++ b/tests/platform_tests/cli/test_show_chassis_module.py @@ -9,7 +9,7 @@ pytest.mark.topology('t2') ] -CMD_SHOW_CHASSIS_MODULE = "show chassis-module" +CMD_SHOW_CHASSIS_MODULE = "show chassis modules" def parse_chassis_module(output, expected_headers): diff --git a/tests/platform_tests/conftest.py b/tests/platform_tests/conftest.py index 33f518f33d..82fbe549a9 100644 --- a/tests/platform_tests/conftest.py +++ b/tests/platform_tests/conftest.py @@ -353,6 +353,54 @@ def advanceboot_neighbor_restore(duthosts, rand_one_dut_hostname, nbrhosts, tbin neighbor_vm_restore(duthost, nbrhosts, tbinfo) +@pytest.fixture() +def capture_interface_counters(duthosts, rand_one_dut_hostname): + duthost = duthosts[rand_one_dut_hostname] + logging.info("Run commands to print logs") + + show_counter_cmds = [ + "show interfaces counters", + "show interfaces counters rif", + "show queue counters", + "show pfc counters" + ] + clear_counter_cmds = [ + "sonic-clear counters", + "sonic-clear queuecounters", + "sonic-clear dropcounters", + "sonic-clear rifcounters", + "sonic-clear pfccounters" + ] + if duthost.facts["asic_type"] == "broadcom": + bcm_show_cmds = [ + "bcmcmd 'show counters'", + "bcmcmd 'cstat all'" + ] + bcm_clear_cmds = [ + "bcmcmd 'clear counters'" + ] + show_counter_cmds = show_counter_cmds + bcm_show_cmds + clear_counter_cmds = clear_counter_cmds + bcm_clear_cmds + duthost.shell_cmds(cmds=clear_counter_cmds, module_ignore_errors=True, verbose=False) + results = duthost.shell_cmds(cmds=show_counter_cmds, module_ignore_errors=True, verbose=False)['results'] + outputs = [] + for res in results: + res.pop('stdout') + res.pop('stderr') + outputs.append(res) + logging.info("Counters before reboot test: dut={}, 
cmd_outputs={}".format(duthost.hostname,json.dumps(outputs, indent=4))) + + yield + + results = duthost.shell_cmds(cmds=show_counter_cmds, module_ignore_errors=True, verbose=False)['results'] + outputs = [] + for res in results: + res.pop('stdout') + res.pop('stderr') + outputs.append(res) + logging.info("Counters after reboot test: dut={}, cmd_outputs={}".format(duthost.hostname,json.dumps(outputs, indent=4))) + + def pytest_addoption(parser): add_advanced_reboot_args(parser) add_cont_warm_reboot_args(parser) diff --git a/tests/platform_tests/daemon/test_ledd.py b/tests/platform_tests/daemon/test_ledd.py index 39124bf202..992b43f062 100644 --- a/tests/platform_tests/daemon/test_ledd.py +++ b/tests/platform_tests/daemon/test_ledd.py @@ -18,7 +18,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.platform.daemon_utils import check_pmon_daemon_enable_status from tests.common.platform.processes_utils import wait_critical_processes, check_critical_processes -from tests.common.utilities import wait_until +from tests.common.utilities import wait_until, skip_release logger = logging.getLogger(__name__) @@ -119,8 +119,7 @@ def test_pmon_ledd_term_and_start_status(check_daemon_status, duthosts, rand_one """ duthost = duthosts[rand_one_dut_hostname] - if "201811" in duthost.os_version or "201911" in duthost.os_version: - pytest.skip("Skip: SIG_TERM behaves differnetly in {} on {}".format(daemon_name, duthost.os_version)) + skip_release(duthost, ["201811", "201911"]) pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name) logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid)) @@ -133,7 +132,7 @@ def test_pmon_ledd_term_and_start_status(check_daemon_status, duthosts, rand_one # TODO: To arm the wait_until API with a delay parameter, by which to delay specified time # before invoking the check function. 
-    wait_until(120, 10, check_expected_daemon_status, duthost, expected_running_status)
+    wait_until(120, 10, 0, check_expected_daemon_status, duthost, expected_running_status)
 
     post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
     pytest_assert(post_daemon_status == expected_running_status,
diff --git a/tests/platform_tests/daemon/test_pcied.py b/tests/platform_tests/daemon/test_pcied.py
index 61f1c650ad..a5e93cc958 100644
--- a/tests/platform_tests/daemon/test_pcied.py
+++ b/tests/platform_tests/daemon/test_pcied.py
@@ -18,8 +18,7 @@
 from tests.common.helpers.assertions import pytest_assert
 from tests.common.platform.daemon_utils import check_pmon_daemon_enable_status
 from tests.common.platform.processes_utils import wait_critical_processes, check_critical_processes
-from tests.common.utilities import compose_dict_from_cli
-from tests.common.utilities import wait_until, skip_release
+from tests.common.utilities import compose_dict_from_cli, skip_release, wait_until
 
 logger = logging.getLogger(__name__)
 
@@ -99,7 +98,7 @@ def _collect_data():
         shared_scope.data_after_restart = collect_data(duthost)
         return bool(shared_scope.data_after_restart['devices'])
     pcied_pooling_interval = 60
-    wait_until(pcied_pooling_interval, 6, _collect_data)
+    wait_until(pcied_pooling_interval, 6, 0, _collect_data)
     return shared_scope.data_after_restart
 
 @pytest.fixture(scope='module')
diff --git a/tests/platform_tests/daemon/test_psud.py b/tests/platform_tests/daemon/test_psud.py
new file mode 100644
index 0000000000..61a56e6bf6
--- /dev/null
+++ b/tests/platform_tests/daemon/test_psud.py
@@ -0,0 +1,208 @@
+"""
+Check daemon status inside PMON container. Each daemon status is checked under the conditions below in this script:
+* Daemon Running Status
+* Daemon Stop status
+* Daemon Restart status
+
+This script is to cover the test case in the SONiC platform daemon and service test plan:
+https://github.com/Azure/sonic-mgmt/blob/master/docs/testplan/PMON-Services-Daemons-test-plan.md
+"""
+import logging
+import re
+import time
+
+from datetime import datetime
+
+import pytest
+
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.platform.daemon_utils import check_pmon_daemon_enable_status
+from tests.common.platform.processes_utils import wait_critical_processes, check_critical_processes
+from tests.common.utilities import compose_dict_from_cli, skip_release, wait_until
+
+logger = logging.getLogger(__name__)
+
+pytestmark = [
+    pytest.mark.topology('any'),
+    pytest.mark.sanity_check(skip_sanity=True),
+    pytest.mark.disable_loganalyzer
+]
+
+expected_running_status = "RUNNING"
+expected_stopped_status = "STOPPED"
+expected_exited_status = "EXITED"
+
+daemon_name = "psud"
+
+SIG_STOP_SERVICE = None
+SIG_TERM = "-15"
+SIG_KILL = "-9"
+
+STATE_DB = 6
+psud_tbl_key = ""
+
+@pytest.fixture(scope="module", autouse=True)
+def setup(duthosts, rand_one_dut_hostname):
+    duthost = duthosts[rand_one_dut_hostname]
+    daemon_en_status = check_pmon_daemon_enable_status(duthost, daemon_name)
+    if daemon_en_status is False:
+        pytest.skip("{} is not enabled in {} {}".format(daemon_name, duthost.facts['platform'], duthost.os_version))
+
+
+@pytest.fixture(scope="module", autouse=True)
+def teardown_module(duthosts, rand_one_dut_hostname):
+    duthost = duthosts[rand_one_dut_hostname]
+    yield
+
+    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    if daemon_status != "RUNNING":
+        duthost.start_pmon_daemon(daemon_name)
+        time.sleep(10)
+    logger.info("Tearing down: to make sure all the critical services, interfaces and transceivers are good")
+    check_critical_processes(duthost, watch_secs=10)
+
+
+@pytest.fixture
+def check_daemon_status(duthosts, rand_one_dut_hostname):
+    duthost = duthosts[rand_one_dut_hostname]
+    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    if daemon_status != "RUNNING":
+        duthost.start_pmon_daemon(daemon_name)
+        time.sleep(10)
+
+def check_expected_daemon_status(duthost, expected_daemon_status):
+    daemon_status, _ = duthost.get_pmon_daemon_status(daemon_name)
+    return daemon_status == expected_daemon_status
+
+def collect_data(duthost):
+    keys = duthost.shell('sonic-db-cli STATE_DB KEYS "PSU_INFO|*"')['stdout_lines']
+
+    dev_data = {}
+    for k in keys:
+        data = duthost.shell('sonic-db-cli STATE_DB HGETALL "{}"'.format(k))['stdout_lines']
+        data = compose_dict_from_cli(data)
+        dev_data[k] = data
+
+    return {'keys': keys, 'data': dev_data}
+
+def wait_data(duthost):
+    class shared_scope:
+        data_after_restart = {}
+    def _collect_data():
+        shared_scope.data_after_restart = collect_data(duthost)
+        return bool(shared_scope.data_after_restart['data'])
+    psud_pooling_interval = 60
+    wait_until(psud_pooling_interval, 6, 0, _collect_data)
+    return shared_scope.data_after_restart
+
+@pytest.fixture(scope='module')
+def data_before_restart(duthosts, rand_one_dut_hostname):
+    duthost = duthosts[rand_one_dut_hostname]
+
+    data = collect_data(duthost)
+    return data
+
+
+def test_pmon_psud_running_status(duthosts, rand_one_dut_hostname, data_before_restart):
+    """
+    @summary: This test case is to check psud status on dut
+    """
+    duthost = duthosts[rand_one_dut_hostname]
+    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    logger.info("{} daemon is {} with pid {}".format(daemon_name, daemon_status, daemon_pid))
+    pytest_assert(daemon_status == expected_running_status,
+                  "{} expected running status is {} but is {}".format(daemon_name, expected_running_status, daemon_status))
+    pytest_assert(daemon_pid != -1,
+                  "{} expected pid is a positive integer but is {}".format(daemon_name, daemon_pid))
+
+    pytest_assert(data_before_restart['keys'], "DB keys are not available on daemon running")
+    pytest_assert(data_before_restart['data'], "DB data is not available on daemon running")
+
+
+def test_pmon_psud_stop_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
+    """
+    @summary: This test case is to check the psud stopped and restarted status
+    """
+    duthost = duthosts[rand_one_dut_hostname]
+    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))
+
+    duthost.stop_pmon_daemon(daemon_name, SIG_STOP_SERVICE)
+    time.sleep(2)
+
+    daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    pytest_assert(daemon_status == expected_stopped_status,
+                  "{} expected stopped status is {} but is {}".format(daemon_name, expected_stopped_status, daemon_status))
+    pytest_assert(daemon_pid == -1,
+                  "{} expected pid is -1 but is {}".format(daemon_name, daemon_pid))
+
+    data = collect_data(duthost)
+    pytest_assert(not data['keys'], "DB data keys are not cleared on daemon stop")
+    pytest_assert(not data['data'], "DB data is not cleared on daemon stop")
+
+    duthost.start_pmon_daemon(daemon_name)
+
+    wait_until(50, 10, 0, check_expected_daemon_status, duthost, expected_running_status)
+
+    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    pytest_assert(post_daemon_status == expected_running_status,
+                  "{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
+    pytest_assert(post_daemon_pid != -1,
+                  "{} expected pid is not -1 but is {}".format(daemon_name, post_daemon_pid))
+    pytest_assert(post_daemon_pid > pre_daemon_pid,
+                  "Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))
+
+    data_after_restart = wait_data(duthost)
+    pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
+
+
+def test_pmon_psud_term_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
+    """
+    @summary: This test case is to check the psud terminated and restarted status
+    """
+    duthost = duthosts[rand_one_dut_hostname]
+
+    skip_release(duthost, ["201811", "201911"])
+
+    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))
+
+    duthost.stop_pmon_daemon(daemon_name, SIG_TERM, pre_daemon_pid)
+
+    wait_until(50, 10, 0, check_expected_daemon_status, duthost, expected_running_status)
+
+    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    pytest_assert(post_daemon_status == expected_running_status,
+                  "{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
+    pytest_assert(post_daemon_pid != -1,
+                  "{} expected pid is not -1 but is {}".format(daemon_name, post_daemon_pid))
+    pytest_assert(post_daemon_pid > pre_daemon_pid,
+                  "Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))
+    data_after_restart = wait_data(duthost)
+    pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
+
+
+def test_pmon_psud_kill_and_start_status(check_daemon_status, duthosts, rand_one_dut_hostname, data_before_restart):
+    """
+    @summary: This test case is to check the psud killed unexpectedly (automatically restarted) status
+    """
+    duthost = duthosts[rand_one_dut_hostname]
+
+    skip_release(duthost, ["201811", "201911"])
+
+    pre_daemon_status, pre_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    logger.info("{} daemon is {} with pid {}".format(daemon_name, pre_daemon_status, pre_daemon_pid))
+
+    duthost.stop_pmon_daemon(daemon_name, SIG_KILL, pre_daemon_pid)
+
+    wait_until(120, 10, 0, check_expected_daemon_status, duthost, expected_running_status)
+
+    post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name)
+    pytest_assert(post_daemon_status == expected_running_status,
+                  "{} expected restarted status is {} but is {}".format(daemon_name, expected_running_status, post_daemon_status))
+    pytest_assert(post_daemon_pid != -1,
+                  "{} expected pid is not -1 but is {}".format(daemon_name, post_daemon_pid))
+    pytest_assert(post_daemon_pid > pre_daemon_pid,
+                  "Restarted {} pid should be bigger than {} but it is {}".format(daemon_name, pre_daemon_pid, post_daemon_pid))
+    data_after_restart = wait_data(duthost)
+    pytest_assert(data_after_restart == data_before_restart, 'DB data present before and after restart does not match')
diff --git a/tests/platform_tests/daemon/test_syseepromd.py b/tests/platform_tests/daemon/test_syseepromd.py
index fd16548e19..2d74ad1f5a 100644
--- 
a/tests/platform_tests/daemon/test_syseepromd.py +++ b/tests/platform_tests/daemon/test_syseepromd.py @@ -70,6 +70,10 @@ def check_daemon_status(duthosts, rand_one_dut_hostname): duthost.start_pmon_daemon(daemon_name) time.sleep(10) +def check_expected_daemon_status(duthost, expected_daemon_status): + daemon_status, _ = duthost.get_pmon_daemon_status(daemon_name) + return daemon_status == expected_daemon_status + def collect_data(duthost): keys = duthost.shell('sonic-db-cli STATE_DB KEYS "EEPROM_INFO|*"')['stdout_lines'] @@ -88,7 +92,7 @@ def _collect_data(): shared_scope.data_after_restart = collect_data(duthost) return bool(shared_scope.data_after_restart['data']) syseepromd_pooling_interval = 60 - wait_until(syseepromd_pooling_interval, 6, _collect_data) + wait_until(syseepromd_pooling_interval, 6, 0, _collect_data) return shared_scope.data_after_restart @pytest.fixture(scope='module') @@ -164,11 +168,7 @@ def test_pmon_syseepromd_term_and_start_status(check_daemon_status, duthosts, ra duthost.stop_pmon_daemon(daemon_name, SIG_TERM, pre_daemon_pid) - daemon_status, daemon_pid = duthost.get_pmon_daemon_status(daemon_name) - pytest_assert(daemon_status != expected_running_status and pre_daemon_pid != daemon_pid, - "{} status for SIG_TERM should not be {} with pid:{}!".format(daemon_name, daemon_status, daemon_pid)) - - time.sleep(10) + wait_until(50, 10, 0, check_expected_daemon_status, duthost, expected_running_status) post_daemon_status, post_daemon_pid = duthost.get_pmon_daemon_status(daemon_name) pytest_assert(post_daemon_status == expected_running_status, diff --git a/tests/platform_tests/fwutil/conftest.py b/tests/platform_tests/fwutil/conftest.py index f9e51eba3a..8e1f82ecc2 100644 --- a/tests/platform_tests/fwutil/conftest.py +++ b/tests/platform_tests/fwutil/conftest.py @@ -91,7 +91,7 @@ def next_image(duthost, fw_pkg): logger.info("Attempting to stage test firware onto newly-installed image.") try: - wait_until(10, 1, check_path_exists, fs_rw) + wait_until(10, 1, 0, check_path_exists, fs_rw) duthost.command("mkdir -p {}".format(fs_mountpoint)) cmd = "mount -t squashfs {} {}".format(fs_path, fs_mountpoint) diff --git a/tests/platform_tests/fwutil/fwutil_common.py b/tests/platform_tests/fwutil/fwutil_common.py index 0b5c9ac779..cb6c668c5b 100644 --- a/tests/platform_tests/fwutil/fwutil_common.py +++ b/tests/platform_tests/fwutil/fwutil_common.py @@ -71,7 +71,7 @@ def complete_install(duthost, localhost, boot_type, res, pdu_ctrl, auto_reboot=F logger.info("Waiting on switch to come up....") localhost.wait_for(host=hn, port=22, state='started', delay=10, timeout=300) logger.info("Waiting on critical systems to come online...") - wait_until(300, 30, duthost.critical_services_fully_started) + wait_until(300, 30, 0, duthost.critical_services_fully_started) time.sleep(60) # Reboot back into original image if neccesary @@ -84,7 +84,7 @@ def complete_install(duthost, localhost, boot_type, res, pdu_ctrl, auto_reboot=F time.sleep(100) logger.info("Waiting on switch to come up....") localhost.wait_for(host=hn, port=22, state='started', delay=10, timeout=150) - wait_until(300, 30, duthost.critical_services_fully_started) + wait_until(300, 30, 0, duthost.critical_services_fully_started) time.sleep(60) def show_firmware(duthost): diff --git a/tests/platform_tests/fwutil/test_fwutil.py b/tests/platform_tests/fwutil/test_fwutil.py index 36acdc825a..4a728bae30 100644 --- a/tests/platform_tests/fwutil/test_fwutil.py +++ b/tests/platform_tests/fwutil/test_fwutil.py @@ -10,7 +10,7 @@ def 
test_fwutil_show(duthost): """Tests that fwutil show has all components defined for platform""" platform_comp = {} - duthost.fetch(dest=os.path.join("firmware", "platform_components_backup.json"), + duthost.fetch(dest=os.path.join("firmware", "platform_components_backup.json"), src=os.path.join(DEVICES_PATH, duthost.facts["platform"], "platform_components.json"), flat=True) with open(os.path.join("firmware", "platform_components_backup.json")) as f: @@ -26,11 +26,11 @@ def test_fwutil_show(duthost): def test_fwutil_install_file(duthost, localhost, pdu_controller, fw_pkg, random_component): """Tests manually installing firmware to a component from a file.""" - assert call_fwutil(duthost, - localhost, - pdu_controller, - fw_pkg, - component=random_component, + assert call_fwutil(duthost, + localhost, + pdu_controller, + fw_pkg, + component=random_component, basepath=os.path.join(DEVICES_PATH, duthost.facts['platform'])) def test_fwutil_install_url(duthost, localhost, pdu_controller, fw_pkg, random_component, host_firmware): @@ -38,8 +38,8 @@ def test_fwutil_install_url(duthost, localhost, pdu_controller, fw_pkg, random_c assert call_fwutil(duthost, localhost, pdu_controller, - fw_pkg, - component=random_component, + fw_pkg, + component=random_component, basepath=host_firmware) def test_fwutil_install_bad_name(duthost, fw_pkg): @@ -61,16 +61,16 @@ def test_fwutil_update_current(duthost, localhost, pdu_controller, fw_pkg, rando assert call_fwutil(duthost, localhost, pdu_controller, - fw_pkg, + fw_pkg, component=random_component) def test_fwutil_update_next(duthost, localhost, pdu_controller, fw_pkg, random_component, next_image): """Tests updating firmware from the "next" image using fwutil update""" - assert call_fwutil(duthost, + assert call_fwutil(duthost, localhost, pdu_controller, - fw_pkg, - component=random_component, + fw_pkg, + component=random_component, next_image=next_image) def test_fwutil_update_bad_config(duthost, fw_pkg, random_component): @@ -78,7 +78,7 @@ def test_fwutil_update_bad_config(duthost, fw_pkg, random_component): versions = show_firmware(duthost) chassis = versions["chassis"].keys()[0] # Only one chassis - # Test fwutil update with config file without chassis section + # Test fwutil update with config file without chassis section with open("platform_components.json", "w") as f: json.dump({}, f, indent=4) upload_platform(duthost, {}) @@ -106,13 +106,13 @@ def test_fwutil_update_bad_config(duthost, fw_pkg, random_component): found_bad_component = find_pattern(out_bad_version['stdout_lines'], pattern_bad_component) assert found_bad_component -@pytest.mark.skip(reason="Command not yet merged into sonic-utilites") + @pytest.mark.parametrize("reboot_type", ["none", "warm", "fast", "cold", "power off"]) def test_fwutil_auto(duthost, localhost, pdu_controller, fw_pkg, reboot_type): """Tests fwutil update all command ability to properly select firmware for install based on boot type.""" - assert call_fwutil(duthost, + assert call_fwutil(duthost, localhost, pdu_controller, - fw_pkg, + fw_pkg, reboot=reboot_type) diff --git a/tests/platform_tests/link_flap/link_flap_utils.py b/tests/platform_tests/link_flap/link_flap_utils.py index e1eeb7463e..524fd445ee 100644 --- a/tests/platform_tests/link_flap/link_flap_utils.py +++ b/tests/platform_tests/link_flap/link_flap_utils.py @@ -133,7 +133,7 @@ def toggle_one_link(dut, dut_port, fanout, fanout_port, watch=False): need_recovery = True try: fanout.shutdown(fanout_port) - pytest_assert(wait_until(30, 1, __check_if_status, dut, 
dut_port, 'down', True), "dut port {} didn't go down as expected".format(dut_port)) + pytest_assert(wait_until(30, 1, 0, __check_if_status, dut, dut_port, 'down', True), "dut port {} didn't go down as expected".format(dut_port)) if watch: time.sleep(1) @@ -142,11 +142,11 @@ def toggle_one_link(dut, dut_port, fanout, fanout_port, watch=False): logger.info("Bring up fanout switch %s port %s connecting to %s", fanout.hostname, fanout_port, dut_port) fanout.no_shutdown(fanout_port) need_recovery = False - pytest_assert(wait_until(30, 1, __check_if_status, dut, dut_port, 'up', True), "dut port {} didn't go up as expected".format(dut_port)) + pytest_assert(wait_until(30, 1, 0, __check_if_status, dut, dut_port, 'up', True), "dut port {} didn't go up as expected".format(dut_port)) finally: if need_recovery: fanout.no_shutdown(fanout_port) - wait_until(30, 1, __check_if_status, dut, dut_port, 'up', True) + wait_until(30, 1, 0, __check_if_status, dut, dut_port, 'up', True) def watch_system_status(dut): diff --git a/tests/platform_tests/link_flap/test_cont_link_flap.py b/tests/platform_tests/link_flap/test_cont_link_flap.py index 4eb731702b..d939a5fbe9 100644 --- a/tests/platform_tests/link_flap/test_cont_link_flap.py +++ b/tests/platform_tests/link_flap/test_cont_link_flap.py @@ -63,7 +63,7 @@ def test_cont_link_flap(self, request, duthosts, enum_rand_one_per_hwsku_fronten # Make Sure Orch CPU < orch_cpu_threshold before starting test. logging.info("Make Sure orchagent CPU utilization is less that %d before link flap", orch_cpu_threshold) - pytest_assert(wait_until(100, 2, check_orch_cpu_utilization, duthost, orch_cpu_threshold), + pytest_assert(wait_until(100, 2, 0, check_orch_cpu_utilization, duthost, orch_cpu_threshold), "Orch CPU utilization {} > orch cpu threshold {} before link flap" .format(duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"], orch_cpu_threshold)) @@ -83,7 +83,7 @@ def test_cont_link_flap(self, request, duthosts, enum_rand_one_per_hwsku_fronten toggle_one_link(duthost, dut_port, fanout, fanout_port, watch=True) # Make Sure all ipv4/ipv6 routes are relearned with jitter of ~5 - if not wait_until(60, 1, check_bgp_routes, duthost, start_time_ipv4_route_counts, start_time_ipv6_route_counts): + if not wait_until(60, 1, 0, check_bgp_routes, duthost, start_time_ipv4_route_counts, start_time_ipv6_route_counts): endv4, endv6 = duthost.get_ip_route_summary() pytest.fail("IP routes are not equal after link flap: before ipv4 {} ipv6 {}, after ipv4 {} ipv6 {}".format(sumv4, sumv6, endv4, endv6)) @@ -112,6 +112,6 @@ def test_cont_link_flap(self, request, duthosts, enum_rand_one_per_hwsku_fronten # Orchagent CPU should consume < orch_cpu_threshold at last. 
logging.info("watch orchagent CPU utilization when it goes below %d", orch_cpu_threshold) - pytest_assert(wait_until(45, 2, check_orch_cpu_utilization, duthost, orch_cpu_threshold), + pytest_assert(wait_until(45, 2, 0, check_orch_cpu_utilization, duthost, orch_cpu_threshold), "Orch CPU utilization {} > orch cpu threshold {} before link flap" .format(duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"], orch_cpu_threshold)) diff --git a/tests/platform_tests/mellanox/check_sysfs.py b/tests/platform_tests/mellanox/check_sysfs.py index c7ced75880..7fc487bc15 100644 --- a/tests/platform_tests/mellanox/check_sysfs.py +++ b/tests/platform_tests/mellanox/check_sysfs.py @@ -67,7 +67,7 @@ def check_sysfs(dut): if not _is_fan_speed_in_range(sysfs_facts): sysfs_fan_config = [generate_sysfs_fan_config(platform_data)] - assert wait_until(30, 5, _check_fan_speed_in_range, dut, sysfs_fan_config), "Fan speed not in range" + assert wait_until(30, 5, 0, _check_fan_speed_in_range, dut, sysfs_fan_config), "Fan speed not in range" logging.info("Check CPU related sysfs") cpu_temp_high_counter = 0 @@ -129,6 +129,17 @@ def check_sysfs(dut): assert "Invalid PSU fan speed value {} for PSU {}, exception: {}".format(psu_info["fan_speed"], psu_id, e) + # Check consistency between voltage capability and sysfs + all_capabilities = platform_data["psus"].get("capabilities") + if all_capabilities: + for capabilities in all_capabilities: + psu_cmd_prefix = 'cat /var/run/hw-management/power/{}_'.format(capabilities.format(psu_id)) + psu_capability = dut.command(psu_cmd_prefix + 'capability')['stdout'].split() + for capability in psu_capability: + # Each capability should exist + output = dut.command(psu_cmd_prefix + capability)['stdout'] + assert output, "PSU capability {} doesn't not exist".format(capability) + logging.info("Check SFP related sysfs") for sfp_id, sfp_info in sysfs_facts['sfp_info'].items(): assert sfp_info["temp_fault"] == '0', "SFP%d temp fault" % sfp_id diff --git a/tests/platform_tests/mellanox/mellanox_thermal_control_test_helper.py b/tests/platform_tests/mellanox/mellanox_thermal_control_test_helper.py index b6f4a3401b..062dffe802 100644 --- a/tests/platform_tests/mellanox/mellanox_thermal_control_test_helper.py +++ b/tests/platform_tests/mellanox/mellanox_thermal_control_test_helper.py @@ -510,7 +510,7 @@ def mock_speed(self, speed): """ max_speed = self.get_max_speed() if max_speed > 0: - speed_in_rpm = max_speed * speed / 100 + speed_in_rpm = int(round(float(max_speed) * speed / 100)) self.helper.mock_thermal_value(self.speed_file, str(speed_in_rpm)) else: self.helper.mock_thermal_value(self.speed_file, str(speed)) diff --git a/tests/platform_tests/mellanox/minimum_table.py b/tests/platform_tests/mellanox/minimum_table.py index 34fc3b9599..70435280e5 100644 --- a/tests/platform_tests/mellanox/minimum_table.py +++ b/tests/platform_tests/mellanox/minimum_table.py @@ -44,8 +44,8 @@ "unk_untrust": {"-127:5":12, "6:20":13, "21:30":14, "31:35":15, "36:40":16, "41:120":17}, }, 'x86_64-mlnx_msn4410-r0': { - "unk_trust": {"-127:35":14, "36:120":15}, - "unk_untrust": {"-127:35":14, "36:120":15}, + "unk_trust": {"-127:40":12, "41:120":13}, + "unk_untrust": {"-127:10":12, "11:20":13, "21:30":14, "31:35":15, "36:120":16}, }, 'x86_64-mlnx_msn4600-r0': { "unk_trust": {"-127:40": 12, "41:120": 13}, diff --git a/tests/platform_tests/mellanox/test_thermal_control.py b/tests/platform_tests/mellanox/test_thermal_control.py index ad7140c893..d39a05316a 100644 --- 
a/tests/platform_tests/mellanox/test_thermal_control.py +++ b/tests/platform_tests/mellanox/test_thermal_control.py @@ -47,6 +47,7 @@ def test_dynamic_minimum_table(duthosts, rand_one_dut_hostname, mocker_factory): mocker.mock_min_table(temperature, trust_state) assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, + 0, check_cooling_level_larger_than_minimum, duthost, expect_minimum_cooling_level), \ @@ -59,6 +60,7 @@ def test_dynamic_minimum_table(duthosts, rand_one_dut_hostname, mocker_factory): mocker.mock_min_table(temperature, not trust_state) assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, + 0, check_cooling_level_larger_than_minimum, duthost, expect_minimum_cooling_level), \ @@ -82,6 +84,7 @@ def test_set_psu_fan_speed(duthosts, rand_one_dut_hostname, mocker_factory): single_fan_mocker.mock_absence() assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME * 2, THERMAL_CONTROL_TEST_CHECK_INTERVAL, + 0, check_psu_fan_speed, duthost, psu_num, @@ -92,6 +95,7 @@ def test_set_psu_fan_speed(duthosts, rand_one_dut_hostname, mocker_factory): single_fan_mocker.mock_presence() wait_result = wait_until(THERMAL_CONTROL_TEST_WAIT_TIME * 2, THERMAL_CONTROL_TEST_CHECK_INTERVAL, + 0, check_psu_fan_speed, duthost, psu_num, diff --git a/tests/platform_tests/test_advanced_reboot.py b/tests/platform_tests/test_advanced_reboot.py index ec3bb03b58..68b9b7af9d 100644 --- a/tests/platform_tests/test_advanced_reboot.py +++ b/tests/platform_tests/test_advanced_reboot.py @@ -22,8 +22,9 @@ ### Tetcases to verify normal reboot procedure ### +@pytest.mark.usefixtures('get_advanced_reboot') def test_fast_reboot(request, get_advanced_reboot, verify_dut_health, - advanceboot_loganalyzer): + advanceboot_loganalyzer, capture_interface_counters): ''' Fast reboot test case is run using advacned reboot test fixture @@ -36,7 +37,7 @@ def test_fast_reboot(request, get_advanced_reboot, verify_dut_health, @pytest.mark.device_type('vs') def test_warm_reboot(request, get_advanced_reboot, verify_dut_health, - advanceboot_loganalyzer): + advanceboot_loganalyzer, capture_interface_counters): ''' Warm reboot test case is run using advacned reboot test fixture diff --git a/tests/platform_tests/test_auto_negotiation.py b/tests/platform_tests/test_auto_negotiation.py index edb7b54882..acd10951a0 100644 --- a/tests/platform_tests/test_auto_negotiation.py +++ b/tests/platform_tests/test_auto_negotiation.py @@ -231,6 +231,7 @@ def test_auto_negotiation_advertised_speeds_all(): logger.info('Wait until all ports are up') wait_result = wait_until(ALL_PORT_WAIT_TIME, PORT_STATUS_CHECK_INTERVAL, + 0, check_ports_up, duthost, [item[1] for item in candidates.values()]) @@ -290,6 +291,7 @@ def test_auto_negotiation_dut_advertises_each_speed(enum_dut_portname_module_fix logger.info('Wait until the port status is up, expected speed: {}'.format(speed)) wait_result = wait_until(SINGLE_PORT_WAIT_TIME, PORT_STATUS_CHECK_INTERVAL, + 0, check_ports_up, duthost, [dut_port], @@ -337,6 +339,7 @@ def test_auto_negotiation_fanout_advertises_each_speed(enum_dut_portname_module_ logger.info('Wait until the port status is up, expected speed: {}'.format(speed)) wait_result = wait_until(SINGLE_PORT_WAIT_TIME, PORT_STATUS_CHECK_INTERVAL, + 0, check_ports_up, duthost, [dut_port], @@ -382,6 +385,7 @@ def test_force_speed(enum_dut_portname_module_fixture): logger.info('Wait until the port status is up, expected speed: {}'.format(speed)) wait_result = wait_until(SINGLE_PORT_WAIT_TIME, 
PORT_STATUS_CHECK_INTERVAL, + 0, check_ports_up, duthost, [dut_port], @@ -450,6 +454,6 @@ def get_cable_supported_speeds(cls, duthost, dut_port_name): if pos == -1: return None speeds_str = output[pos+1:-1] - speeds = [speed[:-1] + '000' for speed in speeds_str.split(',')] + speeds = list(set([speed.split('G')[0] + '000' for speed in speeds_str.split(',')])) cls.supported_speeds[(duthost, dut_port_name)] = speeds return speeds diff --git a/tests/platform_tests/test_cpu_memory_usage.py b/tests/platform_tests/test_cpu_memory_usage.py index 03c89e59e4..5d4bc0a096 100644 --- a/tests/platform_tests/test_cpu_memory_usage.py +++ b/tests/platform_tests/test_cpu_memory_usage.py @@ -24,10 +24,10 @@ def test_cpu_memory_usage(duthosts, enum_rand_one_per_hwsku_hostname, setup_thre """Check DUT memory usage and process cpu usage are within threshold.""" duthost = duthosts[enum_rand_one_per_hwsku_hostname] MonitResult = namedtuple('MonitResult', ['processes', 'memory']) - monit_results = duthost.monit_process(iterations=12)['monit_results'] + monit_results = duthost.monit_process(iterations=24)['monit_results'] memory_threshold, cpu_threshold = setup_thresholds - persist_threshold = 4 + persist_threshold = 8 outstanding_mem_polls = {} outstanding_procs = {} outstanding_procs_counter = Counter() @@ -46,7 +46,7 @@ def test_cpu_memory_usage(duthosts, enum_rand_one_per_hwsku_hostname, setup_thre persist_outstanding_procs = [] for pid, freq in outstanding_procs_counter.most_common(): - if freq < persist_threshold: + if freq <= persist_threshold: break persist_outstanding_procs.append(pid) diff --git a/tests/platform_tests/test_platform_info.py b/tests/platform_tests/test_platform_info.py index c905bcd218..c1292ef712 100644 --- a/tests/platform_tests/test_platform_info.py +++ b/tests/platform_tests/test_platform_info.py @@ -45,11 +45,18 @@ LOG_EXPECT_INSUFFICIENT_FAN_NUM_RE = '.*Insufficient number of working fans warning:.*' LOG_EXPECT_INSUFFICIENT_FAN_NUM_CLEAR_RE = '.*Insufficient number of working fans warning cleared:.*' +# These error messages are not triggered by platform test cases, +# Ref to https://github.com/Azure/sonic-buildimage/issues/8944 +SKIP_ERROR_LOG_COMMON = ['.*ERR syncd#syncd:.*SAI_API_QUEUE:_brcm_sai_cosq_stat_get:.* queue egress Min limit get failed with error Invalid parameter.*', + '.*ERR syncd#syncd:.*collectQueueCounters: QUEUE_WATERMARK_STAT_COUNTER: failed to get stats of queue.*'] + SKIP_ERROR_LOG_SHOW_PLATFORM_TEMP = ['.*ERR pmon#thermalctld.*int\(\) argument must be a string.* or a number.*', '.*ERR pmon#thermalctld.*invalid literal for int\(\) with base 10.*'] SKIP_ERROR_LOG_PSU_ABSENCE = ['.*Error getting sensor data: dps460.*Kernel interface error.*'] +SKIP_ERROR_LOG_SHOW_PLATFORM_TEMP.extend(SKIP_ERROR_LOG_COMMON) +SKIP_ERROR_LOG_PSU_ABSENCE.extend(SKIP_ERROR_LOG_COMMON) def check_sensord_status(ans_host): """ @@ -359,6 +366,7 @@ def test_thermal_control_psu_absence(duthosts, enum_rand_one_per_hwsku_hostname, logging.info('Wait and check all FAN speed turn to 60%...') wait_result = wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, + 0, fan_mocker.check_all_fan_speed, 60) @@ -383,6 +391,7 @@ def test_thermal_control_psu_absence(duthosts, enum_rand_one_per_hwsku_hostname, logging.info('Wait and check all FAN speed turn to 65%...') pytest_assert(wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, + 0, fan_mocker.check_all_fan_speed, 65), 'FAN speed not change to 65% according to policy') @@ -409,6 +418,7 @@ def 
turn_off_outlet_and_check_thermal_control(dut, pdu_ctrl, outlet, mocker): logging.info('Wait and check all FAN speed turn to 100%...') pytest_assert(wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, + 0, mocker.check_all_fan_speed, 100), 'FAN speed not turn to 100% after PSU off') @@ -432,7 +442,7 @@ def test_thermal_control_fan_status(duthosts, enum_rand_one_per_hwsku_hostname, logging.info('Mock FAN status data...') fan_mocker.mock_data() # make data random restart_thermal_control_daemon(duthost) - wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, fan_mocker.check_all_fan_speed, + wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, 0, fan_mocker.check_all_fan_speed, 60) check_thermal_algorithm_status(duthost, mocker_factory, False) diff --git a/tests/platform_tests/test_reboot.py b/tests/platform_tests/test_reboot.py index f8055c1ef8..51ca3a5264 100644 --- a/tests/platform_tests/test_reboot.py +++ b/tests/platform_tests/test_reboot.py @@ -75,14 +75,14 @@ def check_interfaces_and_services(dut, interfaces, xcvr_skip_list, reboot_type = if reboot_type is not None: logging.info("Check reboot cause") - assert wait_until(MAX_WAIT_TIME_FOR_REBOOT_CAUSE, 20, check_reboot_cause, dut, reboot_type), \ + assert wait_until(MAX_WAIT_TIME_FOR_REBOOT_CAUSE, 20, 0, check_reboot_cause, dut, reboot_type), \ "got reboot-cause failed after rebooted by %s" % reboot_type if "201811" in dut.os_version or "201911" in dut.os_version: logging.info("Skip check reboot-cause history for version before 202012") else: logger.info("Check reboot-cause history") - assert wait_until(MAX_WAIT_TIME_FOR_REBOOT_CAUSE, 20, check_reboot_cause_history, dut, + assert wait_until(MAX_WAIT_TIME_FOR_REBOOT_CAUSE, 20, 0, check_reboot_cause_history, dut, REBOOT_TYPE_HISTOYR_QUEUE), "Check reboot-cause history failed after rebooted by %s" % reboot_type if reboot_ctrl_dict[reboot_type]["test_reboot_cause_only"]: logging.info("Further checking skipped for %s test which intends to verify reboot-cause only" % reboot_type) @@ -92,7 +92,7 @@ def check_interfaces_and_services(dut, interfaces, xcvr_skip_list, reboot_type = logging.info("skipping interfaces related check for supervisor") else: logging.info("Wait {} seconds for all the transceivers to be detected".format(MAX_WAIT_TIME_FOR_INTERFACES)) - result = wait_until(MAX_WAIT_TIME_FOR_INTERFACES, 20, check_all_interface_information, dut, interfaces, + result = wait_until(MAX_WAIT_TIME_FOR_INTERFACES, 20, 0, check_all_interface_information, dut, interfaces, xcvr_skip_list) assert result, "Not all transceivers are detected or interfaces are up in {} seconds".format( MAX_WAIT_TIME_FOR_INTERFACES) @@ -209,11 +209,17 @@ def test_power_off_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, @param power_off_delay: Pytest parameter. The delay between turning off and on the PSU """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] + UNSUPPORTED_ASIC_TYPE = ["cisco-8000"] + if duthost.facts["asic_type"] in UNSUPPORTED_ASIC_TYPE: + pytest.skip("Skipping test_power_off_reboot. Test unsupported on {} platform".format(duthost.facts["asic_type"])) pdu_ctrl = pdu_controller if pdu_ctrl is None: pytest.skip("No PSU controller for %s, skip rest of the testing in this case" % duthost.hostname) all_outlets = pdu_ctrl.get_outlet_status() + # If PDU supports returning output_watts, making sure that all outlets has power. 
+ no_power = [item for item in all_outlets if int(item.get('output_watts', '1')) == 0] + pytest_assert(not no_power, "Not all outlets have power output: {}".format(no_power)) # Purpose of this list is to control sequence of turning on PSUs in power off testing. # If there are 2 PSUs, then 3 scenarios would be covered: @@ -229,13 +235,23 @@ def test_power_off_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, poweroff_reboot_kwargs = {"dut": duthost} - for power_on_seq in power_on_seq_list: - poweroff_reboot_kwargs["pdu_ctrl"] = pdu_ctrl - poweroff_reboot_kwargs["all_outlets"] = all_outlets - poweroff_reboot_kwargs["power_on_seq"] = power_on_seq - poweroff_reboot_kwargs["delay_time"] = power_off_delay - reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, REBOOT_TYPE_POWEROFF, - _power_off_reboot_helper, poweroff_reboot_kwargs) + try: + for power_on_seq in power_on_seq_list: + poweroff_reboot_kwargs["pdu_ctrl"] = pdu_ctrl + poweroff_reboot_kwargs["all_outlets"] = all_outlets + poweroff_reboot_kwargs["power_on_seq"] = power_on_seq + poweroff_reboot_kwargs["delay_time"] = power_off_delay + reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, REBOOT_TYPE_POWEROFF, + _power_off_reboot_helper, poweroff_reboot_kwargs) + except Exception as e: + logging.debug("Restore power after test failure") + for outlet in all_outlets: + logging.debug("turning on {}".format(outlet)) + pdu_ctrl.turn_on_outlet(outlet) + # Sleep 120 for dut to boot up + time.sleep(120) + wait_critical_processes(duthost) + raise e def test_watchdog_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost, conn_graph_facts, xcvr_skip_list): @@ -260,6 +276,5 @@ def test_continuous_reboot(duthosts, enum_rand_one_per_hwsku_hostname, localhost for i in range(3): reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_COLD) ls_ending_out = set(duthost.shell("ls /dev/C0-*", module_ignore_errors=True)["stdout"].split()) - pytest_assert(ls_ending_out.difference(ls_starting_out).size() == 0 and - ls_starting_out.difference(ls_ending_out).size() == 0, + pytest_assert(ls_ending_out == ls_starting_out, "Console devices have changed: expected console devices: {}, got: {}".format(", ".join(sorted(ls_starting_out)), ", ".join(sorted(ls_ending_out)))) diff --git a/tests/platform_tests/test_reload_config.py b/tests/platform_tests/test_reload_config.py index 4d8cc9c992..2c76bc50a4 100644 --- a/tests/platform_tests/test_reload_config.py +++ b/tests/platform_tests/test_reload_config.py @@ -30,7 +30,7 @@ def test_reload_configuration(duthosts, rand_one_dut_hostname, conn_graph_facts, asic_type = duthost.facts["asic_type"] if config_force_option_supported(duthost): - assert wait_until(300, 20, config_system_checks_passed, duthost) + assert wait_until(300, 20, 0, config_system_checks_passed, duthost) logging.info("Reload configuration") duthost.shell("sudo config reload -y &>/dev/null", executable="/bin/bash") @@ -39,7 +39,7 @@ def test_reload_configuration(duthosts, rand_one_dut_hostname, conn_graph_facts, wait_critical_processes(duthost) logging.info("Wait some time for all the transceivers to be detected") - assert wait_until(300, 20, check_all_interface_information, duthost, interfaces, xcvr_skip_list), \ + assert wait_until(300, 20, 0, check_all_interface_information, duthost, interfaces, xcvr_skip_list), \ "Not all transceivers are detected in 300 seconds" 
logging.info("Check transceiver status") @@ -74,7 +74,7 @@ def test_reload_configuration_checks(duthosts, rand_one_dut_hostname, localhost, out = duthost.shell("sudo config reload -y", executable="/bin/bash") # config reload command shouldn't work immediately after system reboot assert "Retry later" in out['stdout'] - assert wait_until(300, 20, config_system_checks_passed, duthost) + assert wait_until(300, 20, 0, config_system_checks_passed, duthost) # After the system checks succeed the config reload command should not throw error out = duthost.shell("sudo config reload -y", executable="/bin/bash") @@ -84,7 +84,7 @@ def test_reload_configuration_checks(duthosts, rand_one_dut_hostname, localhost, logging.info("Checking config reload after system is up") out = duthost.shell("sudo config reload -y", executable="/bin/bash") assert "Retry later" in out['stdout'] - assert wait_until(300, 20, config_system_checks_passed, duthost) + assert wait_until(300, 20, 0, config_system_checks_passed, duthost) logging.info("Stopping swss docker and checking config reload") duthost.shell("sudo service swss stop") @@ -98,4 +98,4 @@ def test_reload_configuration_checks(duthosts, rand_one_dut_hostname, localhost, out = duthost.shell("sudo config reload -y -f", executable="/bin/bash") assert "Retry later" not in out['stdout'] - assert wait_until(300, 20, config_system_checks_passed, duthost) + assert wait_until(300, 20, 0, config_system_checks_passed, duthost) diff --git a/tests/platform_tests/test_sequential_restart.py b/tests/platform_tests/test_sequential_restart.py index d8f03889c0..b549d3ad15 100644 --- a/tests/platform_tests/test_sequential_restart.py +++ b/tests/platform_tests/test_sequential_restart.py @@ -66,7 +66,7 @@ def restart_service_and_check(localhost, dut, enum_frontend_asic_index, service, wait_critical_processes(dut) logging.info("Wait some time for all the transceivers to be detected") - pytest_assert(wait_until(300, 20, check_interface_information, dut, enum_frontend_asic_index, interfaces, xcvr_skip_list), + pytest_assert(wait_until(300, 20, 0, check_interface_information, dut, enum_frontend_asic_index, interfaces, xcvr_skip_list), "Not all interface information are detected within 300 seconds") logging.info("Check transceiver status on asic %s" % enum_frontend_asic_index) @@ -105,7 +105,7 @@ def test_restart_swss(duthosts, rand_one_dut_hostname, enum_frontend_asic_index, restart_service_and_check(localhost, duthost, enum_frontend_asic_index, "swss", all_interfaces, xcvr_skip_list) -@pytest.mark.skip(reason="Restarting syncd is not supported yet") + def test_restart_syncd(duthosts, rand_one_dut_hostname, enum_frontend_asic_index, localhost, conn_graph_facts, xcvr_skip_list): """ @summary: This test case is to restart the syncd service and check platform status diff --git a/tests/platform_tests/thermal_control_test_helper.py b/tests/platform_tests/thermal_control_test_helper.py index a7202178b7..78f8033682 100644 --- a/tests/platform_tests/thermal_control_test_helper.py +++ b/tests/platform_tests/thermal_control_test_helper.py @@ -263,7 +263,7 @@ def restart_thermal_control_daemon(dut): assert output["rc"] == 0, "Run command '%s' failed" % find_thermalctld_pid_cmd # Usually there should be 2 thermalctld processes, but there is chance that # sonic platform API might use subprocess which creates extra thermalctld process. 
- # For example, chassis.get_all_sfps will call sfp constructor, and sfp constructor may + # For example, chassis.get_all_sfps will call sfp constructor, and sfp constructor may # use subprocess to call ethtool to do initialization. # So we check here thermalcltd must have at least 2 processes. assert len(output["stdout_lines"]) >= 2, "There should be at least 2 thermalctld process" @@ -303,7 +303,8 @@ def __enter__(self): thermal control daemon to make it effect. :return: """ - if os.path.exists(self.thermal_policy_file_path): + out = self.dut.stat(path=self.thermal_policy_file_path) + if out['stat']['exists']: self.dut.command('mv -f {} {}'.format(self.thermal_policy_file_path, self.thermal_policy_file_backup_path)) else: logging.warning("Thermal Policy file {} not found".format(self.thermal_policy_file_path)) @@ -318,8 +319,8 @@ def __exit__(self, exc_type, exc_val, exc_tb): :param exc_tb: Not used. :return: """ - - if os.path.exists(self.thermal_policy_file_backup_path): + out = self.dut.stat(path=self.thermal_policy_file_backup_path) + if out['stat']['exists']: self.dut.command('mv -f {} {}'.format(self.thermal_policy_file_backup_path, self.thermal_policy_file_path)) restart_thermal_control_daemon(self.dut) @@ -328,7 +329,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def disable_thermal_policy(duthosts, enum_rand_one_per_hwsku_hostname): """Fixture to help disable thermal policy during the test. After test, it will automatically re-enable thermal policy. The idea here is to make thermalctld - load a invalid policy file. To use this fixture, the test case will probably + load a invalid policy file. To use this fixture, the test case will probably marked as @pytest.mark.disable_loganalyzer. Args: diff --git a/tests/process_monitoring/test_critical_process_monitoring.py b/tests/process_monitoring/test_critical_process_monitoring.py index f210bb565d..c80b136adb 100755 --- a/tests/process_monitoring/test_critical_process_monitoring.py +++ b/tests/process_monitoring/test_critical_process_monitoring.py @@ -167,7 +167,7 @@ def postcheck_critical_processes_status(duthost, up_bgp_neighbors): for 3 minutes. It will return False after timeout. """ logger.info("Post-checking status of critical processes and BGP sessions...") - return wait_until(POST_CHECK_THRESHOLD_SECS, POST_CHECK_INTERVAL_SECS, + return wait_until(POST_CHECK_THRESHOLD_SECS, POST_CHECK_INTERVAL_SECS, 0, post_test_check, duthost, up_bgp_neighbors) diff --git a/tests/ptf_runner.py b/tests/ptf_runner.py index 65ae763699..5d72eb6e70 100644 --- a/tests/ptf_runner.py +++ b/tests/ptf_runner.py @@ -1,4 +1,9 @@ import pipes +import traceback +import logging + +logger = logging.getLogger(__name__) + def ptf_runner(host, testdir, testname, platform_dir=None, params={}, platform="remote", qlen=0, relax=True, debug_level="info", @@ -38,8 +43,14 @@ def ptf_runner(host, testdir, testname, platform_dir=None, params={}, if timeout: cmd += " --test-case-timeout {}".format(int(timeout)) - result = host.shell(cmd, chdir="/root", module_ignore_errors=module_ignore_errors) - if module_ignore_errors: - if result["rc"] != 0: - return result + try: + result = host.shell(cmd, chdir="/root", module_ignore_errors=module_ignore_errors) + if module_ignore_errors: + if result["rc"] != 0: + return result + except Exception: + traceback_msg = traceback.format_exc() + logger.error("Exception caught while executing case: {}. 
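The os.path.exists() to dut.stat() change above is needed because the thermal policy file lives on the DUT rather than on the host running pytest, so existence has to be checked through the Ansible stat module. A small sketch of the pattern, assuming a host object exposing stat() as in the __enter__/__exit__ methods above:

def remote_file_exists(dut, path):
    """Return True if `path` exists on the DUT, not on the local test host."""
    result = dut.stat(path=path)       # Ansible 'stat' module, the same call used above
    return result['stat']['exists']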
Error message: {}"\ + .format(testname, traceback_msg)) + raise Exception return True diff --git a/tests/qos/files/qos.yml b/tests/qos/files/qos.yml index 673f4eb640..fe5544beaa 100644 --- a/tests/qos/files/qos.yml +++ b/tests/qos/files/qos.yml @@ -1691,19 +1691,22 @@ qos_params: th3: topo-t0-80: 100000_300m: - pkts_num_leak_out: 92 + pkts_num_leak_out: 42 + pkts_num_egr_mem: 50 xoff_1: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 22058 - pkts_num_trig_ingr_drp: 22520 + pkts_num_trig_pfc: 21898 + pkts_num_trig_ingr_drp: 22360 + pkts_num_margin: 4 xoff_2: dscp: 4 ecn: 1 pg: 4 - pkts_num_trig_pfc: 22058 - pkts_num_trig_ingr_drp: 22520 + pkts_num_trig_pfc: 21898 + pkts_num_trig_ingr_drp: 22360 + pkts_num_margin: 4 hdrm_pool_size: dscps: [3, 4] ecn: 1 @@ -1718,33 +1721,34 @@ qos_params: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 22058 - pkts_num_trig_ingr_drp: 22520 + pkts_num_trig_pfc: 21898 + pkts_num_trig_ingr_drp: 22360 cell_size: 254 pkts_num_margin: 2 xon_1: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 22058 - pkts_num_dismiss_pfc: 7 + pkts_num_trig_pfc: 21898 + pkts_num_dismiss_pfc: 13 xon_2: dscp: 4 ecn: 1 pg: 4 - pkts_num_trig_pfc: 22058 - pkts_num_dismiss_pfc: 7 + pkts_num_trig_pfc: 21898 + pkts_num_dismiss_pfc: 13 lossy_queue_1: dscp: 8 ecn: 1 pg: 0 - pkts_num_trig_egr_drp: 73501 + pkts_num_trig_egr_drp: 72967 + pkts_num_margin: 11 wm_pg_shared_lossless: dscp: 3 ecn: 1 pg: 3 pkts_num_fill_min: 10 - pkts_num_trig_pfc: 22058 + pkts_num_trig_pfc: 21898 packet_size: 64 cell_size: 254 wm_pg_shared_lossy: @@ -1752,7 +1756,7 @@ qos_params: ecn: 1 pg: 0 pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 73501 + pkts_num_trig_egr_drp: 72967 packet_size: 64 cell_size: 254 wm_q_shared_lossless: @@ -1760,7 +1764,7 @@ qos_params: ecn: 1 queue: 3 pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 22520 + pkts_num_trig_ingr_drp: 22360 cell_size: 254 wm_buf_pool_lossless: dscp: 3 @@ -1768,8 +1772,8 @@ qos_params: pg: 3 queue: 3 pkts_num_fill_ingr_min: 7 - pkts_num_trig_pfc: 22058 - pkts_num_trig_ingr_drp: 22520 + pkts_num_trig_pfc: 21898 + pkts_num_trig_ingr_drp: 22360 pkts_num_fill_egr_min: 8 cell_size: 254 wm_q_shared_lossy: @@ -1777,7 +1781,7 @@ qos_params: ecn: 1 queue: 0 pkts_num_fill_min: 7 - pkts_num_trig_egr_drp: 73501 + pkts_num_trig_egr_drp: 72967 cell_size: 254 wm_buf_pool_lossy: dscp: 8 @@ -1785,7 +1789,7 @@ qos_params: pg: 0 queue: 0 pkts_num_fill_ingr_min: 0 - pkts_num_trig_egr_drp: 73501 + pkts_num_trig_egr_drp: 72967 pkts_num_fill_egr_min: 7 cell_size: 254 ecn_1: @@ -1842,27 +1846,30 @@ qos_params: cell_size: 254 topo-t1-lag: 400000_300m: - pkts_num_leak_out: 230 + pkts_num_leak_out: 109 + pkts_num_egr_mem: 101 xoff_1: dscp: 3 ecn: 1 pg: 3 pkts_num_trig_pfc: 15152 pkts_num_trig_ingr_drp: 16624 + pkts_num_margin: 4 xoff_2: dscp: 4 ecn: 1 pg: 4 pkts_num_trig_pfc: 15152 pkts_num_trig_ingr_drp: 16624 + pkts_num_margin: 4 hdrm_pool_size: dscps: [3, 4] ecn: 1 pgs: [3, 4] - src_port_ids: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - dst_port_id: 20 + src_port_ids: [0, 2, 4, 6, 8, 10, 12, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + dst_port_id: 29 pgs_num: 37 - pkts_num_trig_pfc: 1857 + pkts_num_trig_pfc: 1856 pkts_num_hdrm_full: 1472 pkts_num_hdrm_partial: 552 wm_pg_headroom: @@ -1878,18 +1885,19 @@ qos_params: ecn: 1 pg: 3 pkts_num_trig_pfc: 15152 - pkts_num_dismiss_pfc: 9 + pkts_num_dismiss_pfc: 13 xon_2: dscp: 4 ecn: 1 pg: 4 pkts_num_trig_pfc: 15152 - pkts_num_dismiss_pfc: 9 + pkts_num_dismiss_pfc: 13 lossy_queue_1: dscp: 8 ecn: 1 pg: 0 pkts_num_trig_egr_drp: 50482 + 
pkts_num_margin: 5 wm_pg_shared_lossless: dscp: 3 ecn: 1 diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index 0471d380d0..77a6783343 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -10,7 +10,8 @@ from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice from tests.common.utilities import wait_until from tests.common.dualtor.dual_tor_utils import upper_tor_host,lower_tor_host -from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_upper_tor +from tests.common.dualtor.mux_simulator_control import mux_server_url, toggle_all_simulator_ports +from tests.common.dualtor.constants import UPPER_TOR, LOWER_TOR from tests.common.utilities import check_qos_db_fv_reference_with_table logger = logging.getLogger(__name__) @@ -144,7 +145,7 @@ def __computeBufferThreshold(self, dut_asic, bufferProfile): else: db = "4" keystr = "BUFFER_POOL|" - if check_qos_db_fv_reference_with_table(duthost) == True: + if check_qos_db_fv_reference_with_table(dut_asic) == True: pool = bufferProfile["pool"].encode("utf-8").translate(None, "[]") else: pool = keystr + bufferProfile["pool"].encode("utf-8") @@ -170,7 +171,7 @@ def __updateVoidRoidParams(self, dut_asic, bufferProfile): Returns: Updates bufferProfile with VOID/ROID obtained from Redis db """ - if check_qos_db_fv_reference_with_table(duthost) == True: + if check_qos_db_fv_reference_with_table(dut_asic) == True: if self.isBufferInApplDb(dut_asic): bufferPoolName = bufferProfile["pool"].encode("utf-8").translate( None, "[]").replace("BUFFER_POOL_TABLE:",'' @@ -213,13 +214,13 @@ def __getBufferProfile(self, request, dut_asic, os_version, table, port, priorit if self.isBufferInApplDb(dut_asic): db = "0" keystr = "{0}:{1}:{2}".format(table, port, priorityGroup) - bufkeystr = "BUFFER_POOL_TABLE:" + bufkeystr = "BUFFER_PROFILE_TABLE:" else: db = "4" keystr = "{0}|{1}|{2}".format(table, port, priorityGroup) - bufkeystr = "BUFFER_POOL|" + bufkeystr = "BUFFER_PROFILE|" - if check_qos_db_fv_reference_with_table(duthost) == True: + if check_qos_db_fv_reference_with_table(dut_asic) == True: bufferProfileName = dut_asic.run_redis_cmd( argv = ["redis-cli", "-n", db, "HGET", keystr, "profile"] )[0].encode("utf-8").translate(None, "[]") @@ -288,7 +289,7 @@ def __getEcnWredParam(self, dut_asic, table, port): Returns: wredProfile (dict): Map of ECN/WRED attributes """ - if check_qos_db_fv_reference_with_table(duthost) == True: + if check_qos_db_fv_reference_with_table(dut_asic) == True: wredProfileName = dut_asic.run_redis_cmd( argv = [ "redis-cli", "-n", "4", "HGET", @@ -344,7 +345,7 @@ def __getSchedulerParam(self, dut_asic, port, queue): Returns: SchedulerParam (dict): Map of scheduler parameters """ - if check_qos_db_fv_reference_with_table(duthost) == True: + if check_qos_db_fv_reference_with_table(dut_asic) == True: schedProfile = dut_asic.run_redis_cmd( argv = [ "redis-cli", "-n", "4", "HGET", @@ -665,7 +666,7 @@ def updateIptables( def stopServices( self, duthosts, rand_one_dut_hostname, enum_frontend_asic_index, swapSyncd, enable_container_autorestart, disable_container_autorestart, - tbinfo, upper_tor_host, lower_tor_host + tbinfo, upper_tor_host, lower_tor_host, toggle_all_simulator_ports ): """ Stop services (lldp-syncs, lldpd, bgpd) on DUT host prior to test start @@ -679,6 +680,7 @@ def stopServices( """ if 'dualtor' in tbinfo['topo']['name']: duthost = upper_tor_host + duthost_lower = lower_tor_host else: duthost = duthosts[rand_one_dut_hostname] @@ -713,25 +715,52 @@ def 
updateDockerService(host, docker="", action="", service=""): ] feature_list = ['lldp', 'bgp', 'syncd', 'swss'] + if 'dualtor' in tbinfo['topo']['name']: + disable_container_autorestart(duthost_lower, testcase="test_qos_sai", feature_list=feature_list) + disable_container_autorestart(duthost, testcase="test_qos_sai", feature_list=feature_list) for service in services: updateDockerService(duthost, action="stop", **service) """ Stop mux container for dual ToR """ if 'dualtor' in tbinfo['topo']['name']: - duthost.shell('sudo systemctl stop mux') - logger.info("Stop mux container for dual ToR testbed") + file = "/usr/local/bin/write_standby.py" + backup_file = "/usr/local/bin/write_standby.py.bkup" + toggle_all_simulator_ports(UPPER_TOR) + + try: + duthost.shell("ls %s" % file) + duthost.shell("sudo cp {} {}".format(file,backup_file)) + duthost.shell("sudo rm {}".format(file)) + duthost.shell("sudo touch {}".format(file)) + except: + pytest.skip('file {} not found'.format(file)) + + duthost_lower.shell('sudo config feature state mux disabled') + duthost.shell('sudo config feature state mux disabled') yield enable_container_autorestart(duthost, testcase="test_qos_sai", feature_list=feature_list) + if 'dualtor' in tbinfo['topo']['name']: + enable_container_autorestart(duthost_lower, testcase="test_qos_sai", feature_list=feature_list) + for service in services: updateDockerService(duthost, action="start", **service) """ Start mux conatiner for dual ToR """ if 'dualtor' in tbinfo['topo']['name']: - duthost.shell('sudo systemctl start mux') - logger.info("Start mux container for dual ToR testbed") + try: + duthost.shell("ls %s" % backup_file) + duthost.shell("sudo cp {} {}".format(backup_file,file)) + duthost.shell("sudo chmod +x {}".format(file)) + duthost.shell("sudo rm {}".format(backup_file)) + except: + pytest.skip('file {} not found'.format(backup_file)) + + duthost.shell('sudo config feature state mux enabled') + duthost_lower.shell('sudo config feature state mux enabled') + logger.info("Start mux container for dual ToR testbed") @pytest.fixture(autouse=True) def updateLoganalyzerExceptions(self, rand_one_dut_hostname, loganalyzer): @@ -928,6 +957,14 @@ def populateArpEntries( ptfhost, testCase=saiQosTest, testParams=testParams ) + @pytest.fixture(scope='class', autouse=True) + def dut_disable_ipv6(self, duthosts, rand_one_dut_hostname): + duthost = duthosts[rand_one_dut_hostname] + duthost.shell("sysctl -w net.ipv6.conf.all.disable_ipv6=1") + + yield + duthost.shell("sysctl -w net.ipv6.conf.all.disable_ipv6=0") + @pytest.fixture(scope='class', autouse=True) def sharedHeadroomPoolSize( self, request, duthosts, enum_frontend_asic_index, @@ -1294,7 +1331,7 @@ def is_intf_status(asic, intf, oper_state): # wait for port status to change pytest_assert( wait_until( - 10, 1, is_intf_status, frontend_asic, intf, + 10, 1, 0, is_intf_status, frontend_asic, intf, oper_state ), "Failed to update port status {} {}".format( diff --git a/tests/qos/test_buffer.py b/tests/qos/test_buffer.py index 85180a4fe9..4a0b578698 100644 --- a/tests/qos/test_buffer.py +++ b/tests/qos/test_buffer.py @@ -476,7 +476,7 @@ def _ensure_pool_size(duthost, expected_pool_size, expected_shp_size, ingress_lo else: delay = 1 - return wait_until(timeout, delay, _ensure_pool_size, duthost, expected_pool_size, expected_shp_size, ingress_lossless_pool_oid) + return wait_until(timeout, delay, 0, _ensure_pool_size, duthost, expected_pool_size, expected_shp_size, ingress_lossless_pool_oid) def check_pg_profile(duthost, pg, 
expected_profile, fail_test=True): @@ -498,7 +498,7 @@ def _check_pg_profile(duthost, pg, expected_profile): profile = duthost.shell('redis-cli hget {} profile'.format(pg))['stdout'] return (profile == expected_profile) - if wait_until(10, 2, _check_pg_profile, duthost, pg, expected_profile): + if wait_until(10, 2, 0, _check_pg_profile, duthost, pg, expected_profile): return True else: if fail_test: @@ -518,7 +518,7 @@ def _check_pfc_enable(duthost, port, expected_pfc_enable_map): pfc_enable = duthost.shell('redis-cli -n 4 hget "PORT_QOS_MAP|{}" pfc_enable'.format(port))['stdout'] return (expected_pfc_enable_map == pfc_enable) - pytest_assert(wait_until(10, 2, _check_pfc_enable, duthost, port, expected_pfc_enable_map), + pytest_assert(wait_until(10, 2, 0, _check_pfc_enable, duthost, port, expected_pfc_enable_map), "Port {} pfc enable check failed expected: {} got: {}".format( port, expected_pfc_enable_map, @@ -1173,7 +1173,7 @@ def _check_buffer_profiles_for_shp(duthost, shp_enabled): # Return True only if all lossless profiles pass the check return True - pytest_assert(wait_until(20, 2, _check_buffer_profiles_for_shp, duthost, shp_enabled)) + pytest_assert(wait_until(20, 2, 0, _check_buffer_profiles_for_shp, duthost, shp_enabled)) def test_shared_headroom_pool_configure(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_test): @@ -1587,6 +1587,8 @@ def _check_buffer_object_list_aligns_with_expected_ones(port_to_test, table, exp duthost = duthosts[rand_one_dut_hostname] is_qos_db_reference_with_table = check_qos_db_fv_reference_with_table(duthost) original_speed = duthost.shell('redis-cli -n 4 hget "PORT|{}" speed'.format(port_to_test))['stdout'] + raw_lanes_str = duthost.shell('redis-cli -n 4 hget "PORT|{}" lanes'.format(port_to_test))['stdout'] + list_of_lanes = raw_lanes_str.split(',') original_cable_len = duthost.shell('redis-cli -n 4 hget "CABLE_LENGTH|AZURE" {}'.format(port_to_test))['stdout'] if check_qos_db_fv_reference_with_table(duthost) == True: original_profile = duthost.shell('redis-cli hget "BUFFER_PG_TABLE:{}:3-4" profile'.format(port_to_test))['stdout'][1:-1] @@ -1598,7 +1600,7 @@ def _check_buffer_object_list_aligns_with_expected_ones(port_to_test, table, exp new_cable_len = '15m' - extra_overhead = TESTPARAM_EXTRA_OVERHEAD.get(original_speed) + extra_overhead = TESTPARAM_EXTRA_OVERHEAD.get(str(len(list_of_lanes))) if not extra_overhead: extra_overhead = TESTPARAM_EXTRA_OVERHEAD.get('default') if not extra_overhead: diff --git a/tests/qos/test_buffer_traditional.py b/tests/qos/test_buffer_traditional.py new file mode 100644 index 0000000000..318bb77521 --- /dev/null +++ b/tests/qos/test_buffer_traditional.py @@ -0,0 +1,269 @@ +import logging + +import pytest + +from tests.common.utilities import wait_until +from tests.common.helpers.assertions import pytest_assert + +pytestmark = [ + pytest.mark.topology('any') +] + +DEFAULT_LOSSLESS_PROFILES = None +RECLAIM_BUFFER_ON_ADMIN_DOWN = None + +@pytest.fixture(scope="module", autouse=True) +def setup_module(duthosts, rand_one_dut_hostname): + """Setup module. 
Called only once when the module is initialized + + Args: + duthosts: The duthosts object + rand_one_dut_hostname: + """ + global RECLAIM_BUFFER_ON_ADMIN_DOWN + + duthost = duthosts[rand_one_dut_hostname] + if duthost.facts["asic_type"] in ["mellanox"]: + RECLAIM_BUFFER_ON_ADMIN_DOWN = True + else: + RECLAIM_BUFFER_ON_ADMIN_DOWN = False + + if "201911" not in duthost.os_version: + pytest.skip("Buffer test runs on 201911 branch only, skip") + + load_lossless_info_from_pg_profile_lookup(duthost) + + +def load_lossless_info_from_pg_profile_lookup(duthost): + """Load pg_profile_lookup.ini to a dictionary. Called only once when the module is initialized + + Args: + duthost: the DUT host object + + Return: + The dictionary containing the information in pg_profile_lookup.ini + """ + global DEFAULT_LOSSLESS_PROFILES + + # Check the threshold mode + threshold_mode = duthost.shell('redis-cli -n 4 hget "BUFFER_POOL|ingress_lossless_pool" mode')['stdout'] + threshold_field_name = 'dynamic_th' if threshold_mode == 'dynamic' else 'static_th' + dut_hwsku = duthost.facts["hwsku"] + dut_platform = duthost.facts["platform"] + skudir = "/usr/share/sonic/device/{}/{}/".format(dut_platform, dut_hwsku) + pg_profile_lookup_file = os.path.join(skudir, 'pg_profile_lookup.ini') + duthost.file(path=pg_profile_lookup_file, state="file") + lines = duthost.shell('cat {}'.format(pg_profile_lookup_file))["stdout_lines"] + DEFAULT_LOSSLESS_PROFILES = {} + for line in lines: + if line[0] == '#': + continue + tokens = line.split() + speed = tokens[0] + cable_length = tokens[1] + size = tokens[2] + xon = tokens[3] + xoff = tokens[4] + threshold = tokens[5] + profile_info = { + 'pool': '[BUFFER_POOL|ingress_lossless_pool]', + 'size': size, + 'xon': xon, + 'xoff': xoff, + threshold_field_name: threshold} + if len(tokens) > 6: + profile_info['xon_offset'] = tokens[6] + DEFAULT_LOSSLESS_PROFILES[(speed, cable_length)] = profile_info + + +def make_dict_from_output_lines(lines): + if lines: + return dict(zip(lines[::2], lines[1::2])) + return None + + +def test_buffer_pg(duthosts, rand_one_dut_hostname, conn_graph_facts): + """The testcase for (traditional) buffer manager + + 1. For all ports in the config_db, + - Check whether there is no lossless buffer PG configured on an admin-down port + - on all paltforms, there is no lossless PG configured on inactive ports which are admin-down + which is guaranteed by buffer template + - Check whether the lossless PG aligns with the port's speed and cable length + - If name to oid maps exist for port and PG, check whether the information in ASIC_DB aligns with that in CONFIG_DB + - If a lossless profile hasn't been checked, check whether lossless profile in CONFIG_DB aligns with + - pg_profile_lookup.ini according to speed and cable length + - information in ASIC_DB + 2. Shutdown a port and check whether the lossless buffer PGs + - has been removed on Mellanox platforms + - will not be changed on other platforms + 3. Startup the port and check whether the lossless PG has been readded. + """ + def _check_condition(condition, message, use_assert): + """Check whether the condition is satisfied + + Args: + condition: The condition to check + message: The message to log or in pytest_assert + use_assert: Whether to use assert or not. If this is called from wait_until(), it should be False. 
+ + Return: + The condition + """ + if use_assert: + pytest_assert(condition, message) + elif not condition: + logging.info("Port buffer check: {}".format(message)) + return False + + return True + + def _check_port_buffer_info_and_get_profile_oid(duthost, port, expected_profile, use_assert=True): + """Check port's buffer information against CONFIG_DB and ASIC_DB + + Args: + duthost: The duthost object + port: The port to test in string + expected_profile: The expected profile in string + use_assert: Whether or not to use pytest_assert in case any conditional check isn't satisfied + + Return: + A tuple consisting of the OID of buffer profile and whether there is any check failed + """ + profile_in_pg = duthost.shell('redis-cli -n 4 hget "BUFFER_PG|{}|3-4" profile'.format(port))['stdout'] + buffer_profile_oid = None + default_lossless_pgs = ['3', '4'] + + if expected_profile: + if not _check_condition(profile_in_pg == expected_profile, "Buffer profile of lossless PG of port {} isn't the expected ({})".format(port, expected_profile), use_assert): + return None, False + + if pg_name_map: + for pg in default_lossless_pgs: + buffer_pg_asic_oid = pg_name_map['{}:{}'.format(port, pg)] + buffer_pg_asic_key = duthost.shell('redis-cli -n 1 keys *{}*'.format(buffer_pg_asic_oid))['stdout'] + buffer_profile_oid_in_pg = duthost.shell('redis-cli -n 1 hget {} SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE'.format(buffer_pg_asic_key))['stdout'] + logging.info("Checking admin-up port {} lossless PG {} in ASIC_DB ({})".format(port, pg, buffer_profile_oid_in_pg)) + if buffer_profile_oid: + if not _check_condition(buffer_profile_oid == buffer_profile_oid_in_pg, + "Different OIDs in PG 3 ({}) and 4 ({}) in port {}".format(buffer_profile_oid, buffer_profile_oid_in_pg, port), + use_assert): + return None, False + else: + buffer_profile_oid = buffer_profile_oid_in_pg + else: + if not _check_condition(not profile_in_pg, "Buffer PG configured on admin down port {}".format(port), use_assert): + return None, False + if pg_name_map: + for pg in default_lossless_pgs: + buffer_pg_asic_oid = pg_name_map['{}:{}'.format(port, pg)] + buffer_pg_asic_key = duthost.shell('redis-cli -n 1 keys *{}*'.format(buffer_pg_asic_oid))['stdout'] + buffer_profile_oid_in_pg = duthost.shell('redis-cli -n 1 hget {} SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE'.format(buffer_pg_asic_key))['stdout'] + logging.info("Checking admin-down port {} lossless PG {}".format(port, pg)) + if not _check_condition(not buffer_profile_oid_in_pg or buffer_profile_oid_in_pg == 'oid:0x0', + "Buffer PG configured on admin down port in ASIC_DB {}".format(port), + use_assert): + return None, False + + return buffer_profile_oid, True + + def _check_port_buffer_info_and_return(duthost, port, expected_profile): + """Check port's buffer information against CONFIG_DB and ASIC_DB and return the result + + This is called from wait_until + + Args: + duthost: The duthost object + port: The port to test in string + expected_profile: The expected profile in string + + Return: + Whether all the checks passed + """ + _, result = _check_port_buffer_info_and_get_profile_oid(duthost, port, expected_profile, False) + return result + + global DEFAULT_LOSSLESS_PROFILES + + duthost = duthosts[rand_one_dut_hostname] + + # Check whether the COUNTERS_PG_NAME_MAP exists. 
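A short usage example for the make_dict_from_output_lines() helper defined above: redis-cli hgetall prints field names and values on alternating lines, so pairing every other element rebuilds the hash (sample values below are hypothetical):

lines = "xon 18432 xoff 32768 size 51200".split()   # stand-in for duthost.shell(...)['stdout'].split()
profile = dict(zip(lines[::2], lines[1::2]))
# profile == {'xon': '18432', 'xoff': '32768', 'size': '51200'}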
Skip ASIC_DB checking if it isn't + pg_name_map = make_dict_from_output_lines(duthost.shell('redis-cli -n 2 hgetall COUNTERS_PG_NAME_MAP')['stdout'].split()) + cable_length_map = make_dict_from_output_lines(duthost.shell('redis-cli -n 4 hgetall "CABLE_LENGTH|AZURE"')['stdout'].split()) + + configdb_ports = [x.split('|')[1] for x in duthost.shell('redis-cli -n 4 keys "PORT|*"')['stdout'].split()] + profiles_checked = {} + lossless_pool_oid = None + buffer_profile_asic_info = None + admin_up_ports = set() + for port in configdb_ports: + port_config = make_dict_from_output_lines(duthost.shell('redis-cli -n 4 hgetall "PORT|{}"'.format(port))['stdout'].split()) + + is_port_up = port_config.get('admin_status') == 'up' + if is_port_up or not RECLAIM_BUFFER_ON_ADMIN_DOWN: + if is_port_up: + admin_up_ports.add(port) + + cable_length = cable_length_map[port] + speed = port_config['speed'] + expected_profile = '[BUFFER_PROFILE|pg_lossless_{}_{}_profile]'.format(speed, cable_length) + + logging.info("Checking admin-{} port {} buffer information: profile {}".format('up' if is_port_up else 'down', port, expected_profile)) + + buffer_profile_oid, _ = _check_port_buffer_info_and_get_profile_oid(duthost, port, expected_profile) + + if expected_profile not in profiles_checked: + profile_info = make_dict_from_output_lines(duthost.shell('redis-cli -n 4 hgetall "{}"'.format(expected_profile[1:-1]))['stdout'].split()) + pytest_assert(profile_info == DEFAULT_LOSSLESS_PROFILES[(speed, cable_length)], "Buffer profile {} {} doesn't match default {}".format(expected_profile, profile_info, DEFAULT_LOSSLESS_PROFILES[(speed, cable_length)])) + + logging.info("Checking buffer profile {}: OID: {}".format(expected_profile, buffer_profile_oid)) + if buffer_profile_oid: + # Further check the buffer profile in ASIC_DB + buffer_profile_key = duthost.shell('redis-cli -n 1 keys *{}*'.format(buffer_profile_oid))['stdout'] + buffer_profile_asic_info = make_dict_from_output_lines(duthost.shell('redis-cli -n 1 hgetall {}'.format(buffer_profile_key))['stdout'].split()) + pytest_assert(buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_XON_TH'] == profile_info['xon'] and + buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_XOFF_TH'] == profile_info['xoff'] and + buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_RESERVED_BUFFER_SIZE'] == profile_info['size'] and + (buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE'] == 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_DYNAMIC' and + buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH'] == profile_info['dynamic_th'] or + buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE'] == 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_STATIC' and + buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_SHARED_STATIC_TH'] == profile_info['static_th']), + "Buffer profile {} {} doesn't align with ASIC_TABLE {}".format(expected_profile, profile_info, buffer_profile_asic_info)) + + profiles_checked[expected_profile] = buffer_profile_oid + if not lossless_pool_oid: + if buffer_profile_asic_info: + lossless_pool_oid = buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_POOL_ID'] + else: + pytest_assert(lossless_pool_oid == buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_POOL_ID'], + "Buffer profile {} has different buffer pool id {} from others {}".format(expected_profile, buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_POOL_ID'], lossless_pool_oid)) + else: + pytest_assert(profiles_checked[expected_profile] == buffer_profile_oid, + "PG {}|3-4 has different OID of profile from other PGs 
sharing the same profile {}".format(port, expected_profile)) + else: + # Port admin down. Make sure no lossless PG configured. + # After deployment, there should not be lossless PG configured on any platforms + # This is guaranteed by buffers_config.j2: no lossless PG will be configured on inactive ports + logging.info("Checking admin-down port buffer information: {}".format(port)) + _, _ = _check_port_buffer_info_and_get_profile_oid(duthost, port, None) + + port_to_shutdown = admin_up_ports.pop() + expected_profile = duthost.shell('redis-cli -n 4 hget "BUFFER_PG|{}|3-4" profile'.format(port_to_shutdown))['stdout'] + try: + # Shutdown the port and check whether the lossless PGs + # - have been removed on Mellanox platforms + # - will not be affected on other platforms + logging.info("Shut down an admin-up port {} and check its buffer information".format(port_to_shutdown)) + duthost.shell('config interface shutdown {}'.format(port_to_shutdown)) + if RECLAIM_BUFFER_ON_ADMIN_DOWN: + expected_profile_admin_down = None + else: + expected_profile_admin_down = expected_profile + wait_until(60, 5, _check_port_buffer_info_and_return, duthost, port_to_shutdown, expected_profile_admin_down) + + # Startup the port and check whether the lossless PG has been reconfigured + logging.info("Re-startup the port {} and check its buffer information".format(port_to_shutdown)) + duthost.shell('config interface startup {}'.format(port_to_shutdown)) + wait_until(60, 5, _check_port_buffer_info_and_return, duthost, port_to_shutdown, expected_profile) + finally: + duthost.shell('config interface startup {}'.format(port_to_shutdown), module_ignore_errors=True) diff --git a/tests/qos/test_pfc_pause.py b/tests/qos/test_pfc_pause.py index dc846eef11..1066c563a3 100644 --- a/tests/qos/test_pfc_pause.py +++ b/tests/qos/test_pfc_pause.py @@ -198,11 +198,7 @@ def run_test(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts, return results -# For this test, we use the fanout connected to the DUT to send PFC pause frames. -# The fanout needs to send PFC frames fast enough so that the queue remains completely paused for the entire duration of the test. -# The inter packet interval between PFC frames to completely block a queue vary based on link speed and we have seen flakiness in our test runs. -# Since this test is already covered under the 'ixia' folder where we use a traffic generator to generate pause frames, skipping this here. 
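A worked example of the lossless profile name the buffer test derives above (sample values, not taken from a real device): a port with speed 40000 and cable length 5m is expected to reference the profile below in CONFIG_DB, and the same (speed, cable_length) pair is the key into DEFAULT_LOSSLESS_PROFILES loaded from pg_profile_lookup.ini:

speed = '40000'                        # hypothetical CONFIG_DB PORT|<port> speed
cable_length = '5m'                    # hypothetical CABLE_LENGTH|AZURE entry for the port
expected_profile = '[BUFFER_PROFILE|pg_lossless_{}_{}_profile]'.format(speed, cable_length)
# expected_profile == '[BUFFER_PROFILE|pg_lossless_40000_5m_profile]'
# DEFAULT_LOSSLESS_PROFILES[(speed, cable_length)] then supplies the xon/xoff/size values compared against it.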
-@pytest.mark.skip(reason="Fanout needs to send PFC frames fast enough to completely pause the queue") + def test_pfc_pause_lossless(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts, fanout_graph_facts, lossless_prio_dscp_map, enum_dut_lossless_prio): diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index 1b1cf83ea1..4475d45736 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -115,8 +115,13 @@ def testQosSaiPfcXoffLimit( "pkts_num_trig_ingr_drp": qosConfig[xoffProfile]["pkts_num_trig_ingr_drp"], "hwsku":dutTestParams['hwsku'] }) + + if "pkts_num_egr_mem" in qosConfig.keys(): + testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] + if "pkts_num_margin" in qosConfig[xoffProfile].keys(): testParams["pkts_num_margin"] = qosConfig[xoffProfile]["pkts_num_margin"] + self.runPtfTest( ptfhost, testCase="sai_qos_tests.PFCtest", testParams=testParams ) @@ -184,10 +189,16 @@ def testQosSaiPfcXonLimit( "pkts_num_leak_out": dutQosConfig["param"][portSpeedCableLength]["pkts_num_leak_out"], "hwsku":dutTestParams['hwsku'] }) + + if "pkts_num_egr_mem" in qosConfig.keys(): + testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] + if "pkts_num_hysteresis" in qosConfig[xonProfile].keys(): testParams["pkts_num_hysteresis"] = qosConfig[xonProfile]["pkts_num_hysteresis"] + if "pkts_num_margin" in qosConfig[xonProfile].keys(): testParams["pkts_num_margin"] = qosConfig[xonProfile]["pkts_num_margin"] + self.runPtfTest( ptfhost, testCase="sai_qos_tests.PFCXonTest", testParams=testParams ) @@ -255,6 +266,9 @@ def testQosSaiHeadroomPoolSize( if margin: testParams["margin"] = margin + if "pkts_num_egr_mem" in qosConfig.keys(): + testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] + self.runPtfTest( ptfhost, testCase="sai_qos_tests.HdrmPoolSizeTest", testParams=testParams @@ -318,6 +332,10 @@ def testQosSaiHeadroomPoolWatermark( "max_headroom": sharedHeadroomPoolSize, "hwsku":dutTestParams['hwsku'] }) + + if "pkts_num_egr_mem" in qosConfig.keys(): + testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] + self.runPtfTest( ptfhost, testCase="sai_qos_tests.HdrmPoolSizeTest", testParams=testParams @@ -434,11 +452,17 @@ def testQosSaiLossyQueue( "pkts_num_trig_egr_drp": qosConfig["lossy_queue_1"]["pkts_num_trig_egr_drp"], "hwsku":dutTestParams['hwsku'] }) + + if "pkts_num_egr_mem" in qosConfig.keys(): + testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] + if "packet_size" in qosConfig["lossy_queue_1"].keys(): testParams["packet_size"] = qosConfig["lossy_queue_1"]["packet_size"] testParams["cell_size"] = qosConfig["lossy_queue_1"]["cell_size"] + if "pkts_num_margin" in qosConfig["lossy_queue_1"].keys(): testParams["pkts_num_margin"] = qosConfig["lossy_queue_1"]["pkts_num_margin"] + self.runPtfTest( ptfhost, testCase="sai_qos_tests.LossyQueueTest", testParams=testParams @@ -474,6 +498,7 @@ def testQosSaiDscpQueueMapping( "src_port_ip": dutConfig["testPorts"]["src_port_ip"], "hwsku":dutTestParams['hwsku'] }) + self.runPtfTest( ptfhost, testCase="sai_qos_tests.DscpMappingPB", testParams=testParams @@ -514,6 +539,40 @@ def testQosSaiDot1pQueueMapping( testParams=testParams ) + def testQosSaiDot1pPgMapping( + self, ptfhost, dutTestParams, dutConfig + ): + """ + Test QoS SAI Dot1p to PG mapping + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports 
+ + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + if "backend" not in dutTestParams["topo"]: + pytest.skip("Dot1p-PG mapping is not supported on {}".format(dutTestParams["topo"])) + + testParams = dict() + testParams.update(dutTestParams["basicParams"]) + testParams.update({ + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "vlan_id": dutConfig["testPorts"]["src_port_vlan"] + }) + self.runPtfTest( + ptfhost, testCase="sai_qos_tests.Dot1pToPgMapping", + testParams=testParams + ) + def testQosSaiDwrr( self, ptfhost, dutTestParams, dutConfig, dutQosConfig, ): @@ -622,6 +681,9 @@ def testQosSaiPgSharedWatermark( "hwsku":dutTestParams['hwsku'] }) + if "pkts_num_egr_mem" in qosConfig.keys(): + testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] + if "packet_size" in qosConfig[pgProfile].keys(): testParams["packet_size"] = qosConfig[pgProfile]["packet_size"] @@ -735,8 +797,13 @@ def testQosSaiQSharedWatermark( "cell_size": qosConfig[queueProfile]["cell_size"], "hwsku":dutTestParams['hwsku'] }) + + if "pkts_num_egr_mem" in qosConfig.keys(): + testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] + if "packet_size" in qosConfig[queueProfile].keys(): testParams["packet_size"] = qosConfig[queueProfile]["packet_size"] + self.runPtfTest( ptfhost, testCase="sai_qos_tests.QSharedWatermarkTest", testParams=testParams @@ -863,7 +930,7 @@ def test_qos_masic_dscp_queue_mapping( # ensure the test destination IP has a path to backend ASIC pytest_assert( wait_until( - 30, 1, self.check_v4route_backend_nhop, duthost, + 30, 1, 0, self.check_v4route_backend_nhop, duthost, test_params["src_asic"], test_params["dst_port_ip"] ), "Route {} doesn't have backend ASIC nexthop on ASIC {}".format( diff --git a/tests/read_mac/test_read_mac_metadata.py b/tests/read_mac/test_read_mac_metadata.py index 22ee6988b0..64aade175f 100644 --- a/tests/read_mac/test_read_mac_metadata.py +++ b/tests/read_mac/test_read_mac_metadata.py @@ -82,7 +82,7 @@ def run_test_in_reinstall_loop(self): self.deploy_image_to_duthost(duthost, counter) reboot(duthost, localhost, wait=120) logger.info("Wait until system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started") + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") if current_minigraph: logger.info("Execute cli 'config load_minigraph -y' to apply new minigraph") diff --git a/tests/restapi/conftest.py b/tests/restapi/conftest.py index f2aa98bcc7..ca3b923810 100644 --- a/tests/restapi/conftest.py +++ b/tests/restapi/conftest.py @@ -1,6 +1,5 @@ import logging import pytest -import time from tests.common import config_reload import urllib3 from urlparse import urlunparse @@ -8,7 +7,8 @@ from tests.common.helpers.assertions import pytest_require as pyrequire from tests.common.helpers.dut_utils import check_container_state -RESTAPI_SERVER_START_WAIT_TIME = 15 +from helper import apply_cert_config + RESTAPI_CONTAINER_NAME = 'restapi' @pytest.fixture(scope="module", autouse=True) @@ -91,37 +91,7 @@ def setup_restapi_server(duthosts, rand_one_dut_hostname, localhost): duthost.copy(src='restapiserver.crt', dest='/etc/sonic/credentials/testrestapiserver.crt') duthost.copy(src='restapiserver.key', 
dest='/etc/sonic/credentials/testrestapiserver.key') - # Set client certificate subject name in config DB - dut_command = "redis-cli -n 4 hset \ - 'RESTAPI|certs' \ - 'client_crt_cname' \ - 'test.client.restapi.sonic'" - duthost.shell(dut_command) - - # Set CA cert path in config DB - dut_command = "redis-cli -n 4 hset \ - 'RESTAPI|certs' \ - 'ca_crt' \ - '/etc/sonic/credentials/restapiCA.pem'" - duthost.shell(dut_command) - - # Set server certificate path in config DB - dut_command = "redis-cli -n 4 hset \ - 'RESTAPI|certs' \ - 'server_crt' \ - '/etc/sonic/credentials/testrestapiserver.crt'" - duthost.shell(dut_command) - dut_command = "redis-cli -n 4 hset \ - 'RESTAPI|certs' \ - 'server_key' \ - '/etc/sonic/credentials/testrestapiserver.key'" - duthost.shell(dut_command) - - # Restart RESTAPI server with the updated config - dut_command = "sudo systemctl restart restapi" - duthost.shell(dut_command) - time.sleep(RESTAPI_SERVER_START_WAIT_TIME) - + apply_cert_config(duthost) urllib3.disable_warnings() yield @@ -133,6 +103,7 @@ def setup_restapi_server(duthosts, rand_one_dut_hostname, localhost): restapiclient.*" localhost.shell(local_command) + @pytest.fixture def construct_url(duthosts, rand_one_dut_hostname): def get_endpoint(path): @@ -154,4 +125,7 @@ def vlan_members(duthosts, rand_one_dut_hostname, tbinfo): VLAN_INDEX = 0 mg_facts = duthost.get_extended_minigraph_facts(tbinfo) vlan_interfaces = mg_facts["minigraph_vlans"].values()[VLAN_INDEX]["members"] - return vlan_interfaces + if vlan_interfaces is not None: + return vlan_interfaces + else: + return [] diff --git a/tests/restapi/helper.py b/tests/restapi/helper.py new file mode 100644 index 0000000000..ffc482324f --- /dev/null +++ b/tests/restapi/helper.py @@ -0,0 +1,35 @@ +import time + +RESTAPI_SERVER_START_WAIT_TIME = 15 + +def apply_cert_config(duthost): + # Set client certificate subject name in config DB + dut_command = "redis-cli -n 4 hset \ + 'RESTAPI|certs' \ + 'client_crt_cname' \ + 'test.client.restapi.sonic'" + duthost.shell(dut_command) + + # Set CA cert path in config DB + dut_command = "redis-cli -n 4 hset \ + 'RESTAPI|certs' \ + 'ca_crt' \ + '/etc/sonic/credentials/restapiCA.pem'" + duthost.shell(dut_command) + + # Set server certificate path in config DB + dut_command = "redis-cli -n 4 hset \ + 'RESTAPI|certs' \ + 'server_crt' \ + '/etc/sonic/credentials/testrestapiserver.crt'" + duthost.shell(dut_command) + dut_command = "redis-cli -n 4 hset \ + 'RESTAPI|certs' \ + 'server_key' \ + '/etc/sonic/credentials/testrestapiserver.key'" + duthost.shell(dut_command) + + # Restart RESTAPI server with the updated config + dut_command = "sudo systemctl restart restapi" + duthost.shell(dut_command) + time.sleep(RESTAPI_SERVER_START_WAIT_TIME) \ No newline at end of file diff --git a/tests/restapi/restapi_operations.py b/tests/restapi/restapi_operations.py index b8bfff50ea..732d615678 100644 --- a/tests/restapi/restapi_operations.py +++ b/tests/restapi/restapi_operations.py @@ -20,6 +20,12 @@ def __init__(self, client_cert, client_key): def request(self, method, url, params=None): session = requests.Session() session.headers.update({'Content-type': 'application/json'}) + # Disable proxies explicitly + proxies = { + "http": "", + "https": "" + } + session.proxies.update(proxies) if method == GET: req = requests.Request(GET, url) elif method == POST: @@ -39,6 +45,23 @@ def request(self, method, url, params=None): # # Fundamental operations # + # Reset Status + def get_reset_status(self, construct_url): + path = 
API_VERSION+"/config/resetstatus" + url = construct_url(path) + if url: + return self.request(GET, url) + else: + logger.error("Malformed URL for "+path+"!") + + def post_reset_status(self, construct_url, params): + path = API_VERSION+"/config/resetstatus" + url = construct_url(path) + if url: + return self.request(POST, url, params) + else: + logger.error("Malformed URL for "+path+"!") + # Decap def post_config_tunnel_decap_tunnel_type(self, construct_url, tunnel_type, params): path = API_VERSION+'/config/tunnel/decap/{tunnel_type}'.format(tunnel_type=tunnel_type) diff --git a/tests/restapi/test_restapi.py b/tests/restapi/test_restapi.py index a8aeb92fa7..3046a57cd6 100644 --- a/tests/restapi/test_restapi.py +++ b/tests/restapi/test_restapi.py @@ -1,10 +1,12 @@ import pytest import time import logging -import requests import json from tests.common.helpers.assertions import pytest_assert +from tests.common import config_reload +from tests.common.reboot import reboot +from helper import apply_cert_config from restapi_operations import Restapi @@ -20,6 +22,64 @@ restapi = Restapi(CLIENT_CERT, CLIENT_KEY) +''' +This test checks for reset status and sets it +''' +def test_check_reset_status(construct_url, duthosts, rand_one_dut_hostname, localhost): + duthost = duthosts[rand_one_dut_hostname] + # Set reset status + logger.info("Checking for RESTAPI reset status") + r = restapi.get_reset_status(construct_url) + pytest_assert(r.status_code == 200) + logger.info(r.json()) + response = r.json() + pytest_assert(response['reset_status'] == "true") + logger.info("Setting RESTAPI reset status") + params = '{"reset_status":"false"}' + r = restapi.post_reset_status(construct_url, params) + pytest_assert(r.status_code == 200) + r = restapi.get_reset_status(construct_url) + pytest_assert(r.status_code == 200) + logger.info(r.json()) + response = r.json() + pytest_assert(response['reset_status'] == "false") + + # Check reset status post config reload + logger.info("Checking for RESTAPI reset status after config reload") + config_reload(duthost) + apply_cert_config(duthost) + r = restapi.get_reset_status(construct_url) + pytest_assert(r.status_code == 200) + logger.info(r.json()) + response = r.json() + pytest_assert(response['reset_status'] == "true") + + # Check reset status post fast reboot + check_reset_status_after_reboot('fast', "false", "true", duthost, localhost, construct_url) + # Check reset status post cold reboot + check_reset_status_after_reboot('cold', "false", "true", duthost, localhost, construct_url) + # Check reset status post warm reboot + check_reset_status_after_reboot('warm', "false", "false", duthost, localhost, construct_url) + +def check_reset_status_after_reboot(reboot_type, pre_reboot_status, post_reboot_status, duthost, localhost, construct_url): + logger.info("Checking for RESTAPI reset status after "+reboot_type+" reboot") + params = '{"reset_status":"false"}' + r = restapi.post_reset_status(construct_url, params) + pytest_assert(r.status_code == 200) + r = restapi.get_reset_status(construct_url) + pytest_assert(r.status_code == 200) + logger.info(r.json()) + response = r.json() + pytest_assert(response['reset_status'] == pre_reboot_status) + reboot(duthost, localhost, reboot_type) + apply_cert_config(duthost) + r = restapi.get_reset_status(construct_url) + pytest_assert(r.status_code == 200) + logger.info(r.json()) + response = r.json() + pytest_assert(response['reset_status'] == post_reboot_status) + + ''' This test creates a default VxLAN Tunnel and two VNETs. 
It adds VLAN, VLAN member, VLAN neighbor and routes to each VNET ''' @@ -66,6 +126,8 @@ def test_data_path(construct_url, vlan_members): pytest_assert(r.json() == json.loads(expected)) logger.info("VLAN 2000 with ip_prefix: 100.0.10.1/24 under vnet_id: vnet-guid-2 has been successfully created") + if len(vlan_members) < 1: + pytest.skip("No VLAN interface available") vlan_intf = vlan_members[0] logger.info("VLAN Interface: "+vlan_intf) @@ -100,7 +162,8 @@ def test_data_path(construct_url, vlan_members): # Add routes params = '[{"cmd": "add", "ip_prefix": "100.0.20.4/32", "nexthop": "100.3.152.52", "vnid": 7036001, "mac_address": null}, \ {"cmd": "add", "ip_prefix": "101.0.20.5/32", "nexthop": "100.3.152.52", "vnid": 7036001, "mac_address": "1c:34:da:72:b0:8a"}, \ - {"cmd": "add", "ip_prefix": "192.168.20.4/32", "nexthop": "100.3.152.52", "vnid": 7036001, "mac_address": null}]' + {"cmd": "add", "ip_prefix": "192.168.20.4/32", "nexthop": "100.3.152.52", "vnid": 7036001, "mac_address": null}, \ + {"cmd": "add", "ip_prefix": "100.0.30.0/24", "nexthop": "100.3.152.52", "vnid": 7036001, "mac_address": null}]' logger.info("Adding routes with vnid: 7036001 to VNET vnet-guid-2") r = restapi.patch_config_vrouter_vrf_id_routes(construct_url, 'vnet-guid-2', params) pytest_assert(r.status_code == 204) @@ -114,11 +177,32 @@ def test_data_path(construct_url, vlan_members): logger.info(r.json()) expected = [{"nexthop": "100.3.152.52", "ip_prefix": "192.168.20.4/32", "vnid": 7036001}, {"nexthop": "100.3.152.52", "ip_prefix": "101.0.20.5/32", "mac_address": "1c:34:da:72:b0:8a", "vnid": 7036001}, - {"nexthop": "100.3.152.52", "ip_prefix": "100.0.20.4/32", "vnid": 7036001}] + {"nexthop": "100.3.152.52", "ip_prefix": "100.0.20.4/32", "vnid": 7036001}, + {"nexthop": "100.3.152.52", "ip_prefix": "100.0.30.0/24", "vnid": 7036001}] for route in expected: pytest_assert(route in r.json()) logger.info("Routes with vnid: 7036001 to VNET vnet-guid-2 have been added successfully") + # Add routes + params = '[{"cmd": "add", "ip_prefix": "100.0.50.4/24", "nexthop": "100.3.152.52", "vnid": 7036001, "mac_address": null}, \ + {"cmd": "add", "ip_prefix": "100.0.70.0/16", "nexthop": "100.3.152.52", "vnid": 7036001, "mac_address": null}]' + logger.info("Adding routes with incorrect CIDR addresses with vnid: 7036001 to VNET vnet-guid-2") + r = restapi.patch_config_vrouter_vrf_id_routes(construct_url, 'vnet-guid-2', params) + pytest_assert(r.status_code == 207) + + # Verify routes have not been added + # Add some delay before query + time.sleep(5) + params = '{}' + r = restapi.get_config_vrouter_vrf_id_routes(construct_url, 'vnet-guid-2', params) + pytest_assert(r.status_code == 200) + logger.info(r.json()) + expected = [{"nexthop": "100.3.152.52", "ip_prefix": "100.0.50.4/24", "vnid": 7036001}, + {"nexthop": "100.3.152.52", "ip_prefix": "100.0.70.0/16", "vnid": 7036001}] + for route in expected: + pytest_assert(route not in r.json()) + logger.info("Routes with incorrect CIDR addresses with vnid: 7036001 to VNET vnet-guid-2 have not been added successfully") + # # Create second VNET and add VLAN, VLAN member, VLAN neighbor and routes to it @@ -186,7 +270,8 @@ def test_data_path(construct_url, vlan_members): # Add routes params = '[{"cmd": "add", "ip_prefix": "100.0.20.4/32", "nexthop": "100.3.152.52", "vnid": 7036002, "mac_address": null}, \ {"cmd": "add", "ip_prefix": "101.0.20.5/32", "nexthop": "100.3.152.52", "vnid": 7036002, "mac_address": "1c:34:da:72:b0:8a"}, \ - {"cmd": "add", "ip_prefix": "192.168.20.4/32", "nexthop": 
"100.3.152.52", "vnid": 7036002, "mac_address": null}]' + {"cmd": "add", "ip_prefix": "192.168.20.4/32", "nexthop": "100.3.152.52", "vnid": 7036002, "mac_address": null}, \ + {"cmd": "add", "ip_prefix": "100.0.30.0/24", "nexthop": "100.3.152.52", "vnid": 7036002, "mac_address": null}]' logger.info("Adding routes with vnid: 7036002 to VNET vnet-guid-3") r = restapi.patch_config_vrouter_vrf_id_routes(construct_url, 'vnet-guid-3', params) pytest_assert(r.status_code == 204) @@ -198,11 +283,32 @@ def test_data_path(construct_url, vlan_members): logger.info(r.json()) expected = [{"nexthop": "100.3.152.52", "ip_prefix": "192.168.20.4/32", "vnid": 7036002}, {"nexthop": "100.3.152.52", "ip_prefix": "101.0.20.5/32", "mac_address": "1c:34:da:72:b0:8a", "vnid": 7036002}, - {"nexthop": "100.3.152.52", "ip_prefix": "100.0.20.4/32", "vnid": 7036002}] + {"nexthop": "100.3.152.52", "ip_prefix": "100.0.20.4/32", "vnid": 7036002}, + {"nexthop": "100.3.152.52", "ip_prefix": "100.0.30.0/24", "vnid": 7036002}] for route in expected: pytest_assert(route in r.json()) logger.info("Routes with vnid: 3000 to VNET vnet-guid-3 have been added successfully") + # Add routes + params = '[{"cmd": "add", "ip_prefix": "100.0.50.4/24", "nexthop": "100.3.152.52", "vnid": 7036002, "mac_address": null}, \ + {"cmd": "add", "ip_prefix": "100.0.70.0/16", "nexthop": "100.3.152.52", "vnid": 7036002, "mac_address": null}]' + logger.info("Adding routes with incorrect CIDR addresses with vnid: 7036002 to VNET vnet-guid-3") + r = restapi.patch_config_vrouter_vrf_id_routes(construct_url, 'vnet-guid-3', params) + pytest_assert(r.status_code == 207) + + # Verify routes have not been added + # Add some delay before query + time.sleep(5) + params = '{}' + r = restapi.get_config_vrouter_vrf_id_routes(construct_url, 'vnet-guid-3', params) + pytest_assert(r.status_code == 200) + logger.info(r.json()) + expected = [{"nexthop": "100.3.152.52", "ip_prefix": "100.0.50.4/24", "vnid": 7036002}, + {"nexthop": "100.3.152.52", "ip_prefix": "100.0.70.0/16", "vnid": 7036002}] + for route in expected: + pytest_assert(route not in r.json()) + logger.info("Routes with incorrect CIDR addresses with vnid: 7036002 to VNET vnet-guid-3 have not been added successfully") + ''' This test creates a VNET. 
It adds routes to the VNET and deletes them diff --git a/tests/route/test_default_route.py b/tests/route/test_default_route.py index 9575636fcf..d991fa9396 100644 --- a/tests/route/test_default_route.py +++ b/tests/route/test_default_route.py @@ -1,7 +1,10 @@ import pytest import ipaddress import logging -from tests.common.helpers.assertions import pytest_assert, pytest_require + +from tests.common.helpers.assertions import pytest_assert +from tests.common.storage_backend.backend_utils import skip_test_module_over_backend_topologies + pytestmark = [ pytest.mark.topology('any'), @@ -10,13 +13,11 @@ logger = logging.getLogger(__name__) -def test_default_route_set_src(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_asic_index, tbinfo): +def test_default_route_set_src(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_asic_index): """ check if ipv4 and ipv6 default src address match Loopback0 address """ - pytest_require('t1-backend' not in tbinfo['topo']['name'], "Skip this testcase since this topology {} has no default routes".format(tbinfo['topo']['name'])) - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] asichost = duthost.asic_instance(enum_asic_index) @@ -48,13 +49,11 @@ def test_default_route_set_src(duthosts, enum_rand_one_per_hwsku_frontend_hostna pytest_assert(rtinfo['set_src'] == lo_ipv6.ip, \ "default v6 route set src to wrong IP {} != {}".format(rtinfo['set_src'], lo_ipv6.ip)) -def test_default_ipv6_route_next_hop_global_address(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_asic_index, tbinfo): +def test_default_ipv6_route_next_hop_global_address(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_asic_index): """ check if ipv6 default route nexthop address uses global address """ - pytest_require('t1-backend' not in tbinfo['topo']['name'], "Skip this testcase since this topology {} has no default routes".format(tbinfo['topo']['name'])) - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] asichost = duthost.asic_instance(enum_asic_index) diff --git a/tests/route/test_route_perf.py b/tests/route/test_route_perf.py index f5e91a84c2..10f4b2c63a 100644 --- a/tests/route/test_route_perf.py +++ b/tests/route/test_route_perf.py @@ -187,7 +187,7 @@ def _check_num_routes(expected_num_routes): # Check the number of routes in ASIC_DB return count_routes(duthost) == expected_num_routes - if not wait_until(route_timeout, 0.5, _check_num_routes, expected_num_routes): + if not wait_until(route_timeout, 0.5, 0, _check_num_routes, expected_num_routes): pytest.fail('failed to add routes within time limit') # Record time when all routes show up in ASIC_DB diff --git a/tests/route/test_static_route.py b/tests/route/test_static_route.py index e3e62ac8b0..669c03356f 100644 --- a/tests/route/test_static_route.py +++ b/tests/route/test_static_route.py @@ -12,12 +12,13 @@ from tests.common.dualtor.dual_tor_utils import get_t1_ptf_ports from tests.common.dualtor.mux_simulator_control import mux_server_url from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor -from tests.common.utilities import wait_until +from tests.common.utilities import wait_until, get_intf_by_sub_intf from tests.common import config_reload import ptf.testutils as testutils import ptf.mask as mask import ptf.packet as packet from pkg_resources import parse_version +from tests.common import constants pytestmark = [ @@ -38,15 +39,15 @@ def is_dualtor(tbinfo): return "dualtor" in tbinfo["topo"]["name"] -def add_ipaddr(ptfadapter, 
ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False): +def add_ipaddr(ptfadapter, ptfhost, nexthop_addrs, prefix_len, nexthop_interfaces, ipv6=False): if ipv6: for idx in range(len(nexthop_addrs)): - ptfhost.shell("ip -6 addr add {}/{} dev eth{}".format(nexthop_addrs[idx], prefix_len, nexthop_devs[idx]), module_ignore_errors=True) + ptfhost.shell("ip -6 addr add {}/{} dev eth{}".format(nexthop_addrs[idx], prefix_len, nexthop_interfaces[idx]), module_ignore_errors=True) else: vlan_host_map = defaultdict(dict) for idx in range(len(nexthop_addrs)): - mac = ptfadapter.dataplane.get_mac(0, nexthop_devs[idx]).replace(":", "") - vlan_host_map[nexthop_devs[idx]][nexthop_addrs[idx]] = mac + mac = ptfadapter.dataplane.get_mac(0, int(get_intf_by_sub_intf(nexthop_interfaces[idx]))).replace(":", "") + vlan_host_map[nexthop_interfaces[idx]][nexthop_addrs[idx]] = mac arp_responder_conf = {} for port in vlan_host_map: @@ -114,6 +115,11 @@ def generate_and_verify_traffic(duthost, ptfadapter, tbinfo, ip_dst, expected_po testutils.send(ptfadapter, ptf_t1_intf_index, pkt) testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=expected_ports) +def wait_all_bgp_up(duthost): + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + bgp_neighbors = config_facts.get('BGP_NEIGHBOR', {}) + if not wait_until(300, 10, 0, duthost.check_bgp_session_state, bgp_neighbors.keys()): + pytest.fail("not all bgp sessions are up after config reload") def check_route_redistribution(duthost, prefix, ipv6, removed=False): if ipv6: @@ -134,20 +140,23 @@ def check_route_redistribution(duthost, prefix, ipv6, removed=False): if matched: bgp_neighbors.append(str(matched.group(0))) - for neighbor in bgp_neighbors: - adv_routes = duthost.shell(SHOW_BGP_ADV_ROUTES_CMD_TEMPLATE.format(neighbor))["stdout"] - if removed: - assert prefix not in adv_routes - else: - assert prefix in adv_routes + def _check_routes(): + for neighbor in bgp_neighbors: + adv_routes = duthost.shell(SHOW_BGP_ADV_ROUTES_CMD_TEMPLATE.format(neighbor))["stdout"] + if removed and prefix in adv_routes: + return False + if not removed and prefix not in adv_routes: + return False + return True + assert(wait_until(60, 15, 0, _check_routes)) -def run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, prefix, nexthop_addrs, prefix_len, nexthop_devs, ipv6=False, config_reload_test=False): +def run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, prefix, nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces, ipv6=False, config_reload_test=False): # Clean up arp or ndp clear_arp_ndp(duthost, ipv6=ipv6) # Add ipaddresses in ptf - add_ipaddr(ptfadapter, ptfhost, nexthop_addrs, prefix_len, nexthop_devs, ipv6=ipv6) + add_ipaddr(ptfadapter, ptfhost, nexthop_addrs, prefix_len, nexthop_interfaces, ipv6=ipv6) try: # Add static route @@ -165,6 +174,9 @@ def run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, prefix, nexthop_ if config_reload_test: duthost.shell('config save -y') config_reload(duthost, wait=350) + #FIXME: We saw re-establishing BGP sessions can takes around 7 minutes + # on some devices (like 4600) after config reload, so we need below patch + wait_all_bgp_up(duthost) generate_and_verify_traffic(duthost, ptfadapter, tbinfo, ip_dst, nexthop_devs, ipv6=ipv6) check_route_redistribution(duthost, prefix, ipv6) @@ -192,53 +204,60 @@ def get_nexthops(duthost, tbinfo, ipv6=False, count=1): vlan_intf = mg_facts['minigraph_vlan_interfaces'][1 if ipv6 else 0] prefix_len = vlan_intf['prefixlen'] + 
is_backend_topology = mg_facts.get(constants.IS_BACKEND_TOPOLOGY_KEY, False) if is_dualtor(tbinfo): server_ips = mux_cable_server_ip(duthost) vlan_intfs = natsort.natsorted(server_ips.keys()) nexthop_devs = [mg_facts["minigraph_ptf_indices"][_] for _ in vlan_intfs] server_ip_key = "server_ipv6" if ipv6 else "server_ipv4" nexthop_addrs = [server_ips[_][server_ip_key].split("/")[0] for _ in vlan_intfs] + nexthop_interfaces = nexthop_devs else: vlan_subnet = ipaddress.ip_network(vlan_intf['subnet']) - vlan_ports = mg_facts['minigraph_vlans'][mg_facts['minigraph_vlan_interfaces'][1 if ipv6 else 0]['attachto']]['members'] + vlan = mg_facts['minigraph_vlans'][mg_facts['minigraph_vlan_interfaces'][1 if ipv6 else 0]['attachto']] + vlan_ports = vlan['members'] + vlan_id = vlan['vlanid'] vlan_ptf_ports = [mg_facts['minigraph_ptf_indices'][port] for port in vlan_ports] nexthop_devs = vlan_ptf_ports + # backend topology use ethx.x(e.g. eth30.1000) during servers and T0 in ptf + # in other topology use ethx(e.g. eth30) + if is_backend_topology: + nexthop_interfaces = [str(dev) + constants.VLAN_SUB_INTERFACE_SEPARATOR + str(vlan_id) for dev in nexthop_devs] + else: + nexthop_interfaces = nexthop_devs nexthop_addrs = [str(vlan_subnet[i + 2]) for i in range(len(nexthop_devs))] count = min(count, len(nexthop_devs)) indices = random.sample(list(range(len(nexthop_devs))), k=count) - return prefix_len, [nexthop_addrs[_] for _ in indices], [nexthop_devs[_] for _ in indices] + return prefix_len, [nexthop_addrs[_] for _ in indices], [nexthop_devs[_] for _ in indices], [nexthop_interfaces[_] for _ in indices] def test_static_route(rand_selected_dut, ptfadapter, ptfhost, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor): duthost = rand_selected_dut skip_201911_and_older(duthost) - prefix_len, nexthop_addrs, nexthop_devs = get_nexthops(duthost, tbinfo) + prefix_len, nexthop_addrs, nexthop_devs, nexthop_interfaces = get_nexthops(duthost, tbinfo) run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, "1.1.1.0/24", - nexthop_addrs, prefix_len, nexthop_devs) + nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces) def test_static_route_ecmp(rand_selected_dut, ptfadapter, ptfhost, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor): duthost = rand_selected_dut skip_201911_and_older(duthost) - prefix_len, nexthop_addrs, nexthop_devs = get_nexthops(duthost, tbinfo, count=3) + prefix_len, nexthop_addrs, nexthop_devs, nexthop_interfaces = get_nexthops(duthost, tbinfo, count=3) run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, "2.2.2.0/24", - nexthop_addrs, prefix_len, nexthop_devs, config_reload_test=True) + nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces, config_reload_test=True) def test_static_route_ipv6(rand_selected_dut, ptfadapter, ptfhost, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor): duthost = rand_selected_dut skip_201911_and_older(duthost) - prefix_len, nexthop_addrs, nexthop_devs = get_nexthops(duthost, tbinfo, ipv6=True) + prefix_len, nexthop_addrs, nexthop_devs, nexthop_interfaces = get_nexthops(duthost, tbinfo, ipv6=True) run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, "2000:1::/64", - nexthop_addrs, prefix_len, nexthop_devs, ipv6=True) + nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces, ipv6=True) -# This test case may fail due to a known issue https://github.com/Azure/sonic-buildimage/issues/4930. -# Temporarily disabling the test case due to the this issue. 
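Note on the backend-topology handling above: on t0-backend the PTF side uses VLAN sub-interfaces (e.g. eth30.1000), so the MAC lookup needs the parent port index while `ip addr add` and the ARP responder keep the full sub-interface name. The real helper is imported from tests.common.utilities; the sketch below only illustrates the expected behavior and assumes the separator constant is ".".

```python
# Hedged sketch, not the actual tests.common implementation.
VLAN_SUB_INTERFACE_SEPARATOR = "."   # assumed value; "eth30.1000" -> parent "30", VLAN 1000

def get_intf_by_sub_intf(intf):
    """Return the parent PTF port index/name for a sub-interface string.

    "30.1000" -> "30"; plain indices such as 30 or "30" pass through unchanged.
    """
    return str(intf).split(VLAN_SUB_INTERFACE_SEPARATOR)[0]

# Usage mirroring add_ipaddr() above: the dataplane MAC lookup takes the parent
# index, while the interface name handed to "ip addr add" stays "eth30.1000".
parent_index = int(get_intf_by_sub_intf("30.1000"))   # -> 30
```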
-@pytest.mark.skip(reason="Test case may fail due to a known issue") def test_static_route_ecmp_ipv6(rand_selected_dut, ptfadapter, ptfhost, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor): duthost = rand_selected_dut skip_201911_and_older(duthost) - prefix_len, nexthop_addrs, nexthop_devs = get_nexthops(duthost, tbinfo, ipv6=True, count=3) + prefix_len, nexthop_addrs, nexthop_devs, nexthop_interfaces = get_nexthops(duthost, tbinfo, ipv6=True, count=3) run_static_route_test(duthost, ptfadapter, ptfhost, tbinfo, "2000:2::/64", - nexthop_addrs, prefix_len, nexthop_devs, ipv6=True, config_reload_test=True) + nexthop_addrs, prefix_len, nexthop_devs, nexthop_interfaces, ipv6=True, config_reload_test=True) diff --git a/tests/run_tests.sh b/tests/run_tests.sh index b09b877e7e..20f17bdb42 100755 --- a/tests/run_tests.sh +++ b/tests/run_tests.sh @@ -146,6 +146,11 @@ function setup_test_options() done TEST_CASES=$(python -c "print '\n'.join('''${FINAL_CASES}'''.split())") + if [[ -z $TEST_CASES ]]; then + echo "No test case to run based on conditions of '-c', '-I' and '-S'. Please check..." + show_help_and_exit 1 + fi + PYTEST_COMMON_OPTS="--inventory ${INVENTORY} \ --host-pattern ${DUT_NAME} \ --testbed ${TESTBED_NAME} \ diff --git a/tests/sai_qualify/conftest.py b/tests/sai_qualify/conftest.py index aebc6ab603..ef10ef1921 100644 --- a/tests/sai_qualify/conftest.py +++ b/tests/sai_qualify/conftest.py @@ -159,11 +159,11 @@ def start_sai_test_conatiner_with_retry(duthost, container_name): dut_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] logger.info("Checking the PRC connection before starting the {}.".format(container_name)) - rpc_ready = wait_until(1, 1, _is_rpc_server_ready, dut_ip) + rpc_ready = wait_until(1, 1, 0, _is_rpc_server_ready, dut_ip) if not rpc_ready: logger.info("Attempting to start {}.".format(container_name)) - sai_ready = wait_until(SAI_TEST_CTNR_CHECK_TIMEOUT_IN_SEC, SAI_TEST_CTNR_RESTART_INTERVAL_IN_SEC, _is_sai_test_container_restarted, duthost, container_name) + sai_ready = wait_until(SAI_TEST_CTNR_CHECK_TIMEOUT_IN_SEC, SAI_TEST_CTNR_RESTART_INTERVAL_IN_SEC, 0, _is_sai_test_container_restarted, duthost, container_name) pt_assert(sai_ready, "[{}] sai test container failed to start in {}s".format(container_name, SAI_TEST_CTNR_CHECK_TIMEOUT_IN_SEC)) logger.info("Waiting for another {} second for sai test container warm up.".format(SAI_TEST_CONTAINER_WARM_UP_IN_SEC)) time.sleep(SAI_TEST_CONTAINER_WARM_UP_IN_SEC) @@ -199,7 +199,7 @@ def _is_sai_test_container_restarted(duthost, container_name): logger.info("{} already exists, stop and remove it for a clear restart.".format(container_name)) stop_and_rm_sai_test_container(duthost, container_name) _start_sai_test_container(duthost, container_name) - rpc_ready = wait_until(RPC_RESTART_INTERVAL_IN_SEC, RPC_CHECK_INTERVAL_IN_SEC, _is_rpc_server_ready, dut_ip) + rpc_ready = wait_until(RPC_RESTART_INTERVAL_IN_SEC, RPC_CHECK_INTERVAL_IN_SEC, 0, _is_rpc_server_ready, dut_ip) if not rpc_ready: logger.info("Failed to start up {} for sai testing on DUT, stop it for a restart".format(container_name)) return rpc_ready @@ -437,7 +437,7 @@ def ready_for_sai_test(): return False return True - shutdown_check = wait_until(20, 4, ready_for_sai_test) + shutdown_check = wait_until(20, 4, 0, ready_for_sai_test) if running_services: format_list = ['{:>1}' for item in running_services] servers = ','.join(format_list) diff --git a/tests/saitests/sai_qos_tests.py b/tests/saitests/sai_qos_tests.py index 
d2e079d607..93f966265b 100644 --- a/tests/saitests/sai_qos_tests.py +++ b/tests/saitests/sai_qos_tests.py @@ -567,11 +567,12 @@ def runTest(self): # dot1p 4 -> pg 4 # dot1p 5 -> pg 0 # dot1p 6 -> pg 0 - # dot1p 7 -> pg 0 + # dot1p 7 -> pg 7 pg_dot1p_map = { - 0 : [0, 1, 2, 5, 6, 7], + 0 : [0, 1, 2, 5, 6], 3 : [3], - 4 : [4] + 4 : [4], + 7 : [7] } print >> sys.stderr, pg_dot1p_map @@ -717,21 +718,31 @@ def runTest(self): else: margin = 2 + # For TH3, some packets stay in egress memory and doesn't show up in shared buffer or leakout + if 'pkts_num_egr_mem' in self.test_params.keys(): + pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) + sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) try: # Since there is variability in packet leakout in hwsku Arista-7050CX3-32S-D48C8 and # Arista-7050CX3-32S-C32. Starting with zero pkts_num_leak_out and trying to find # actual leakout by sending packets and reading actual leakout from HW - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': pkts_num_leak_out = 0 # send packets short of triggering pfc - send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_trig_pfc - 1 - margin) + if hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': + # send packets short of triggering pfc + send_packet(self, src_port_id, pkt, pkts_num_egr_mem + pkts_num_leak_out + pkts_num_trig_pfc - 1 - margin) + else: + # send packets short of triggering pfc + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_trig_pfc - 1 - margin) + # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) actual_pkts_num_leak_out = xmit_counters[TRANSMITTED_PKTS] - xmit_counters_base[TRANSMITTED_PKTS] send_packet(self, src_port_id, pkt, actual_pkts_num_leak_out) @@ -932,6 +943,10 @@ def runTest(self): src_port_id, pkt_dst_mac3, dst_port_3_ip, src_port_ip, dst_port_3_id, src_port_vlan ) + # For TH3, some packets stay in egress memory and doesn't show up in shared buffer or leakout + if 'pkts_num_egr_mem' in self.test_params.keys(): + pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) + sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id, dst_port_2_id, dst_port_3_id]) try: @@ -943,15 +958,21 @@ def runTest(self): # Since there is variability in packet leakout in hwsku Arista-7050CX3-32S-D48C8 and # Arista-7050CX3-32S-C32. 
Starting with zero pkts_num_leak_out and trying to find # actual leakout by sending packets and reading actual leakout from HW - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': pkts_num_leak_out = 0 - send_packet( - self, src_port_id, pkt, - pkts_num_leak_out + pkts_num_trig_pfc - pkts_num_dismiss_pfc - hysteresis - ) + if hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': + send_packet( + self, src_port_id, pkt, + pkts_num_egr_mem + pkts_num_leak_out + pkts_num_trig_pfc - pkts_num_dismiss_pfc - hysteresis + ) + else: + send_packet( + self, src_port_id, pkt, + pkts_num_leak_out + pkts_num_trig_pfc - pkts_num_dismiss_pfc - hysteresis + ) - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) actual_port_leak_out = xmit_counters[TRANSMITTED_PKTS] - xmit_counters_base[TRANSMITTED_PKTS] send_packet(self, src_port_id, pkt, actual_port_leak_out) @@ -960,21 +981,30 @@ def runTest(self): xmit_2_counters_base, queue_counters = sai_thrift_read_port_counters( self.client, port_list[dst_port_2_id] ) - send_packet( - self, src_port_id, pkt2, - pkts_num_leak_out + margin + pkts_num_dismiss_pfc - 1 + hysteresis - ) + if hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': + send_packet( + self, src_port_id, pkt2, + pkts_num_egr_mem + pkts_num_leak_out + margin + pkts_num_dismiss_pfc - 1 + hysteresis + ) + else: + send_packet( + self, src_port_id, pkt2, + pkts_num_leak_out + margin + pkts_num_dismiss_pfc - 1 + hysteresis + ) - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': send_packet(self, src_port_id, pkt2, actual_port_leak_out) # send 1 packet to dst port 3, triggering PFC xmit_3_counters_base, queue_counters = sai_thrift_read_port_counters( self.client, port_list[dst_port_3_id] ) - send_packet(self, src_port_id, pkt3, pkts_num_leak_out + 1) + if hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': + send_packet(self, src_port_id, pkt3, pkts_num_egr_mem + pkts_num_leak_out + 1) + else: + send_packet(self, src_port_id, pkt3, pkts_num_leak_out + 1) - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': send_packet(self, src_port_id, pkt3, actual_port_leak_out) # allow enough time for the dut to sync up the counter values in counters_db @@ -1152,6 +1182,10 @@ def runTest(self): recv_counters_bases = [sai_thrift_read_port_counters(self.client, port_list[sid])[0] for sid in self.src_port_ids] xmit_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[self.dst_port_id]) + # For TH3, some packets stay in egress memory and doesn't show up in shared buffer or leakout + if 'pkts_num_egr_mem' in self.test_params.keys(): + pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) + # Pause egress of dut xmit port 
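For context on the repeated `pkts_num_egr_mem` additions in these hunks: on the TH3-based DellEMC-Z9332f SKUs some packets are held in egress memory and never show up in the shared pool or the leak-out counter, so the tests pre-load that amount before the usual leak-out/trigger counts. A minimal sketch of how this per-hwsku adjustment could be consolidated (names and the helper itself are illustrative, not part of the patch):

```python
# Hedged sketch of the per-hwsku packet-count adjustment used above.
EGR_MEM_HWSKUS = ('DellEMC-Z9332f-M-O16C64', 'DellEMC-Z9332f-O32')

def pkts_to_send(hwsku, test_params, base_count):
    """Add the egress-memory offset on SKUs that absorb packets in egress memory."""
    if hwsku in EGR_MEM_HWSKUS:
        return int(test_params.get('pkts_num_egr_mem', 0)) + base_count
    return base_count

# e.g. pkts_to_send(hwsku, self.test_params,
#                   pkts_num_leak_out + pkts_num_trig_pfc - 1 - margin)
```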
sai_thrift_port_tx_disable(self.client, self.asic_type, [self.dst_port_id]) @@ -1164,7 +1198,12 @@ def runTest(self): ip_src=self.src_port_ips[sidx], ip_dst=self.dst_port_ip, ip_ttl=64) - send_packet(self, self.src_port_ids[sidx], pkt, self.pkts_num_leak_out) + + hwsku = self.test_params['hwsku'] + if (hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32'): + send_packet(self, self.src_port_ids[sidx], pkt, pkts_num_egr_mem + self.pkts_num_leak_out) + else: + send_packet(self, self.src_port_ids[sidx], pkt, self.pkts_num_leak_out) # send packets to all pgs to fill the service pool # and trigger PFC on all pgs @@ -1185,6 +1224,7 @@ def runTest(self): pkts_num_trig_pfc = self.pkts_num_trig_pfc else: pkts_num_trig_pfc = self.pkts_num_trig_pfc_shp[i] + send_packet(self, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, pkts_num_trig_pfc / self.pkt_size_factor) print >> sys.stderr, "Service pool almost filled" @@ -1540,6 +1580,9 @@ def runTest(self): pkts.append(recv_pkt) except AttributeError: continue + except IndexError: + # Ignore captured non-IP packet + continue queue_pkt_counters = [0] * (prio_list[-1] + 1) queue_num_of_pkts = [0] * (prio_list[-1] + 1) @@ -1655,19 +1698,28 @@ def runTest(self): else: margin = 2 + # For TH3, some packets stay in egress memory and doesn't show up in shared buffer or leakout + if 'pkts_num_egr_mem' in self.test_params.keys(): + pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) + sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) try: # Since there is variability in packet leakout in hwsku Arista-7050CX3-32S-D48C8 and # Arista-7050CX3-32S-C32. Starting with zero pkts_num_leak_out and trying to find # actual leakout by sending packets and reading actual leakout from HW - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': pkts_num_leak_out = 0 # send packets short of triggering egress drop - send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_trig_egr_drp - 1 - margin) + if hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': + # send packets short of triggering egress drop + send_packet(self, src_port_id, pkt, pkts_num_egr_mem + pkts_num_leak_out + pkts_num_trig_egr_drp - 1 - margin) + else: + # send packets short of triggering egress drop + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_trig_egr_drp - 1 - margin) - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) actual_pkts_num_leak_out = xmit_counters[TRANSMITTED_PKTS] - xmit_counters_base[TRANSMITTED_PKTS] send_packet(self, src_port_id, pkt, actual_pkts_num_leak_out) @@ -1765,11 +1817,18 @@ def runTest(self): # Add slight tolerance in threshold characterization to consider # the case that cpu puts packets in the egress queue after we pause the egress # or the leak out is simply less than expected as we have occasionally observed - margin = 2 + if hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': + margin = 10 + else: + margin = 2 # Get a snapshot of counter values xmit_counters_base, queue_counters_base = sai_thrift_read_port_counters(self.client, 
port_list[dst_port_id]) + # For TH3, some packets stay in egress memory and doesn't show up in shared buffer or leakout + if 'pkts_num_egr_mem' in self.test_params.keys(): + pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) + sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) # send packets @@ -1777,18 +1836,21 @@ def runTest(self): # Since there is variability in packet leakout in hwsku Arista-7050CX3-32S-D48C8 and # Arista-7050CX3-32S-C32. Starting with zero pkts_num_leak_out and trying to find # actual leakout by sending packets and reading actual leakout from HW - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': pkts_num_leak_out = pkts_num_leak_out - margin # send packets to fill pg min but not trek into shared pool # so if pg min is zero, it directly treks into shared pool by 1 # this is the case for lossy traffic - send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_fill_min) + if hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': + send_packet(self, src_port_id, pkt, pkts_num_egr_mem + pkts_num_leak_out + pkts_num_fill_min) + else: + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_fill_min) # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) actual_pkts_num_leak_out = xmit_counters[TRANSMITTED_PKTS] - xmit_counters_base[TRANSMITTED_PKTS] if actual_pkts_num_leak_out > pkts_num_leak_out: @@ -2015,6 +2077,10 @@ def runTest(self): # shared test, so the margin here actually means extra capacity margin margin = 8 + # For TH3, some packets stay in egress memory and doesn't show up in shared buffer or leakout + if 'pkts_num_egr_mem' in self.test_params.keys(): + pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) + xmit_counters_base, queue_counters_base = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) @@ -2023,19 +2089,22 @@ def runTest(self): # Since there is variability in packet leakout in hwsku Arista-7050CX3-32S-D48C8 and # Arista-7050CX3-32S-C32. 
Starting with zero pkts_num_leak_out and trying to find # actual leakout by sending packets and reading actual leakout from HW - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': pkts_num_leak_out = pkts_num_leak_out - margin # send packets to fill queue min but not trek into shared pool # so if queue min is zero, it will directly trek into shared pool by 1 # TH2 uses scheduler-based TX enable, this does not require sending packets # to leak out - send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_fill_min) + if hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': + send_packet(self, src_port_id, pkt, pkts_num_egr_mem + pkts_num_leak_out + pkts_num_fill_min) + else: + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_fill_min) # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) actual_pkts_num_leak_out = xmit_counters[TRANSMITTED_PKTS] - xmit_counters_base[TRANSMITTED_PKTS] if actual_pkts_num_leak_out > pkts_num_leak_out: diff --git a/tests/scripts/add_ip_backend.sh b/tests/scripts/add_ip_backend.sh new file mode 100644 index 0000000000..1fe651117a --- /dev/null +++ b/tests/scripts/add_ip_backend.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +# only seek for sub-interface and add ip for them +for sub_intf in `ls /sys/class/net | grep -E "^eth[0-9]+(\.[0-9]+)$"`; do + # for example, sub_intf be like: eth4.10, then corresponding properties be like: + # port: 4, vlan_id: 10, last_ed: 33, ip: 10.0.0.33/31 + port=`echo $sub_intf|awk -F'eth|\.' '{print $2}'` + vlan_id=`echo $sub_intf|awk -F'eth|\.' 
'{print $3}'` + last_el=$((25+port*2)) + ip address add 10.0.0.$last_el/31 dev "eth$port.$vlan_id" +done diff --git a/tests/sflow/test_sflow.py b/tests/sflow/test_sflow.py index 602fe8a141..e5cccdcde0 100644 --- a/tests/sflow/test_sflow.py +++ b/tests/sflow/test_sflow.py @@ -450,7 +450,7 @@ def testRebootSflowEnable(self, sflowbase_config, config_sflow_agent, duthost, l verify_show_sflow(duthost,status='up',polling_int=80) duthost.command('sudo config save -y') reboot(duthost, localhost) - assert wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started" + assert wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started" verify_show_sflow(duthost,status='up',collector=['collector0','collector1'],polling_int=80) for intf in var['sflow_ports']: var['sflow_ports'][intf]['ifindex'] = get_ifindex(duthost,intf) @@ -475,7 +475,7 @@ def testRebootSflowDisable(self, sflowbase_config, duthost, localhost, partial_p active_collectors="[]" ) duthost.command('sudo config save -y') reboot(duthost, localhost) - assert wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started" + assert wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started" verify_show_sflow(duthost,status='down') for intf in var['sflow_ports']: var['sflow_ports'][intf]['ifindex'] = get_ifindex(duthost,intf) @@ -493,7 +493,7 @@ def testFastreboot(self, sflowbase_config, config_sflow_agent, duthost, localhos verify_show_sflow(duthost,status='up',collector=['collector0','collector1']) duthost.command('sudo config save -y') reboot(duthost, localhost,reboot_type='fast') - assert wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started" + assert wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started" verify_show_sflow(duthost,status='up',collector=['collector0','collector1']) for intf in var['sflow_ports']: var['sflow_ports'][intf]['ifindex'] = get_ifindex(duthost,intf) @@ -511,7 +511,7 @@ def testWarmreboot(self, sflowbase_config, duthost, localhost, partial_ptf_runne verify_show_sflow(duthost,status='up',collector=['collector0','collector1']) duthost.command('sudo config save -y') reboot(duthost, localhost,reboot_type='warm') - assert wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started" + assert wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started" verify_show_sflow(duthost,status='up',collector=['collector0','collector1']) for intf in var['sflow_ports']: var['sflow_ports'][intf]['ifindex'] = get_ifindex(duthost,intf) diff --git a/tests/show_techsupport/tech_support_cmds.py b/tests/show_techsupport/tech_support_cmds.py index cdaccb3964..8c21590344 100644 --- a/tests/show_techsupport/tech_support_cmds.py +++ b/tests/show_techsupport/tech_support_cmds.py @@ -113,6 +113,16 @@ ] docker_cmds = [ + "docker exec -it syncd{} saidump", + "docker stats --no-stream", + "docker ps -a", + "docker top pmon", + "docker exec lldp{} lldpcli show statistics", + "docker logs bgp{}", + "docker logs swss{}", +] + +docker_cmds_201911 = [ "docker exec -it syncd{} saidump", "docker stats --no-stream", "docker ps -a", @@ -168,6 +178,11 @@ "cp .{}/sai.profile", ] +copy_config_cmds_no_qos = [ + "cp .{}/port_config.ini", + "cp .{}/sai.profile", +] + 
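Many call sites in this change insert a third positional argument (`0`) before the checker function passed to `wait_until`, which suggests the helper gained an initial-delay parameter. A minimal sketch of a poller with that calling convention, under that assumption (the real implementation lives in tests/common/utilities.py and may differ):

```python
import time

def wait_until(timeout, interval, delay, condition, *args, **kwargs):
    """Poll condition(*args, **kwargs) until truthy or timeout expires.

    Sketch only: mirrors the updated call sites (timeout, interval, delay,
    checker, ...), treating `delay` as a sleep before the first poll.
    """
    if delay:
        time.sleep(delay)
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition(*args, **kwargs):
            return True
        time.sleep(interval)
    return False
```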
broadcom_cmd_bcmcmd = [ 'bcmcmd{} -t5 version', 'bcmcmd{} -t5 soc', diff --git a/tests/show_techsupport/test_techsupport.py b/tests/show_techsupport/test_techsupport.py index 67a27061e8..8716891fc2 100644 --- a/tests/show_techsupport/test_techsupport.py +++ b/tests/show_techsupport/test_techsupport.py @@ -287,7 +287,7 @@ def test_techsupport(request, config, duthosts, enum_rand_one_per_hwsku_frontend for i in range(loop_range): logger.debug("Running show techsupport ... ") - wait_until(300, 20, execute_command, duthost, str(since)) + wait_until(300, 20, 0, execute_command, duthost, str(since)) tar_file = [j for j in pytest.tar_stdout.split('\n') if j != ''][-1] stdout = duthost.command("rm -rf {}".format(tar_file)) logger.debug("Sleeping for {} seconds".format(loop_delay)) @@ -357,7 +357,7 @@ def commands_to_check(duthosts, enum_rand_one_per_hwsku_frontend_hostname): "nat_cmds": cmds.nat_cmds, "bfd_cmds": add_asic_arg(" -n {}", cmds.bfd_cmds, num), "redis_db_cmds": add_asic_arg("asic{} ", cmds.redis_db_cmds, num), - "docker_cmds": add_asic_arg("{}", cmds.docker_cmds, num), + "docker_cmds": add_asic_arg("{}", cmds.docker_cmds_201911 if '201911' in duthost.os_version else cmds.docker_cmds, num), "misc_show_cmds": add_asic_arg("asic{} ", cmds.misc_show_cmds, num), "misc_cmds": cmds.misc_cmds, } @@ -369,10 +369,22 @@ def commands_to_check(duthosts, enum_rand_one_per_hwsku_frontend_hostname): add_asic_arg(" -n {}", cmds.broadcom_cmd_bcmcmd, num), "broadcom_cmd_misc": add_asic_arg("{}", cmds.broadcom_cmd_misc, num), - "copy_config_cmds": - add_asic_arg("/{}", cmds.copy_config_cmds, num), } ) + if duthost.facts["platform"] in ['x86_64-cel_e1031-r0']: + cmds_to_check.update( + { + "copy_config_cmds": + add_asic_arg("/{}", cmds.copy_config_cmds_no_qos, num), + } + ) + else: + cmds_to_check.update( + { + "copy_config_cmds": + add_asic_arg("/{}", cmds.copy_config_cmds, num), + } + ) # Remove /proc/dma for armh elif duthost.facts["asic_type"] == "marvell": if 'armhf-' in duthost.facts["platform"]: diff --git a/tests/snappi/pfc/test_pfc_pause_lossless_with_snappi.py b/tests/snappi/pfc/test_pfc_pause_lossless_with_snappi.py index e3df1b82fa..deb7921e2c 100644 --- a/tests/snappi/pfc/test_pfc_pause_lossless_with_snappi.py +++ b/tests/snappi/pfc/test_pfc_pause_lossless_with_snappi.py @@ -178,7 +178,7 @@ def test_pfc_pause_single_lossless_prio_reboot(snappi_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfc_test(api=snappi_api, @@ -243,7 +243,7 @@ def test_pfc_pause_multi_lossless_prio_reboot(snappi_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfc_test(api=snappi_api, diff --git a/tests/snappi/pfc/test_pfc_pause_lossy_with_snappi.py b/tests/snappi/pfc/test_pfc_pause_lossy_with_snappi.py index 4930eefe80..c33788391d 100644 --- a/tests/snappi/pfc/test_pfc_pause_lossy_with_snappi.py +++ 
b/tests/snappi/pfc/test_pfc_pause_lossy_with_snappi.py @@ -179,7 +179,7 @@ def test_pfc_pause_single_lossy_prio_reboot(snappi_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfc_test(api=snappi_api, @@ -244,7 +244,7 @@ def test_pfc_pause_multi_lossy_prio_reboot(snappi_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfc_test(api=snappi_api, diff --git a/tests/snappi/pfcwd/test_pfcwd_basic_with_snappi.py b/tests/snappi/pfcwd/test_pfcwd_basic_with_snappi.py index bca2201004..f265e8a0bd 100644 --- a/tests/snappi/pfcwd/test_pfcwd_basic_with_snappi.py +++ b/tests/snappi/pfcwd/test_pfcwd_basic_with_snappi.py @@ -165,7 +165,7 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfcwd_basic_test(api=snappi_api, @@ -227,7 +227,7 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) reboot(duthost, localhost, reboot_type=reboot_type) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfcwd_basic_test(api=snappi_api, @@ -289,7 +289,7 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, duthost.command("systemctl reset-failed {}".format(restart_service)) duthost.command("systemctl restart {}".format(restart_service)) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfcwd_basic_test(api=snappi_api, @@ -350,7 +350,7 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, duthost.command("systemctl reset-failed {}".format(restart_service)) duthost.command("systemctl restart {}".format(restart_service)) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, duthost.critical_services_fully_started), + pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") run_pfcwd_basic_test(api=snappi_api, diff --git a/tests/snmp/conftest.py b/tests/snmp/conftest.py index 3877404cc4..ec6909c81f 100644 --- a/tests/snmp/conftest.py +++ b/tests/snmp/conftest.py @@ -4,7 +4,7 @@ 
@pytest.fixture(scope="module", autouse=True) def setup_check_snmp_ready(duthosts): for duthost in duthosts: - assert wait_until(300, 20, duthost.is_service_fully_started, "snmp"), "SNMP service is not running" + assert wait_until(300, 20, 0, duthost.is_service_fully_started, "snmp"), "SNMP service is not running" def pytest_addoption(parser): """ diff --git a/tests/snmp/test_snmp_fdb.py b/tests/snmp/test_snmp_fdb.py new file mode 100644 index 0000000000..af84ce2702 --- /dev/null +++ b/tests/snmp/test_snmp_fdb.py @@ -0,0 +1,106 @@ +import pytest +import ptf.testutils as testutils +import logging +import pprint + +from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] +from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # lgtm[py/unused-import] +from tests.common.fixtures.duthost_utils import ports_list, vlan_ports_list +from tests.common.utilities import wait_until +from tests.common.helpers.snmp_helpers import get_snmp_facts + +logger = logging.getLogger(__name__) + +pytestmark = [ + pytest.mark.topology('t0') +] + +# Use original ports intead of sub interfaces for ptfadapter if it's t0-backend +PTF_PORT_MAPPING_MODE = "use_orig_interface" + +DUMMY_MAC_PREFIX = "02:11:22:33" + +def get_fdb_dynamic_mac_count(duthost): + res = duthost.command('show mac') + logger.info('"show mac" output on DUT:\n{}'.format(pprint.pformat(res['stdout_lines']))) + total_mac_count = 0 + for l in res['stdout_lines']: + if "dynamic" in l.lower() and DUMMY_MAC_PREFIX in l.lower(): + total_mac_count += 1 + return total_mac_count + + +def fdb_table_has_no_dynamic_macs(duthost): + return (get_fdb_dynamic_mac_count(duthost) == 0) + + +@pytest.fixture(scope="module", autouse=True) +def fdb_cleanup(duthost): + """ cleanup FDB before test run """ + if fdb_table_has_no_dynamic_macs(duthost): + return + else: + duthost.command('sonic-clear fdb all') + assert wait_until(20, 2, 0, fdb_table_has_no_dynamic_macs, duthost), "FDB Table Cleanup failed" + + +def build_icmp_packet(vlan_id, src_mac="00:22:00:00:00:02", dst_mac="ff:ff:ff:ff:ff:ff", + src_ip="192.168.0.1", dst_ip="192.168.0.2", ttl=64): + + pkt = testutils.simple_icmp_packet(pktlen=100 if vlan_id == 0 else 104, + eth_dst=dst_mac, + eth_src=src_mac, + dl_vlan_enable=False if vlan_id == 0 else True, + vlan_vid=vlan_id, + vlan_pcp=0, + ip_src=src_ip, + ip_dst=dst_ip, + ip_ttl=ttl) + return pkt + + +@pytest.mark.bsl +def test_snmp_fdb_send_tagged(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor, duthost, localhost, creds_all_duts): + """ + Send tagged packets from each port. 
+ Verify SNMP FDB entry + """ + cfg_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts'] + config_portchannels = cfg_facts.get('PORTCHANNEL', {}) + send_cnt = 0 + send_portchannels_cnt = 0 + for vlan_port in vlan_ports_list: + port_index = vlan_port["port_index"][0] + for permit_vlanid in map(int, vlan_port["permit_vlanid"]): + dummy_mac = '{}:{:02x}:{:02x}'.format(DUMMY_MAC_PREFIX, (port_index>>8)&0xFF, port_index&0xFF) + pkt = build_icmp_packet(permit_vlanid, dummy_mac) + logger.info("Send tagged({}) packet from {} ...".format(permit_vlanid, port_index)) + logger.info(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%")) + testutils.send(ptfadapter, port_index, pkt) + send_cnt += 1 + if vlan_port['dev'] in config_portchannels: + send_portchannels_cnt += 1 + # Flush dataplane + ptfadapter.dataplane.flush() + + hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] + snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts'] + assert 'snmp_fdb' in snmp_facts + assert 'snmp_interfaces' in snmp_facts + dummy_mac_cnt = 0 + recv_portchannels_cnt = 0 + for key in snmp_facts['snmp_fdb']: + # key is string: vlan.mac + items = key.split('.') + if len(items) != 2: + continue + logger.info("FDB entry: {}".format(items)) + if DUMMY_MAC_PREFIX in items[1]: + dummy_mac_cnt += 1 + idx = str(snmp_facts['snmp_fdb'][key]) + assert idx in snmp_facts['snmp_interfaces'] + assert 'name' in snmp_facts['snmp_interfaces'][idx] + if snmp_facts['snmp_interfaces'][idx]['name'] in config_portchannels: + recv_portchannels_cnt += 1 + assert send_cnt == dummy_mac_cnt, "Dummy MAC count does not match" + assert send_portchannels_cnt == recv_portchannels_cnt, "Portchannels count does not match" diff --git a/tests/snmp/test_snmp_phy_entity.py b/tests/snmp/test_snmp_phy_entity.py index 624165341a..d590636571 100644 --- a/tests/snmp/test_snmp_phy_entity.py +++ b/tests/snmp/test_snmp_phy_entity.py @@ -613,9 +613,9 @@ def test_turn_off_psu_and_check_psu_info(duthosts, enum_rand_one_per_hwsku_hostn # turn off the first PSU first_outlet = outlet_status[0] pdu_controller.turn_off_outlet(first_outlet) - assert wait_until(30, 5, check_outlet_status, pdu_controller, first_outlet, False) + assert wait_until(30, 5, 0, check_outlet_status, pdu_controller, first_outlet, False) # wait for psud update the database - assert wait_until(180, 20, _check_psu_status_after_power_off, duthost, localhost, creds_all_duts) + assert wait_until(180, 20, 0, _check_psu_status_after_power_off, duthost, localhost, creds_all_duts) def _check_psu_status_after_power_off(duthost, localhost, creds_all_duts): diff --git a/tests/span/conftest.py b/tests/span/conftest.py index 4398384762..e5b99e93e0 100644 --- a/tests/span/conftest.py +++ b/tests/span/conftest.py @@ -4,8 +4,11 @@ import pytest +from tests.common.storage_backend.backend_utils import skip_test_module_over_backend_topologies + + @pytest.fixture(scope="module") -def cfg_facts(duthosts, rand_one_dut_hostname): +def cfg_facts(duthosts, rand_one_dut_hostname, skip_test_module_over_backend_topologies): ''' Used to get config facts for selected DUT @@ -55,7 +58,7 @@ def ports_for_test(cfg_facts): @pytest.fixture(scope='module', autouse=True) def skip_unsupported_asic_type(duthost): - SPAN_UNSUPPORTED_ASIC_TYPE = ["broadcom"] + SPAN_UNSUPPORTED_ASIC_TYPE = ["broadcom", "cisco-8000"] if duthost.facts["asic_type"] in 
SPAN_UNSUPPORTED_ASIC_TYPE: pytest.skip( "Skipping span test on {} platform".format(duthost.facts["asic_type"])) diff --git a/tests/ssh/test_ssh_stress.py b/tests/ssh/test_ssh_stress.py index 3098c487c6..ddea3acd22 100644 --- a/tests/ssh/test_ssh_stress.py +++ b/tests/ssh/test_ssh_stress.py @@ -96,10 +96,10 @@ def work(dut_mgmt_ip, commands, baselines): pytest_assert(duration < 3*baselines[command_ind], "Command {} took more than 3 times as long as baseline".format(commands[command_ind])) - # The commands are executed asyncronously. Reading from stdout will ensure that a command + # The commands are executed asyncronously. Reading from stdout will ensure that a command # is not sent again on the same ssh connection before this one is done. stdout.readlines() - command_ind += 1 if not command_ind else -1 + command_ind += 1 if not command_ind else -1 # Ran in case ACL is still loaded ssh.exec_command(REMOVE_ACL) @@ -114,7 +114,7 @@ def run_post_test_system_check(init_mem, init_cpu, duthost): pytest_assert(max_mem-post_mem > 0.1, "Memory increased by more than 20 points during test and did not reduce from raised value\n\ Initial Value: {}, Max value: {}, Post-Test Value: {}".format(init_mem, max_mem, post_mem)) logging.warning("Memory usage did not reduce to original value after test. Initial Value: {}, Max value: {}, Post-Test Value: {}".format(init_mem, max_mem, post_mem)) - + if post_cpu-init_cpu >= 0.2: pytest_assert(max_cpu-post_cpu > 0.1, "CPU usage increased by more than 20 points during test and did not reduce from raised value\n\ Initial Value: {}, Max value: {}, Post-Test Value: {}".format(init_cpu, max_cpu, post_cpu)) @@ -131,9 +131,7 @@ def get_baseline_time(ssh, command): logging.info("Baseline time for command {} : {} seconds".format(command, tot_time/5)) return tot_time/5 -# This test is not stable, skip it for now. -# known issue: https://github.com/paramiko/paramiko/issues/1508 -@pytest.mark.skip(reason="This test failed intermittent due to known issue of paramiko, skip for now") + def test_ssh_stress(duthosts, rand_one_dut_hostname, setup_teardown): """This test creates several SSH connections that all run different commands. 
CPU/Memory are tracked throughout""" global done, max_mem, max_cpu @@ -153,13 +151,13 @@ def test_ssh_stress(duthosts, rand_one_dut_hostname, setup_teardown): (SHUTDOWN_INTERFACE, STARTUP_INTERFACE), (CONFIGURE_ACL, REMOVE_ACL), (ADD_ROUTE, REMOVE_ROUTE), - (ADD_PORTCHANNEL, REMOVE_PORTCHANNEL) + (ADD_PORTCHANNEL, REMOVE_PORTCHANNEL) ] logging.info("Collecting baseline times for commands") ssh = start_SSH_connection(dut_mgmt_ip) baseline_times = [tuple((get_baseline_time(ssh, com) for com in pair)) for pair in command_pairs] - + logging.info("Starting system monitoring thread.") # Starts thread that will be monitoring cpu and memory usage monitor_thread = threading.Thread(target=monitor_system, args=(duthost,)) @@ -218,6 +216,6 @@ def test_ssh_stress(duthosts, rand_one_dut_hostname, setup_teardown): done = True monitor_thread.join() - + logging.info("Running post-test system check") run_post_test_system_check(init_mem, init_cpu, duthost) diff --git a/tests/sub_port_interfaces/Sub-ports-test-plan.md b/tests/sub_port_interfaces/Sub-ports-test-plan.md index 0430a43be4..91d13dc673 100644 --- a/tests/sub_port_interfaces/Sub-ports-test-plan.md +++ b/tests/sub_port_interfaces/Sub-ports-test-plan.md @@ -16,6 +16,7 @@ - [test_vlan_config_impact](#Test-case-test_vlan_config_impact) - [test_routing_between_sub_ports](#Test-case-test_routing_between_sub_ports) - [test_routing_between_sub_ports_and_port](#Test-case-test_routing_between_sub_ports_and_port) + - [test_tunneling_between_sub_ports](#Test-case-test_tunneling_between_sub_ports) ## Revision @@ -25,6 +26,7 @@ | 0.2 | 02/23/2021 | Intel: Oleksandr Kozodoi | New test cases | | 0.3 | 03/18/2021 | Intel: Oleksandr Kozodoi | New test cases | | 0.4 | 06/09/2021 | Intel: Oleksandr Kozodoi | New test cases | +| 0.5 | 07/12/2021 | Intel: Oleksandr Kozodoi | New test cases | ## Overview @@ -422,3 +424,79 @@ Example the customized testbed with applied T0 topo for test_routing_between_sub - reload_dut_config function: reload DUT configuration - reload_ptf_config function: remove all sub-ports configuration + +## Test case test_tunneling_between_sub_ports + +### Test objective + +Validates that encap-decap tunnel works over sub-port. 
+ +### Test set up +- apply_config_on_the_dut fixture(scope="function"): enable and configures sub-port interfaces on the DUT +- apply_config_on_the_ptf fixture(scope="function"): enable and configures sub-port interfaces on the PTF +- apply_route_config fixture(scope="function"): add sub-ports to namespace on the PTF +- apply_tunnel_table_to_dut fixture(scope="function"): apply tunnel configuration on the DUT and remove after tests + +Example the customized testbed with applied T0 topo for test_tunneling_between_sub_ports test case: +##### Tunneling between sub-ports on the same port +``` + VM VM VM VM + [] [] [] [] + _______[]____[]____[]____[]______ + ╔═══|══════════╗ | + ║ | _______║ DUT _________ | + ║ | [Ethernet4] [Ethernet8] | + ║ |__[_.10_.20_]_______[_.10_.20_]__| + ║ [ | █ ] [ | | ] + ║ [ | █ ] [ | | ] + ║ ┌────[──|─┐ █ ] ┌────[──|─┐ | ] + ║ │ __[__|_│__█_]__│____[__|_│_|__]__ + ╚═│═|══[>.10│.20 ] │ [.10 │.20 ] | + │ | [__eth1___] │ [__eth2___] | + │ | │ │ │ | + │ |netns4 │ │ netns8 │ | + └─|───────┘ └─────────┘ | + | | + | PTF | + |_________________________________| + +``` +##### Tunneling between sub-ports on different ports +``` + VM VM VM VM + [] [] [] [] + _______[]____[]____[]____[]______ + ╔═══|════════════════════════════╗ | + ║ | _________ DUT _______║_ | + ║ | [Ethernet4] [Ethernet8] | + ║ |__[_.10_.20_]_______[_.10_.20_]__| + ║ [ | | ] [ | █ ] + ║ [ | | ] [ | █ ] + ║ ┌────[──|─┐ | ] ┌────[──|─┐ █ ] + ║ │ __[__|_│_| _]__│____[__|_│__█_]__ + ╚═│═|══[>.10│.20 ] │ [.10 │.20 ] | + │ | [__eth1___] │ [__eth2___] | + │ | │ │ │ | + │ |netns4 │ │ netns8 │ | + └─|───────┘ └─────────┘ | + | | + | PTF | + |_________________________________| + +``` +### Test steps +- Setup configuration of sub-ports on the DUT. +- Setup configuration of sub-ports on the PTF. +- Add one of the sub-ports to namespace on the PTF. +- Setup tunnel configuration on sub-ports of the DUT. +- Create encapsulated packet. +- Send encapsulated packet from sub-port to sub-port in namespace on the PTF. +- Verify that sub-port in namespace gets decapsulated packet on the PTF. +- Remove namespaces from PTF. +- Remove tunnel configuration from PTF. +- Clear configuration of sub-ports on the DUT. +- Clear configuration of sub-ports on the PTF. 
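As a concrete illustration of the "Setup tunnel configuration" step above, the tunnel template added in this change renders one IPinIP entry per source sub-port IP. Loaded via sonic-cfggen, the resulting CONFIG_DB content would look roughly like this for two endpoints (the dst_ip addresses below are placeholders, not values from a real testbed):

```python
# Hedged example of the rendered tunnel configuration written by the
# apply_tunnel_table_to_dut fixture; endpoint addresses are placeholders.
tunnel_config = {
    "TUNNEL": {
        "MuxTunnel1": {
            "dst_ip": "172.16.0.1",          # IP of the first source sub-port
            "ttl_mode": "pipe",
            "ecn_mode": "copy_from_outer",
            "dscp_mode": "pipe",
            "tunnel_type": "IPINIP",
        },
        "MuxTunnel2": {
            "dst_ip": "172.16.4.1",          # IP of the second source sub-port
            "ttl_mode": "pipe",
            "ecn_mode": "copy_from_outer",
            "dscp_mode": "pipe",
            "tunnel_type": "IPINIP",
        },
    }
}
```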
+ +### Test teardown +- reload_dut_config function: reload DUT configuration +- reload_ptf_config function: remove all sub-ports configuration diff --git a/tests/sub_port_interfaces/conftest.py b/tests/sub_port_interfaces/conftest.py index 69e6cbd248..11002aab88 100644 --- a/tests/sub_port_interfaces/conftest.py +++ b/tests/sub_port_interfaces/conftest.py @@ -12,6 +12,7 @@ from sub_ports_helpers import DUT_TMP_DIR from sub_ports_helpers import TEMPLATE_DIR from sub_ports_helpers import SUB_PORTS_TEMPLATE +from sub_ports_helpers import TUNNEL_TEMPLATE from sub_ports_helpers import check_sub_port from sub_ports_helpers import remove_member_from_vlan from sub_ports_helpers import get_port @@ -60,7 +61,13 @@ def skip_unsupported_asic_type(duthost): @pytest.fixture(params=['port', 'port_in_lag']) -def define_sub_ports_configuration(request, duthost, ptfhost, ptfadapter): +def port_type(request): + """Port type to test, could be either port or port-channel.""" + return request.param + + +@pytest.fixture +def define_sub_ports_configuration(request, duthost, ptfhost, ptfadapter, port_type, tbinfo): """ Define configuration of sub-ports for TC run @@ -68,6 +75,7 @@ def define_sub_ports_configuration(request, duthost, ptfhost, ptfadapter): request: pytest request object duthost: DUT host object ptfhost: PTF host object + port_type: Port type to test Yields: Dictonary of sub-port parameters for configuration DUT and PTF host @@ -101,7 +109,7 @@ def define_sub_ports_configuration(request, duthost, ptfhost, ptfadapter): # but name of LAG port should have prefix 'PortChannel' and suffix # '<0-9999>' on SONiC. So max length of LAG port suffix have be 3 characters # For example: 'PortChannel1.99' - if 'port_in_lag' in request.param: + if 'port_in_lag' in port_type: vlan_range_end = min(100, max_numbers_of_sub_ports + 11) vlan_ranges_dut = range(11, vlan_range_end) vlan_ranges_ptf = range(11, vlan_range_end) @@ -111,7 +119,12 @@ def define_sub_ports_configuration(request, duthost, ptfhost, ptfadapter): prefix = 30 network = ipaddress.ip_network(ip_subnet) - config_port_indices, ptf_ports = get_port(duthost, ptfhost, interface_num, request.param) + # for normal t0, get_port tries to retrieve test ports from vlan members + # let's enforce same behavior for t0-backend + if "t0-backend" in tbinfo["topo"]["name"]: + config_port_indices, ptf_ports = get_port(duthost, ptfhost, interface_num, port_type, exclude_sub_interface_ports=True) + else: + config_port_indices, ptf_ports = get_port(duthost, ptfhost, interface_num, port_type) subnets = [i for i, _ in zip(network.subnets(new_prefix=22), config_port_indices)] @@ -130,7 +143,7 @@ def define_sub_ports_configuration(request, duthost, ptfhost, ptfadapter): 'ptf_ports': ptf_ports, 'subnet': network, 'interface_ranges': config_port_indices.keys(), - 'port_type': request.param + 'port_type': port_type } @@ -163,7 +176,7 @@ def apply_config_on_the_dut(define_sub_ports_configuration, duthost, reload_dut_ duthost.copy(content=config_template.render(sub_ports_vars), dest=sub_ports_config_path) duthost.command('sonic-cfggen -j {} --write-to-db'.format(sub_ports_config_path)) - py_assert(wait_until(3, 1, check_sub_port, duthost, sub_ports_vars['sub_ports'].keys()), + py_assert(wait_until(3, 1, 0, check_sub_port, duthost, sub_ports_vars['sub_ports'].keys()), "Some sub-ports were not created") yield sub_ports_vars @@ -229,8 +242,9 @@ def apply_route_config(request, ptfhost, define_sub_ports_configuration, apply_c sub_ports[next_hop_sub_port]['neighbor_port'], 
sub_ports[next_hop_sub_port]['neighbor_ip']) - add_static_route(ptfhost, src_port_network, sub_ports[next_hop_sub_port]['ip'], name_of_namespace) - add_static_route(ptfhost, dst_port_network, sub_ports[src_port]['ip']) + if 'tunneling' not in request.node.name: + add_static_route(ptfhost, src_port_network, sub_ports[next_hop_sub_port]['ip'], name_of_namespace) + add_static_route(ptfhost, dst_port_network, sub_ports[src_port]['ip']) new_sub_ports[src_port].append((next_hop_sub_port, name_of_namespace)) @@ -245,8 +259,11 @@ def apply_route_config(request, ptfhost, define_sub_ports_configuration, apply_c for next_hop_sub_port in next_hop_sub_ports: sub_port, name_of_namespace = next_hop_sub_port dst_port_network = ipaddress.ip_network(unicode(sub_ports[sub_port]['ip']), strict=False) - remove_static_route(ptfhost, src_port_network, sub_ports[sub_port]['ip'], name_of_namespace) - remove_static_route(ptfhost, dst_port_network, sub_ports[src_port]['ip']) + + if 'tunneling' not in request.node.name: + remove_static_route(ptfhost, src_port_network, sub_ports[sub_port]['ip'], name_of_namespace) + remove_static_route(ptfhost, dst_port_network, sub_ports[src_port]['ip']) + remove_namespace(ptfhost, name_of_namespace) @@ -382,6 +399,42 @@ def apply_route_config_for_port(request, duthost, ptfhost, define_sub_ports_conf remove_vlan(duthost, vlan_id) +@pytest.fixture() +def apply_tunnel_table_to_dut(duthost, apply_route_config): + """ + Apply tunnel configuration on the DUT and remove after tests + + Args: + duthost: DUT host object + apply_route_config: Fixture for applying route configuration on the PTF + """ + tunnel_addr_list = [] + + new_sub_ports = apply_route_config['new_sub_ports'] + sub_ports = apply_route_config['sub_ports'] + + for src_port in new_sub_ports: + tunnel_ip = sub_ports[src_port]['ip'].split('/')[0] + tunnel_addr_list.append(tunnel_ip) + + tunnel_vars = { + 'tunnel_addr_list': tunnel_addr_list + } + + tunnel_config_path = os.path.join(DUT_TMP_DIR, TUNNEL_TEMPLATE) + config_template = jinja2.Template(open(os.path.join(TEMPLATE_DIR, TUNNEL_TEMPLATE)).read()) + + duthost.command("mkdir -p {}".format(DUT_TMP_DIR)) + duthost.copy(content=config_template.render(tunnel_vars), dest=tunnel_config_path) + duthost.command('sonic-cfggen -j {} --write-to-db'.format(tunnel_config_path)) + + yield + + # Teardown + for index in range(1, len(tunnel_addr_list)+1): + duthost.command('docker exec -i database redis-cli -n 4 -c DEL "TUNNEL|MuxTunnel{}"'.format(index)) + + @pytest.fixture def reload_dut_config(request, duthost, define_sub_ports_configuration): """ @@ -396,9 +449,10 @@ def reload_dut_config(request, duthost, define_sub_ports_configuration): sub_ports = define_sub_ports_configuration['sub_ports'] dut_ports = define_sub_ports_configuration['dut_ports'] cfg_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] - - for sub_port, sub_port_info in sub_ports.items(): - remove_sub_port(duthost, sub_port, sub_port_info['ip']) + existing_sub_ports = cfg_facts.get("VLAN_SUB_INTERFACE", {}) + for sub_port in sub_ports: + if sub_port in existing_sub_ports: + remove_sub_port(duthost, sub_port, sub_ports[sub_port]['ip']) py_assert(check_sub_port(duthost, sub_ports.keys(), True), "Some sub-port were not deleted") @@ -409,7 +463,7 @@ def reload_dut_config(request, duthost, define_sub_ports_configuration): duthost.shell('sudo config load -y /etc/sonic/config_db.json') -@pytest.fixture(autouse=True) +@pytest.fixture def reload_ptf_config(request, ptfhost, 
define_sub_ports_configuration): """ PTF's configuration reload on teardown diff --git a/tests/sub_port_interfaces/sub_ports_helpers.py b/tests/sub_port_interfaces/sub_ports_helpers.py index 85778a9364..d50d3c9adb 100644 --- a/tests/sub_port_interfaces/sub_ports_helpers.py +++ b/tests/sub_port_interfaces/sub_ports_helpers.py @@ -1,5 +1,6 @@ import os import time +import random from collections import OrderedDict @@ -19,13 +20,14 @@ DUT_TMP_DIR = os.path.join('tmp', os.path.basename(BASE_DIR)) TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates') SUB_PORTS_TEMPLATE = 'sub_port_config.j2' +TUNNEL_TEMPLATE = 'tunnel_config.j2' ACTION_FWD = 'fwd' ACTION_DROP = 'drop' TCP_PORT = 80 UDP_PORT = 161 -def create_packet(eth_dst, eth_src, ip_dst, ip_src, vlan_vid, tr_type, ttl, dl_vlan_enable=False, icmp_type=8, pktlen=100): +def create_packet(eth_dst, eth_src, ip_dst, ip_src, vlan_vid, tr_type, ttl, dl_vlan_enable=False, icmp_type=8, pktlen=100, ip_tunnel=None): """ Generate packet to send. @@ -40,6 +42,7 @@ def create_packet(eth_dst, eth_src, ip_dst, ip_src, vlan_vid, tr_type, ttl, dl_v dl_vlan_enable: True if the packet is with vlan, False otherwise icmp_type: ICMP type pktlen: packet length + ip_tunnel: Tunnel IP address of DUT Returns: simple packet """ @@ -52,10 +55,20 @@ def create_packet(eth_dst, eth_src, ip_dst, ip_src, vlan_vid, tr_type, ttl, dl_v elif 'ICMP' in tr_type: return testutils.simple_icmp_packet(eth_dst=eth_dst, eth_src=eth_src, ip_dst=ip_dst, ip_src=ip_src, icmp_type=icmp_type, vlan_vid=vlan_vid, dl_vlan_enable=dl_vlan_enable, ip_ttl=ttl, pktlen=pktlen) + elif 'decap' in tr_type: + inner_dscp = random.choice(range(0, 33)) + inner_ttl = random.choice(range(3, 65)) + + inner_packet = testutils.simple_tcp_packet(ip_dst=ip_dst, ip_src=ip_src, tcp_sport=TCP_PORT, tcp_dport=TCP_PORT, ip_ttl=inner_ttl, + ip_dscp=inner_dscp)[packet.IP] + + return testutils.simple_ipv4ip_packet(eth_dst=eth_dst, eth_src=eth_src, ip_src='1.1.1.1', ip_dst=ip_tunnel, ip_dscp=inner_dscp, ip_ttl=64, + vlan_vid=vlan_vid, dl_vlan_enable=dl_vlan_enable, inner_frame=inner_packet) return None -def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pkt_action=None, type_of_traffic=None, ttl=64, pktlen=100): +def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pkt_action=None, + type_of_traffic=None, ttl=64, pktlen=100, ip_tunnel=None, **kwargs): """ Send packet from PTF to DUT and verify that DUT sends/doesn't packet to PTF. 
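A short usage sketch of the new 'decap' traffic type added to create_packet below: the outer IPinIP header targets the DUT tunnel endpoint on a tagged sub-port, and the inner TCP frame carries the addresses of the destination sub-port. Every MAC and IP here is a placeholder for illustration only.

```python
# Hedged sketch; builds the same shape of packet as the 'decap' branch.
import ptf.packet as packet
import ptf.testutils as testutils

inner = testutils.simple_tcp_packet(ip_src="172.16.0.2", ip_dst="172.16.4.2",
                                    tcp_sport=80, tcp_dport=80,
                                    ip_ttl=10, ip_dscp=4)[packet.IP]

encapsulated = testutils.simple_ipv4ip_packet(eth_dst="00:aa:bb:cc:dd:ee",  # DUT router MAC (placeholder)
                                              eth_src="00:11:22:33:44:55",  # PTF sub-port MAC (placeholder)
                                              ip_src="1.1.1.1",
                                              ip_dst="172.16.0.1",          # TUNNEL dst_ip on the DUT (placeholder)
                                              vlan_vid=10, dl_vlan_enable=True,
                                              inner_frame=inner)
```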
@@ -71,6 +84,7 @@ def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, type_of_traffic: Type of traffic ttl: Time to live pktlen: packet length + ip_tunnel: Tunnel IP address of DUT """ if not type_of_traffic: type_of_traffic = ['ICMP',] @@ -79,7 +93,9 @@ def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, if 'TCP' in tr_type or 'UDP' in tr_type: generate_and_verify_tcp_udp_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, tr_type, pktlen, ttl) elif 'ICMP' in tr_type: - generate_and_verify_icmp_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pkt_action, tr_type, ttl) + generate_and_verify_icmp_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pkt_action, tr_type, ttl, untagged_icmp_request=kwargs.pop("untagged_icmp_request", False)) + elif 'decap' in tr_type: + generate_and_verify_decap_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, tr_type, ip_tunnel) else: pytest.skip('Unsupported type of traffic') @@ -154,7 +170,7 @@ def generate_and_verify_tcp_udp_traffic(duthost, ptfadapter, src_port, dst_port, pytest_assert(pkt_in_buffer is True, "Expected packet not available:\n{}".format(pkt_in_buffer)) -def generate_and_verify_icmp_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pkt_action, tr_type, ttl=64): +def generate_and_verify_icmp_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pkt_action, tr_type, ttl=64, untagged_icmp_request=False): """ Send ICMP request packet from PTF to DUT and verify that DUT sends/doesn't send ICMP reply packet to PTF. @@ -169,6 +185,7 @@ def generate_and_verify_icmp_traffic(duthost, ptfadapter, src_port, dst_port, ip pkt_action: Packet action (forwarded or drop) tr_type: Type of traffic (TCP or UDP) ttl: Time to live + untagged_icmp_request: send untagged ICMP request if True """ vlan_vid = None dl_vlan_enable = False @@ -189,7 +206,7 @@ def generate_and_verify_icmp_traffic(duthost, ptfadapter, src_port, dst_port, ip ip_src=ip_src, ip_dst=ip_dst, vlan_vid=vlan_vid, - dl_vlan_enable=dl_vlan_enable, + dl_vlan_enable=not untagged_icmp_request and dl_vlan_enable, tr_type=tr_type, ttl=64) @@ -220,6 +237,61 @@ def generate_and_verify_icmp_traffic(duthost, ptfadapter, src_port, dst_port, ip testutils.verify_no_packet_any(ptfadapter, masked_exp_pkt, dst_port_list) +def generate_and_verify_decap_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, tr_type, ip_tunnel=None): + """ + Send encapsulated packet from PTF to DUT and + verify that DUT sends/doesn't send TCP/UDP packet to PTF. 
+ + Args: + duthost: DUT host object + ptfadapter: PTF adapter + src_port: Source port of PTF + dst_port: Destination port of PTF + ip_src: Source IP address of PTF + ip_dst: Destination IP address of PTF + tr_type: Type of traffic (TCP or UDP) + ip_tunnel: Tunnel IP address of DUT + """ + router_mac = duthost.facts['router_mac'] + src_port_number = int(get_port_number(src_port)) + dst_port_number = int(get_port_number(dst_port)) + + ip_src = ip_src.split('/')[0] + ip_dst = ip_dst.split('/')[0] + ip_tunnel = ip_tunnel.split('/')[0] + + # Define encapsulated packet + pkt = create_packet(eth_dst=router_mac, + eth_src=ptfadapter.dataplane.get_mac(0, src_port_number), + ip_src=ip_src, + ip_dst=ip_dst, + ip_tunnel=ip_tunnel, + vlan_vid=int(src_port.split('.')[1]), + dl_vlan_enable=True, + tr_type=tr_type, + ttl=64) + + # Build expected packet + inner_packet = pkt[packet.IP].payload[packet.IP].copy() + exp_pkt = Ether(src=router_mac, dst=ptfadapter.dataplane.get_mac(0, dst_port_number)) / Dot1Q(vlan=int(dst_port.split('.')[1])) / inner_packet + exp_pkt['IP'].ttl -= 1 + + update_dut_arp_table(duthost, ip_dst) + ptfadapter.dataplane.flush() + + testutils.send_packet(ptfadapter, src_port_number, pkt) + + pkt_filter = FilterPktBuffer(ptfadapter=ptfadapter, + exp_pkt=exp_pkt, + dst_port_number=dst_port_number, + match_fields=[("802.1Q", "vlan"), ("Ethernet", "src"), ("Ethernet", "dst"), ("IP", "src"), ("IP", "dst")], + ignore_fields=[]) + + pkt_in_buffer = pkt_filter.filter_pkt_in_buffer() + + pytest_assert(pkt_in_buffer is True, "Expected packet not available:\n{}".format(pkt_in_buffer)) + + def shutdown_port(duthost, interface): """ Shutdown port on the DUT @@ -229,7 +301,7 @@ def shutdown_port(duthost, interface): interface: Interface of DUT """ duthost.shutdown(interface) - pytest_assert(wait_until(3, 1, __check_interface_state, duthost, interface, 'down'), + pytest_assert(wait_until(3, 1, 0, __check_interface_state, duthost, interface, 'down'), "DUT's port {} didn't go down as expected".format(interface)) @@ -242,7 +314,7 @@ def startup_port(duthost, interface): interface: Interface of DUT """ duthost.no_shutdown(interface) - pytest_assert(wait_until(3, 1, __check_interface_state, duthost, interface), + pytest_assert(wait_until(3, 1, 0, __check_interface_state, duthost, interface), "DUT's port {} didn't go up as expected".format(interface)) @@ -276,7 +348,7 @@ def setup_vlan(duthost, vlan_id): """ duthost.shell('config vlan add %s' % vlan_id) - pytest_assert(wait_until(3, 1, __check_vlan, duthost, vlan_id), + pytest_assert(wait_until(3, 1, 0, __check_vlan, duthost, vlan_id), "VLAN RIF Vlan{} didn't create as expected".format(vlan_id)) @@ -294,8 +366,10 @@ def __check_vlan(duthost, vlan_id, removed=False): """ vlan_name = 'Vlan{}'.format(vlan_id) out = duthost.shell('redis-cli -n 4 keys "VLAN|{}"'.format(vlan_name))["stdout"] + if removed: return vlan_name not in out + return vlan_name in out @@ -314,8 +388,10 @@ def __check_vlan_member(duthost, vlan_id, vlan_member, removed=False): """ vlan_name = 'Vlan{}'.format(vlan_id) out = duthost.shell('redis-cli -n 4 keys "VLAN_MEMBER|{}|{}"'.format(vlan_name, vlan_member))["stdout"] + if removed: return vlan_name not in out + return vlan_name in out @@ -329,7 +405,7 @@ def remove_vlan(duthost, vlan_id): """ duthost.shell('config vlan del {}'.format(vlan_id)) - pytest_assert(wait_until(3, 1, __check_vlan, duthost, vlan_id, True), + pytest_assert(wait_until(3, 1, 0, __check_vlan, duthost, vlan_id, True), "VLAN RIF Vlan{} didn't remove as 
expected".format(vlan_id)) @@ -344,7 +420,7 @@ def remove_member_from_vlan(duthost, vlan_id, vlan_member): """ if __check_vlan_member(duthost, vlan_id, vlan_member): duthost.shell('config vlan member del {} {}'.format(vlan_id, vlan_member)) - pytest_assert(wait_until(3, 1, __check_vlan_member, duthost, vlan_id, vlan_member, True), + pytest_assert(wait_until(3, 1, 0, __check_vlan_member, duthost, vlan_id, vlan_member, True), "VLAN RIF Vlan{} have {} member".format(vlan_id, vlan_member)) @@ -755,7 +831,7 @@ def add_member_to_vlan(duthost, vlan_id, vlan_member): """ if not __check_vlan_member(duthost, vlan_id, vlan_member): duthost.shell('config vlan member add {} {}'.format(vlan_id, vlan_member)) - pytest_assert(wait_until(3, 1, __check_vlan_member, duthost, vlan_id, vlan_member), + pytest_assert(wait_until(3, 1, 0, __check_vlan_member, duthost, vlan_id, vlan_member), "VLAN RIF Vlan{} doesn't have {} member".format(vlan_id, vlan_member)) @@ -770,3 +846,14 @@ def remove_sub_port_from_ptf(ptfhost, sub_port, ip): """ ptfhost.shell("ip address del {} dev {}".format(ip, sub_port)) ptfhost.shell("ip link del {}".format(sub_port)) + + +def update_dut_arp_table(duthost, ip): + """ + Add entry to DUT ARP table + + Args: + duthost: DUT host object + ip: IP address of directly connected interface + """ + duthost.command("ping {} -c 3".format(ip), module_ignore_errors=True) diff --git a/tests/sub_port_interfaces/templates/tunnel_config.j2 b/tests/sub_port_interfaces/templates/tunnel_config.j2 new file mode 100644 index 0000000000..ef5a1af046 --- /dev/null +++ b/tests/sub_port_interfaces/templates/tunnel_config.j2 @@ -0,0 +1,14 @@ +{ + "TUNNEL": { +{% for ip in tunnel_addr_list %} + "{{ "MuxTunnel" + loop.index|string }}": { + "dst_ip" : "{{ ip }}", + "ttl_mode": "pipe", + "ecn_mode": "copy_from_outer", + "dscp_mode": "pipe", + "tunnel_type": "IPINIP" + }{% if not loop.last %},{% endif %} + +{% endfor %} + } +} diff --git a/tests/sub_port_interfaces/test_show_subinterface.py b/tests/sub_port_interfaces/test_show_subinterface.py new file mode 100644 index 0000000000..70bea0c7e9 --- /dev/null +++ b/tests/sub_port_interfaces/test_show_subinterface.py @@ -0,0 +1,109 @@ +import json +import pytest + +from tests.common import constants +from tests.common.helpers.assertions import pytest_assert +from tests.common.utilities import wait_until + + +pytestmark = [ + pytest.mark.topology("t0", "t1") +] + + +@pytest.fixture +def subintf_expected_config(duthost, apply_config_on_the_dut): + """Return expected config of the subinterfaces created.""" + subinterfaces = apply_config_on_the_dut["sub_ports"] + show_int_status = {intf["interface"]: intf for intf in duthost.show_and_parse("show interface status")} + + for subinterface, config in subinterfaces.items(): + interface, vlan = subinterface.split(constants.VLAN_SUB_INTERFACE_SEPARATOR) + config["speed"] = show_int_status[interface]["speed"] + config["mtu"] = show_int_status[interface]["mtu"] + config["vlan"] = vlan + + return subinterfaces + + +# limit the port_type to port +@pytest.mark.parametrize("port_type", ["port"]) +def test_subinterface_status(duthost, subintf_expected_config): + """ + Verify subinterface status after creation/deletion. + + @param duthost: fixture duthost + @param subintf_expected_config: fixture subintf_expected_config to get expected sub interfaces configuration + + 1. add new subinterfaces + 2. verify new subinterfaces status via `show subinterface status` + 3. verify new subinterfaces IP address via `show ip/ipv6 interfaces` + 4. 
verify no syslog error during creation + 5. remove new subinterfaces + 6. verify subinterface removal via `show subinterface status` + 7. verify subinterface IP address removal via `show ip/ipv6 interfaces` + 8. verify no syslog error during removal + """ + + def _verify_subintf_creation(subintf_config, success_show_sub_status): + """Verify subintf existence after creation.""" + show_sub_status = {_["sub port interface"]: _ for _ in duthost.show_and_parse("show subinterfaces status") + if _["sub port interface"] in subintf_config} + if len(show_sub_status) == len(subintf_config): + success_show_sub_status.append(show_sub_status) + return True + return False + + def _verify_subintf_removal(subintf_config): + """Verify subintf existence after removal.""" + show_sub_status = {_["sub port interface"]: _ for _ in duthost.show_and_parse("show subinterfaces status") + if _["sub port interface"] in subintf_config} + return len(show_sub_status) == 0 + + def _remove_subintf(subintf_config): + """Remove the created subintf from VLAN_SUB_INTERFACE table.""" + for subintf in subintf_config: + entries = json.loads(duthost.shell("redis-dump -d 4 -k \"VLAN_SUB_INTERFACE|%s*\"" % subintf)["stdout"]) + for entry in entries: + duthost.shell("redis-cli -n 4 del \"%s\"" % entry) + + # creation verification + success_show_sub_status = [] + if not wait_until(20, 5, 0, _verify_subintf_creation, subintf_expected_config, success_show_sub_status): + pytest.fail("Failed to create subinterfaces") + + show_sub_status = success_show_sub_status[0] + show_ip_interfaces = {_["interface"]: _ for _ in duthost.show_and_parse("show ip interface") + if _["interface"] in subintf_expected_config} + + for subintf, config in subintf_expected_config.items(): + # verify show subinterface status after creation + status = show_sub_status[subintf] + pytest_assert(status.get("admin") == "up", "subinterface %s should be admin up" % subintf) + pytest_assert(status.get("vlan") == config["vlan"], + "subinterface %s should have vlan %s, actual vlan %s" % (subintf, config["vlan"], status.get("vlan"))) + pytest_assert(status.get("speed") == config["speed"], + "subinterface %s should have inherited speed as %s, actual speed %s" % (subintf, config["speed"], status.get("speed"))) + pytest_assert(status.get("mtu") == config["mtu"], + "subinterface %s should have inherited mtu as %s, actual mtu %s" % (subintf, config["mtu"], status.get("mtu"))) + pytest_assert(status.get("type") == "802.1q-encapsulation", + "subinterface %s should have type as 802.1q-encapsulation, actual type %s" % (subintf, status.get("type"))) + + # verify show ip interface status after creation + if subintf not in show_ip_interfaces: + pytest.fail("subinterface %s doesn't have IP address assigned as expected" % subintf) + ip_status = show_ip_interfaces[subintf] + pytest_assert(ip_status.get("ipv4 address/mask") == config["ip"], + "subinterface %s should have IP address assigned as %s, actual IP address %s" % (subintf, config["ip"], ip_status.get("ipv4 address/mask"))) + + # deletion verification + _remove_subintf(subintf_expected_config) + if not wait_until(20, 5, 0, _verify_subintf_removal, subintf_expected_config): + pytest.fail("Failed to remove subinterfaces") + + show_ip_interfaces = {_["interface"]: _ for _ in duthost.show_and_parse("show ip interface") + if _["interface"] in subintf_expected_config} + + for subintf in subintf_expected_config: + # verify show ip interface status after removal + pytest_assert(subintf not in show_ip_interfaces, "subinterface %s still has IP 
address assigned" % subintf) diff --git a/tests/sub_port_interfaces/test_sub_port_interfaces.py b/tests/sub_port_interfaces/test_sub_port_interfaces.py index ec9904156f..cf299903c9 100644 --- a/tests/sub_port_interfaces/test_sub_port_interfaces.py +++ b/tests/sub_port_interfaces/test_sub_port_interfaces.py @@ -13,6 +13,7 @@ from sub_ports_helpers import setup_vlan from sub_ports_helpers import remove_vlan from sub_ports_helpers import check_sub_port +from sub_ports_helpers import remove_sub_port pytestmark = [ @@ -78,6 +79,34 @@ def test_packet_routed_with_invalid_vlan(self, duthost, ptfadapter, apply_config ip_dst=value['ip'], pkt_action='drop') + @pytest.mark.parametrize("port_type", ["port"]) + def test_untagged_packet_not_routed(self, duthost, ptfadapter, apply_config_on_the_dut, apply_config_on_the_ptf): + """ + Validates that untagged packets aren't routed. + + Test steps: + 1.) Setup configuration of sub-ports on the DUT. + 2.) Setup configuration of sub-ports on the PTF. + 3.) Create untagged ICMP packet. + 4.) Send untagged ICMP request packet from PTF to DUT. + 5.) Verify that DUT doesn't send ICMP reply packet to PTF. + 6.) Clear configuration of sub-ports on the DUT. + 7.) Clear configuration of sub-ports on the PTF. + + Pass Criteria: PTF doesn't get ICMP reply packet from DUT. + """ + sub_ports = apply_config_on_the_dut["sub_ports"] + + for sub_port, config in sub_ports.items(): + generate_and_verify_traffic(duthost=duthost, + ptfadapter=ptfadapter, + src_port=config["neighbor_port"], + dst_port=sub_port, + ip_src=config["neighbor_ip"], + ip_dst=config["ip"], + pkt_action="drop", + untagged_icmp_request=True + ) def test_admin_status_down_disables_forwarding(self, duthost, ptfadapter, apply_config_on_the_dut, apply_config_on_the_ptf): """ @@ -273,6 +302,60 @@ def test_routing_between_sub_ports(self, type_of_traffic, duthost, ptfadapter, a type_of_traffic=type_of_traffic, ttl=63) + @pytest.mark.parametrize("type_of_traffic, port_type", [["TCP-UDP-ICMP", "PORT"]]) + def test_routing_between_sub_ports_unaffected_by_sub_ports_removal(self, type_of_traffic, duthost, ptfadapter, apply_route_config): + """ + Validates that the routing of packets between sub-ports is not affected by the removal of other sub ports. + + Test steps: + 1.) Setup configuration of sub-ports on the DUT. + 2.) Setup configuration of sub-ports on the PTF. + 3.) Add one of the sub-ports to namespace on the PTF. + 4.) Setup static routes between sub-port and sub-port in namespace on the PTF + 5.) Create packet (TCP, UDP or ICMP). + 6.) Remove some other sub ports + 7.) Send packet from sub-port to sub-port in namespace on the PTF. + 8.) Verify that sub-port gets received packet on the PTF. + 9.) Remove static routes from PTF + 10.) Remove namespaces from PTF + 11.) Clear configuration of sub-ports on the DUT. + 12.) Clear configuration of sub-ports on the PTF. + + Note: + Test verifies two cases of routing between sub-ports: + 1.) Routing between sub-ports on the same port + 2.) Routing between sub-ports on different ports + + Pass Criteria: PTF port gets packets from port in namespace on the PTF. 
+ """ + new_sub_ports = apply_route_config['new_sub_ports'] + sub_ports = apply_route_config['sub_ports'] + type_of_traffic = type_of_traffic.split('-') + + # find to-be-removed sub ports + sub_ports_to_remove = set(sub_ports.keys()) + for sub_port, next_hop_sub_ports in new_sub_ports.items(): + sub_ports_to_remove.remove(sub_port) + for next_hop_sub_port, _ in next_hop_sub_ports: + sub_ports_to_remove.remove(next_hop_sub_port) + + # remove those to-be-removed sub ports + for sub_port in sub_ports_to_remove: + remove_sub_port(duthost, sub_port, sub_ports[sub_port]["ip"]) + + for src_port, next_hop_sub_ports in new_sub_ports.items(): + for sub_port, _ in next_hop_sub_ports: + generate_and_verify_traffic(duthost=duthost, + ptfadapter=ptfadapter, + src_port=sub_ports[src_port]['neighbor_port'], + ip_src=sub_ports[src_port]['neighbor_ip'], + dst_port=sub_ports[sub_port]['neighbor_port'], + ip_dst=sub_ports[sub_port]['neighbor_ip'], + pkt_action='fwd', + type_of_traffic=type_of_traffic, + ttl=63) + @pytest.mark.parametrize("type_of_traffic", ['TCP-UDP-ICMP',]) def test_routing_between_sub_ports_and_port(self, request, type_of_traffic, duthost, ptfadapter, apply_route_config_for_port): @@ -321,3 +404,39 @@ def test_routing_between_sub_ports_and_port(self, request, type_of_traffic, duth type_of_traffic=type_of_traffic, ttl=63, pktlen=pktlen) + + + def test_tunneling_between_sub_ports(self, duthost, ptfadapter, apply_tunnel_table_to_dut, apply_route_config): + """ + Validates that packets are routed between sub-ports. + + Test steps: + 1.) Setup configuration of sub-ports on the DUT. + 2.) Setup configuration of sub-ports on the PTF. + 3.) Add one of the sub-ports to namespace on the PTF. + 4.) Setup tunnel configuration on sub-ports of the DUT. + 5.) Create encapsulated packet. + 6.) Send encapsulated packet from sub-port to sub-port in namespace on the PTF. + 7.) Verify that sub-port in namespace gets decapsulated packet on the PTF. + 8.) Remove namespaces from PTF. + 9.) Remove tunnel configuration from PTF. + 10.) Clear configuration of sub-ports on the DUT. + 11.) Clear configuration of sub-ports on the PTF. + + Pass Criteria: PTF port gets decapsulated packet from port in namespace on the PTF. 
+ """ + new_sub_ports = apply_route_config['new_sub_ports'] + sub_ports = apply_route_config['sub_ports'] + + for src_port, next_hop_sub_ports in new_sub_ports.items(): + for sub_port, _ in next_hop_sub_ports: + generate_and_verify_traffic(duthost=duthost, + ptfadapter=ptfadapter, + src_port=sub_ports[src_port]['neighbor_port'], + ip_src=sub_ports[src_port]['neighbor_ip'], + dst_port=sub_ports[sub_port]['neighbor_port'], + ip_dst=sub_ports[sub_port]['neighbor_ip'], + ip_tunnel=sub_ports[src_port]['ip'], + pkt_action='fwd', + type_of_traffic=['decap',], + ttl=63) diff --git a/tests/sub_port_interfaces/test_sub_port_l2_forwarding.py b/tests/sub_port_interfaces/test_sub_port_l2_forwarding.py new file mode 100644 index 0000000000..e688248807 --- /dev/null +++ b/tests/sub_port_interfaces/test_sub_port_l2_forwarding.py @@ -0,0 +1,124 @@ +import pytest +import random +import logging +import contextlib +import time +import tempfile + +from ptf import testutils +from scapy.all import * +from tests.common import constants +from tests.common import utilities +from tests.common.helpers.assertions import pytest_assert + + +pytestmark = [ + pytest.mark.topology("t0") +] + + +PTF_PORT_MAPPING_MODE = "use_orig_interface" +PACKET_PAYLOAD_FINGERPRINT = "SUBPORTL2TESTING" +PACKET_SAVE_PATH = "/tmp/eth_packets.pcap" +PACKET_COUNT = 1000 +TIME_WAIT_AFTER_SENDING_PACKET = 10 + + +@pytest.fixture +def testbed_params(define_sub_ports_configuration, duthosts, rand_one_dut_hostname, tbinfo): + """Collect test params.""" + testbed_params = define_sub_ports_configuration["sub_ports"].copy() + duthost = duthosts[rand_one_dut_hostname] + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + for sub_port, config in testbed_params.items(): + port, vlanid = sub_port.split(constants.VLAN_SUB_INTERFACE_SEPARATOR) + config["port"] = port + config["vlanid"] = vlanid + config["neighbor_ptf_index"] = mg_facts["minigraph_ptf_indices"][port] + return testbed_params + + +@pytest.fixture +def test_sub_port(testbed_params): + """Select a test sub port.""" + test_sub_port = random.choice(testbed_params.keys()) + logging.info("Select test sub port %s", test_sub_port) + return test_sub_port + + +@pytest.fixture +def generate_eth_packets(test_sub_port, testbed_params, ptfadapter): + """Generate Ethernet packets that will be sent to test sub port to verify L2 forwarding.""" + + def _simple_tagged_eth_packet(eth_dst, eth_src, vlanid): + pkt = Ether(src=eth_src, dst=eth_dst) + pkt /= Dot1Q(vlan=vlanid) + pkt /= ("0" * (60 - len(pkt)) + PACKET_PAYLOAD_FINGERPRINT) + return pkt + + # first packet has a dummy dst MAC, second packet has a broadcast dst MAC + eth_dsts = ["00:01:02:03:04:05", "ff:ff:ff:ff:ff:ff"] + + # select the MAC address of a different sub port's neighbor server port as third packet dst MAC + dst_sub_port = [_ for _ in testbed_params if testbed_params[test_sub_port]["port"] not in _][0] + dst_port_neighbor_ptf_index = testbed_params[dst_sub_port]["neighbor_ptf_index"] + dst_port_ptf_mac = ptfadapter.dataplane.get_mac(0, dst_port_neighbor_ptf_index) + eth_dsts.append(dst_port_ptf_mac) + + # generate test packets + test_port_neighbor_ptf_index = testbed_params[test_sub_port]["neighbor_ptf_index"] + test_port_vlan_id = testbed_params[test_sub_port]["vlanid"] + eth_src = ptfadapter.dataplane.get_mac(0, test_port_neighbor_ptf_index) + + packets = [] + for eth_dst in eth_dsts: + packets.append(_simple_tagged_eth_packet(eth_dst, eth_src, int(test_port_vlan_id))) + + return packets + + +# limit the port_type to port 
+@pytest.mark.parametrize("port_type", ["port"]) +def test_sub_port_l2_forwarding(apply_config_on_the_dut, duthosts, rand_one_dut_hostname, test_sub_port, generate_eth_packets, testbed_params, ptfadapter): + """Verify sub port doesn't have L2 forwarding capability.""" + + @contextlib.contextmanager + def check_no_cpu_packets(duthost, port, packet_fingerprint): + start_pcap = "tcpdump -i %s -w %s" % (port, PACKET_SAVE_PATH) + stop_pcap = "pkill -f '%s'" % start_pcap + start_pcap = "nohup %s &" % start_pcap + + duthost.shell(start_pcap) + try: + yield + finally: + time.sleep(1.0) + duthost.shell(stop_pcap, module_ignore_errors=True) + + with tempfile.NamedTemporaryFile() as tmp_pcap: + duthost.fetch(src=PACKET_SAVE_PATH, dest=tmp_pcap.name, flat=True) + received_packets = sniff(offline=tmp_pcap.name) + + logging.debug("Packets received from port %s:", port) + for i, pkt in enumerate(received_packets): + logging.debug("%d: %s" % (i, utilities.dump_scapy_packet_show_output(pkt))) + + packets_with_fingerprint = [_ for _ in received_packets if packet_fingerprint in str(_)] + pytest_assert(len(packets_with_fingerprint) == 0, "Received packets with fingerprint %s" % packet_fingerprint) + + def verify_no_packet_received(ptfadapter, ports, packet_fingerprint): + for port in ports: + for packet, _ in ptfadapter.dataplane.packet_queues[(0, port)]: + if packet_fingerprint in packet: + logging.error("Received packet with fingerprint '%s' on port %s: %s\n", port, packet_fingerprint, packet) + pytest.fail("Received packet on port %s" % port) + + duthost = duthosts[rand_one_dut_hostname] + packets = generate_eth_packets + ptf_ports_to_check = list(set(_["neighbor_ptf_index"] for _ in testbed_params.values())) + ptfadapter.dataplane.flush() + for packet in packets: + with check_no_cpu_packets(duthost, test_sub_port, PACKET_PAYLOAD_FINGERPRINT): + testutils.send(ptfadapter, testbed_params[test_sub_port]["neighbor_ptf_index"], packet, count=PACKET_COUNT) + time.sleep(TIME_WAIT_AFTER_SENDING_PACKET) + verify_no_packet_received(ptfadapter, ptf_ports_to_check, PACKET_PAYLOAD_FINGERPRINT) diff --git a/tests/syslog/test_syslog.py b/tests/syslog/test_syslog.py index a500abf60b..f4547f3e95 100644 --- a/tests/syslog/test_syslog.py +++ b/tests/syslog/test_syslog.py @@ -1,7 +1,9 @@ import logging import pytest +import os +import time -from tests.common.helpers.assertions import pytest_assert +from scapy.all import rdpcap logger = logging.getLogger(__name__) @@ -12,6 +14,25 @@ DUT_PCAP_FILEPATH = "/tmp/test_syslog_tcpdump.pcap" DOCKER_TMP_PATH = "/tmp/" +# If any dummy IP type doesn't have a matching default route, skip test for this parametrize +def check_dummy_addr_and_default_route(dummy_ip_a, dummy_ip_b, has_v4_default_route, has_v6_default_route): + skip_v4 = False + skip_v6 = False + + if dummy_ip_a is not None and ":" not in dummy_ip_a and not has_v4_default_route: + skip_v4 = True + if dummy_ip_a is not None and ":" in dummy_ip_a and not has_v6_default_route: + skip_v6 = True + + if dummy_ip_b is not None and ":" not in dummy_ip_b and not has_v4_default_route: + skip_v4 = True + if dummy_ip_b is not None and ":" in dummy_ip_b and not has_v6_default_route: + skip_v6 = True + + if skip_v4 | skip_v6: + proto = "IPv4" if skip_v4 else "IPv6" + pytest.skip("DUT has no matching default route for dummy syslog ips: ({}, {}), has no {} default route".format(dummy_ip_a, dummy_ip_b, proto)) + # Check pcap file for the destination IPs def _check_pcap(dummy_ip_a, dummy_ip_b, filepath): is_ok_a = False @@ -32,14 +53,42 @@ def 
_check_pcap(dummy_ip_a, dummy_ip_b, filepath): if is_ok_a and is_ok_b: return True + missed_ip = [] + if not is_ok_a: + missed_ip.append(dummy_ip_a) + if not is_ok_b: + missed_ip.append(dummy_ip_b) + logger.error("Pcap file doesn't contain dummy syslog ips: ({})".format(", ".join(missed_ip))) return False -@pytest.mark.parametrize("dummy_syslog_server_ip_a, dummy_syslog_server_ip_b", [("10.0.80.166", None), ("fd82:b34f:cc99::100", None), ("10.0.80.165", "10.0.80.166"), ("fd82:b34f:cc99::100", "10.0.80.166"), ("fd82:b34f:cc99::100", "fd82:b34f:cc99::200")]) -def test_syslog(duthosts, enum_rand_one_per_hwsku_frontend_hostname, dummy_syslog_server_ip_a, dummy_syslog_server_ip_b): - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] +# Before the real test, check the default route on the DUT: +# If the DUT has neither an IPv4 nor an IPv6 default route, skip the syslog test. If the DUT has at least one type of default route, tell the test_syslog function to do further checks +@pytest.fixture(scope="module") +def check_default_route(rand_selected_dut): + duthost = rand_selected_dut + ret = {'IPv4': False, 'IPv6': False} + + logger.info("Checking DUT default route") + result = duthost.shell("ip route show default | grep via", module_ignore_errors=True)['rc'] + if result == 0: + ret['IPv4'] = True + result = duthost.shell("ip -6 route show default | grep via", module_ignore_errors=True)['rc'] + if result == 0: + ret['IPv6'] = True + + if not ret['IPv4'] and not ret['IPv6']: + pytest.skip("DUT has no default route, skipped") + + yield ret + +@pytest.mark.parametrize("dummy_syslog_server_ip_a, dummy_syslog_server_ip_b", [("7.0.80.166", None), ("fd82:b34f:cc99::100", None), ("7.0.80.165", "7.0.80.166"), ("fd82:b34f:cc99::100", "7.0.80.166"), ("fd82:b34f:cc99::100", "fd82:b34f:cc99::200")]) +def test_syslog(rand_selected_dut, dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, check_default_route): + duthost = rand_selected_dut logger.info("Starting syslog tests") test_message = "Basic Test Message" + check_dummy_addr_and_default_route(dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, check_default_route['IPv4'], check_default_route['IPv6']) + logger.info("Configuring the DUT") # Add dummy rsyslog destination for testing if dummy_syslog_server_ip_a is not None: @@ -50,7 +99,12 @@ def test_syslo logger.debug("Added new rsyslog server IP {}".format(dummy_syslog_server_ip_b)) logger.info("Start tcpdump") - tcpdump_task, tcpdump_result = duthost.shell("sudo timeout 20 tcpdump -i any -s0 -A -w {} \"udp and port 514\"".format(DUT_PCAP_FILEPATH), module_async=True) + # Make sure that the DUT_PCAP_FILEPATH does not exist + duthost.shell("sudo rm -f {}".format(DUT_PCAP_FILEPATH)) + # Scapy doesn't support LINUX_SLL2 (Linux cooked v2), and tcpdump on Bullseye + # defaults to writing in that format when listening on any interface. Therefore, + # have it use LINUX_SLL (Linux cooked) instead. 
+ tcpdump_task, tcpdump_result = duthost.shell("sudo timeout 20 tcpdump -y LINUX_SLL -i any -s0 -A -w {} \"udp and port 514\"".format(DUT_PCAP_FILEPATH), module_async=True) # wait for starting tcpdump time.sleep(5) @@ -71,5 +125,12 @@ def test_syslog(duthosts, enum_rand_one_per_hwsku_frontend_hostname, dummy_syslo duthost.fetch(src=DUT_PCAP_FILEPATH, dest=DOCKER_TMP_PATH) filepath = os.path.join(DOCKER_TMP_PATH, duthost.hostname, DUT_PCAP_FILEPATH.lstrip(os.path.sep)) - pytest_assert(_check_pcap(dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, filepath), - "Dummy syslog server IP not seen in the pcap file") \ No newline at end of file + if not _check_pcap(dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, filepath): + default_route_v4 = duthost.shell("ip route show default")['stdout'] + logger.debug("DUT's IPv4 default route:\n%s" % default_route_v4) + default_route_v6 = duthost.shell("ip -6 route show default")['stdout'] + logger.debug("DUT's IPv6 default route:\n%s" % default_route_v6) + syslog_config = duthost.shell("grep 'remote syslog server' -A 7 /etc/rsyslog.conf")['stdout'] + logger.debug("DUT's syslog server IPs:\n%s" % syslog_config) + + pytest.fail("Dummy syslog server IP not seen in the pcap file") diff --git a/tests/system_health/test_system_health.py b/tests/system_health/test_system_health.py index 9ad6fba272..44903fbed1 100644 --- a/tests/system_health/test_system_health.py +++ b/tests/system_health/test_system_health.py @@ -281,7 +281,7 @@ def test_system_health_config(duthosts, enum_rand_one_per_hwsku_hostname, device def wait_system_health_boot_up(duthost): boot_timeout = get_system_health_config(duthost, 'boot_timeout', DEFAULT_BOOT_TIMEOUT) - assert wait_until(boot_timeout, 10, redis_table_exists, duthost, STATE_DB, HEALTH_TABLE_NAME), \ + assert wait_until(boot_timeout, 10, 0, redis_table_exists, duthost, STATE_DB, HEALTH_TABLE_NAME), \ 'System health service is not working' diff --git a/tests/system_health/test_system_status.py b/tests/system_health/test_system_status.py new file mode 100644 index 0000000000..719c03be7a --- /dev/null +++ b/tests/system_health/test_system_status.py @@ -0,0 +1,18 @@ +import time + +import pytest + +from tests.common.utilities import wait_until + +pytestmark = [ + pytest.mark.topology('any') +] + + +def test_system_is_running(duthost): + def is_system_ready(duthost): + status = duthost.shell('sudo systemctl is-system-running', module_ignore_errors=True)['stdout'] + return status != "starting" + + if not wait_until(180, 10, 0, is_system_ready, duthost): + pytest.fail('System is still in the starting state (systemctl is-system-running) after 180 s') diff --git a/tests/tacacs/conftest.py b/tests/tacacs/conftest.py new file mode 100644 index 0000000000..133b144b2c --- /dev/null +++ b/tests/tacacs/conftest.py @@ -0,0 +1,28 @@ +import pytest +from .utils import setup_tacacs_client, setup_tacacs_server, cleanup_tacacs + +@pytest.fixture(scope="module") +def check_tacacs(ptfhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + tacacs_server_ip = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host'] + setup_tacacs_client(duthost, creds_all_duts, tacacs_server_ip) + setup_tacacs_server(ptfhost, creds_all_duts, duthost) + + yield + + cleanup_tacacs(ptfhost, duthost, tacacs_server_ip) + + +@pytest.fixture(scope="module") +def check_tacacs_v6(ptfhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + ptfhost_vars 
= ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars + if 'ansible_hostv6' not in ptfhost_vars: + pytest.skip("Skip IPv6 test. ptf ansible_hostv6 not configured.") + tacacs_server_ip = ptfhost_vars['ansible_hostv6'] + setup_tacacs_client(duthost, creds_all_duts, tacacs_server_ip) + setup_tacacs_server(ptfhost, creds_all_duts, duthost) + + yield + + cleanup_tacacs(ptfhost, duthost, tacacs_server_ip) \ No newline at end of file diff --git a/tests/tacacs/test_jit_user.py b/tests/tacacs/test_jit_user.py index 876a31c334..526fcff39c 100644 --- a/tests/tacacs/test_jit_user.py +++ b/tests/tacacs/test_jit_user.py @@ -1,8 +1,7 @@ import pytest from tests.common.helpers.assertions import pytest_assert -from tests.common.plugins.tacacs import setup_tacacs_server from .test_ro_user import ssh_remote_run -from .utils import check_output +from .utils import check_output, setup_tacacs_server pytestmark = [ pytest.mark.disable_loganalyzer, @@ -12,7 +11,7 @@ logger = logging.getLogger(__name__) -def test_jit_user(localhost, duthosts, ptfhost, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs): +def test_jit_user(localhost, duthosts, ptfhost, enum_rand_one_per_hwsku_hostname, creds_all_duts, check_tacacs): """check jit user. netuser -> netadmin -> netuser""" duthost = duthosts[enum_rand_one_per_hwsku_hostname] diff --git a/tests/tacacs/test_ro_disk.py b/tests/tacacs/test_ro_disk.py index 806113bc3f..e56fd7dfbb 100644 --- a/tests/tacacs/test_ro_disk.py +++ b/tests/tacacs/test_ro_disk.py @@ -2,7 +2,6 @@ import crypt import json import logging -import time from pkg_resources import parse_version from tests.common.utilities import wait_until @@ -30,7 +29,7 @@ def check_disk_ro(duthost): def simulate_ro(duthost): duthost.shell("echo u > /proc/sysrq-trigger") logger.info("Disk turned to RO state; pause for 30s before attempting to ssh") - assert wait_until(30, 2, check_disk_ro, duthost), "disk not in ro state" + assert wait_until(30, 2, 0, check_disk_ro, duthost), "disk not in ro state" def chk_ssh_remote_run(localhost, remote_ip, username, password, cmd): @@ -65,7 +64,7 @@ def do_reboot(duthost, localhost, dutip, rw_user, rw_pass): wait(wait_time, msg="Wait {} seconds for system to be stable.".format(wait_time)) -def test_ro_disk(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs): +def test_ro_disk(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, check_tacacs): """test tacacs rw user """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] @@ -100,14 +99,14 @@ def test_ro_disk(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_al logger.debug("user={}".format(ro_user)) - assert wait_until(600, 20, chk_ssh_remote_run, localhost, dutip, + assert wait_until(600, 20, 0, chk_ssh_remote_run, localhost, dutip, ro_user, ro_pass, "cat /etc/passwd"), "Failed to ssh as ro user" finally: logger.debug("START: reboot {} to restore disk RW state". format(enum_rand_one_per_hwsku_hostname)) do_reboot(duthost, localhost, dutip, rw_user, rw_pass) - assert wait_until(600, 20, duthost.critical_services_fully_started), "Not all critical services are fully started" + assert wait_until(600, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started" logger.debug(" END: reboot {} to restore disk RW state". 
format(enum_rand_one_per_hwsku_hostname)) diff --git a/tests/tacacs/test_ro_user.py b/tests/tacacs/test_ro_user.py index 763e210e58..32814ca8eb 100644 --- a/tests/tacacs/test_ro_user.py +++ b/tests/tacacs/test_ro_user.py @@ -72,7 +72,7 @@ def wait_for_tacacs(localhost, remote_ip, username, password): else: current_attempt += 1 -def test_ro_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs): +def test_ro_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, check_tacacs): duthost = duthosts[enum_rand_one_per_hwsku_hostname] dutip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] res = ssh_remote_run(localhost, dutip, creds_all_duts[duthost]['tacacs_ro_user'], @@ -80,7 +80,7 @@ def test_ro_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_al check_output(res, 'test', 'remote_user') -def test_ro_user_ipv6(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs_v6): +def test_ro_user_ipv6(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, check_tacacs_v6): duthost = duthosts[enum_rand_one_per_hwsku_hostname] dutip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] res = ssh_remote_run(localhost, dutip, creds_all_duts[duthost]['tacacs_ro_user'], @@ -88,7 +88,7 @@ def test_ro_user_ipv6(localhost, duthosts, enum_rand_one_per_hwsku_hostname, cre check_output(res, 'test', 'remote_user') -def test_ro_user_allowed_command(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs): +def test_ro_user_allowed_command(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, check_tacacs): duthost = duthosts[enum_rand_one_per_hwsku_hostname] dutip = duthost.host.options["inventory_manager"].get_host(duthost.hostname).vars["ansible_host"] @@ -157,7 +157,7 @@ def test_ro_user_allowed_command(localhost, duthosts, enum_rand_one_per_hwsku_ho " 'sudo sonic-installer list' is banned") -def test_ro_user_banned_command(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs): +def test_ro_user_banned_command(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, check_tacacs): duthost = duthosts[enum_rand_one_per_hwsku_hostname] dutip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] diff --git a/tests/tacacs/test_rw_user.py b/tests/tacacs/test_rw_user.py index 654c367d47..a382455eae 100644 --- a/tests/tacacs/test_rw_user.py +++ b/tests/tacacs/test_rw_user.py @@ -11,7 +11,7 @@ ] -def test_rw_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs): +def test_rw_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, check_tacacs): """test tacacs rw user """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] @@ -21,7 +21,7 @@ def test_rw_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_al check_output(res, 'testadmin', 'remote_user_su') -def test_rw_user_ipv6(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, test_tacacs_v6): +def test_rw_user_ipv6(localhost, duthosts, enum_rand_one_per_hwsku_hostname, creds_all_duts, check_tacacs_v6): """test tacacs rw user """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] diff --git a/tests/tacacs/utils.py b/tests/tacacs/utils.py index b8f3ae77f5..e8b79fb15a 100644 --- a/tests/tacacs/utils.py +++ b/tests/tacacs/utils.py @@ -1,9 +1,76 @@ +import crypt +import 
logging + +from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_assert +logger = logging.getLogger(__name__) def check_output(output, exp_val1, exp_val2): pytest_assert(not output['failed'], output['stderr']) for l in output['stdout_lines']: fds = l.split(':') if fds[0] == exp_val1: - pytest_assert(fds[4] == exp_val2) \ No newline at end of file + pytest_assert(fds[4] == exp_val2) + +def check_all_services_status(ptfhost): + res = ptfhost.command("service --status-all") + logger.info(res["stdout_lines"]) + + +def start_tacacs_server(ptfhost): + ptfhost.command("service tacacs_plus restart", module_ignore_errors=True) + return "tacacs+ running" in ptfhost.command("service tacacs_plus status", module_ignore_errors=True)["stdout_lines"] + + +def setup_tacacs_client(duthost, creds_all_duts, tacacs_server_ip): + """setup tacacs client""" + + # configure tacacs client + duthost.shell("sudo config tacacs passkey %s" % creds_all_duts[duthost]['tacacs_passkey']) + + # get default tacacs servers + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + for tacacs_server in config_facts.get('TACPLUS_SERVER', {}): + duthost.shell("sudo config tacacs delete %s" % tacacs_server) + duthost.shell("sudo config tacacs add %s" % tacacs_server_ip) + duthost.shell("sudo config tacacs authtype login") + + # enable tacacs+ + duthost.shell("sudo config aaa authentication login tacacs+") + + +def setup_tacacs_server(ptfhost, creds_all_duts, duthost): + """setup tacacs server""" + + # configure tacacs server + extra_vars = {'tacacs_passkey': creds_all_duts[duthost]['tacacs_passkey'], + 'tacacs_rw_user': creds_all_duts[duthost]['tacacs_rw_user'], + 'tacacs_rw_user_passwd': crypt.crypt(creds_all_duts[duthost]['tacacs_rw_user_passwd'], 'abc'), + 'tacacs_ro_user': creds_all_duts[duthost]['tacacs_ro_user'], + 'tacacs_ro_user_passwd': crypt.crypt(creds_all_duts[duthost]['tacacs_ro_user_passwd'], 'abc'), + 'tacacs_jit_user': creds_all_duts[duthost]['tacacs_jit_user'], + 'tacacs_jit_user_passwd': crypt.crypt(creds_all_duts[duthost]['tacacs_jit_user_passwd'], 'abc'), + 'tacacs_jit_user_membership': creds_all_duts[duthost]['tacacs_jit_user_membership']} + + ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars) + ptfhost.template(src="tacacs/tac_plus.conf.j2", dest="/etc/tacacs+/tac_plus.conf") + ptfhost.lineinfile(path="/etc/default/tacacs+", line="DAEMON_OPTS=\"-d 10 -l /var/log/tac_plus.log -C /etc/tacacs+/tac_plus.conf\"", regexp='^DAEMON_OPTS=.*') + check_all_services_status(ptfhost) + + # FIXME: This is a short term mitigation, we need to figure out why the tacacs+ server does not start + # reliably all of a sudden. 
+ wait_until(5, 1, 0, start_tacacs_server, ptfhost) + check_all_services_status(ptfhost) + + +def cleanup_tacacs(ptfhost, duthost, tacacs_server_ip): + # stop tacacs server + ptfhost.service(name="tacacs_plus", state="stopped") + check_all_services_status(ptfhost) + + # reset tacacs client configuration + duthost.shell("sudo config tacacs delete %s" % tacacs_server_ip) + duthost.shell("sudo config tacacs default passkey") + duthost.shell("sudo config aaa authentication login default") + duthost.shell("sudo config aaa authentication failthrough default") diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index d0e7344c30..d5e2b2fc54 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -93,7 +93,7 @@ def setup_streaming_telemetry(duthosts, rand_one_dut_hostname, localhost, ptfho setup_telemetry_forpyclient(duthost) # Wait until telemetry was restarted - pytest_assert(wait_until(100, 10, duthost.is_service_fully_started, "telemetry"), "TELEMETRY not started.") + pytest_assert(wait_until(100, 10, 0, duthost.is_service_fully_started, "telemetry"), "TELEMETRY not started.") logger.info("telemetry process restarted. Now run pyclient on ptfdocker") # Wait until the TCP port was opened diff --git a/tests/templates/ptf_nn_agent.conf.dut.j2 b/tests/templates/ptf_nn_agent.conf.dut.j2 index ba0245578b..62433b5668 100644 --- a/tests/templates/ptf_nn_agent.conf.dut.j2 +++ b/tests/templates/ptf_nn_agent.conf.dut.j2 @@ -1,5 +1,5 @@ [program:ptf_nn_agent] -command=/usr/bin/python /opt/ptf_nn_agent.py --device-socket 1@tcp://0.0.0.0:10900 -i 1-{{ nn_target_port }}@{{ nn_target_interface }} --set-nn-rcv-buffer=609430400 --set-iface-rcv-buffer=609430400 --set-nn-snd-buffer=609430400 --set-iface-snd-buffer=609430400 +command=/usr/bin/python /opt/ptf_nn_agent.py --device-socket 1@tcp://0.0.0.0:10900 -i 1-{{ nn_target_port }}@{{ nn_target_interface }}{% if nn_target_vlanid %}.{{nn_target_vlanid}}{% else %}{% endif %} --set-nn-rcv-buffer=609430400 --set-iface-rcv-buffer=609430400 --set-nn-snd-buffer=609430400 --set-iface-snd-buffer=609430400 process_name=ptf_nn_agent stdout_logfile=/tmp/ptf_nn_agent.out.log stderr_logfile=/tmp/ptf_nn_agent.err.log diff --git a/tests/templates/ptf_nn_agent.conf.ptf.j2 b/tests/templates/ptf_nn_agent.conf.ptf.j2 index bb1282bc4a..4fb677f3b7 100644 --- a/tests/templates/ptf_nn_agent.conf.ptf.j2 +++ b/tests/templates/ptf_nn_agent.conf.ptf.j2 @@ -1,5 +1,6 @@ [program:ptf_nn_agent] -command=/usr/bin/python /opt/ptf_nn_agent.py --device-socket 0@tcp://127.0.0.1:10900 -i 0-{{ nn_target_port }}@eth{{ nn_target_port }} +command=/usr/bin/python /opt/ptf_nn_agent.py --device-socket 0@tcp://127.0.0.1:10900 -i 0-{{ nn_target_port }}@eth{{ nn_target_port }}{% if nn_target_vlanid %}.{{nn_target_vlanid}}{% else %}{% endif %} + process_name=ptf_nn_agent stdout_logfile=/tmp/ptf_nn_agent.out.log stderr_logfile=/tmp/ptf_nn_agent.err.log diff --git a/tests/test_pretest.py b/tests/test_pretest.py index 54623a1a6d..a8d4ba7869 100644 --- a/tests/test_pretest.py +++ b/tests/test_pretest.py @@ -41,13 +41,13 @@ def test_features_state(duthosts, enum_dut_hostname, localhost): """ duthost = duthosts[enum_dut_hostname] logger.info("Checking the state of each feature in 'CONFIG_DB' ...") - if not wait_until(180, FEATURE_STATE_VERIFYING_INTERVAL_SECS, verify_features_state, duthost): + if not wait_until(180, FEATURE_STATE_VERIFYING_INTERVAL_SECS, 0, verify_features_state, duthost): logger.warn("Not all states of features in 'CONFIG_DB' are valid, 
rebooting DUT {}".format(duthost.hostname)) reboot(duthost, localhost) # Some services are not ready immeidately after reboot wait_critical_processes(duthost) - pytest_assert(wait_until(FEATURE_STATE_VERIFYING_THRESHOLD_SECS, FEATURE_STATE_VERIFYING_INTERVAL_SECS, + pytest_assert(wait_until(FEATURE_STATE_VERIFYING_THRESHOLD_SECS, FEATURE_STATE_VERIFYING_INTERVAL_SECS, 0, verify_features_state, duthost), "Not all service states are valid!") logger.info("The states of features in 'CONFIG_DB' are all valid.") @@ -101,12 +101,13 @@ def collect_dut_info(dut): # for multi ASIC randomly select one frontend ASIC # and one backend ASIC if dut.sonichost.is_multi_asic: - fe = random.choice(front_end_asics) - be = random.choice(back_end_asics) - asic_services[service] = [ - dut.get_docker_name(service, asic_index=fe), - dut.get_docker_name(service, asic_index=be) - ] + asic_services[service] = [] + if len(front_end_asics): + fe = random.choice(front_end_asics) + asic_services[service].append(dut.get_docker_name(service, asic_index=fe)) + if len(back_end_asics): + be = random.choice(back_end_asics) + asic_services[service].append(dut.get_docker_name(service, asic_index=be)) dut_info = { "intf_status": status, diff --git a/tests/upgrade_path/upgrade_helpers.py b/tests/upgrade_path/upgrade_helpers.py index a3605c20b7..628d855361 100644 --- a/tests/upgrade_path/upgrade_helpers.py +++ b/tests/upgrade_path/upgrade_helpers.py @@ -66,6 +66,7 @@ def prepare_ptf(ptfhost, duthost, tbinfo): arp_responder_conf = Template(open("../ansible/roles/test/templates/arp_responder.conf.j2").read()) ptfhost.copy(content=arp_responder_conf.render(arp_responder_args="-e"), dest="/etc/supervisor/conf.d/arp_responder.conf") + ptfhost.copy(src='scripts/dual_tor_sniffer.py', dest="/root/ptftests/advanced_reboot_sniffer.py") ptfhost.shell("supervisorctl reread") ptfhost.shell("supervisorctl update") diff --git a/tests/vlan/test_vlan.py b/tests/vlan/test_vlan.py index 0cde089a99..6cd9c46b13 100644 --- a/tests/vlan/test_vlan.py +++ b/tests/vlan/test_vlan.py @@ -3,26 +3,21 @@ import ptf.packet as scapy import ptf.testutils as testutils from ptf.mask import Mask -from collections import defaultdict -import json import itertools import logging +import ipaddress from tests.common.errors import RunAnsibleModuleFail -from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] - from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # lgtm[py/unused-import] - from tests.common.config_reload import config_reload +from tests.common.fixtures.duthost_utils import ports_list, vlan_ports_list logger = logging.getLogger(__name__) -vlan_id_list = [ 100, 200 ] - pytestmark = [ - pytest.mark.topology('t0') + pytest.mark.topology('t0', 't0-56-po2vlan') ] # Use original ports intead of sub interfaces for ptfadapter if it's t0-backend @@ -35,59 +30,102 @@ def cfg_facts(duthosts, rand_one_dut_hostname): return duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts'] +def compare_network(src_ipprefix, dst_ipprefix): + src_network = ipaddress.IPv4Interface(src_ipprefix).network + dst_network = ipaddress.IPv4Interface(dst_ipprefix).network + return src_network.overlaps(dst_network) + + @pytest.fixture(scope="module") -def vlan_intfs_list(): - return [ { 'vlan_id': vlan, 'ip': '192.168.{}.1/24'.format(vlan) } for vlan in vlan_id_list ] +def 
vlan_intfs_dict(cfg_facts, tbinfo): + vlan_intfs_dict = {} + for k, v in cfg_facts['VLAN'].items(): + vlanid = v['vlanid'] + for addr in cfg_facts['VLAN_INTERFACE']['Vlan'+vlanid]: + if addr.find(':') == -1: + ip = addr + break + else: + continue + logger.info("Original VLAN {}, ip {}".format(vlanid, ip)) + vlan_intfs_dict[int(vlanid)] = {'ip': ip, 'orig': True} + # For t0 topo, will add 2 VLANs for test. + # Need to make sure vlan id is unique, and avoid vlan ip network overlapping. + # For example, ip prefix is 192.168.0.1/21 for VLAN 1000, + # Below ip prefix overlaps with 192.168.0.1/21, and need to skip: + # 192.168.0.1/24, 192.168.1.1/24, 192.168.2.1/24, 192.168.3.1/24, + # 192.168.4.1/24, 192.168.5.1/24, 192.168.6.1/24, 192.168.7.1/24 + if tbinfo['topo']['name'] != 't0-56-po2vlan': + vlan_cnt = 0 + for i in xrange(0, 255): + vid = 100 + i + if vid in vlan_intfs_dict: + continue + ip = u'192.168.{}.1/24'.format(i) + for v in vlan_intfs_dict.values(): + if compare_network(ip, v['ip']): + break + else: + logger.info("Add VLAN {}, ip {}".format(vid, ip)) + vlan_intfs_dict[vid] = {'ip': ip, 'orig': False} + vlan_cnt += 1 + if vlan_cnt >= 2: + break + assert vlan_cnt == 2 + return vlan_intfs_dict @pytest.fixture(scope="module") -def vlan_ports_list(rand_selected_dut, tbinfo, cfg_facts): +def work_vlan_ports_list(rand_selected_dut, tbinfo, cfg_facts, ports_list, vlan_ports_list, vlan_intfs_dict): + if tbinfo['topo']['name'] == 't0-56-po2vlan': + return vlan_ports_list + mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) - vlan_ports_list = [] + work_vlan_ports_list = [] config_ports = {k: v for k,v in cfg_facts['PORT'].items() if v.get('admin_status', 'down') == 'up'} config_portchannels = cfg_facts.get('PORTCHANNEL', {}) config_port_indices = {k: v for k, v in mg_facts['minigraph_ptf_indices'].items() if k in config_ports} - ptf_ports_available_in_topo = {port_index: 'eth{}'.format(port_index) for port_index in config_port_indices.values()} - config_port_channel_members = [port_channel[1]['members'] for port_channel in config_portchannels.items()] - config_port_channel_member_ports = list(itertools.chain.from_iterable(config_port_channel_members)) + + # For t0 topo, will add port to new VLAN, use 'orig' field to identify new VLAN. 
+ vlan_id_list = [k for k, v in vlan_intfs_dict.items() if v['orig'] == False] pvid_cycle = itertools.cycle(vlan_id_list) # when running on t0 we can use the portchannel members if config_portchannels: - for po in config_portchannels.keys()[:2]: - port = config_portchannels[po]['members'][0] - vlan_ports_list.append({ + portchannel_cnt = 0 + for po in config_portchannels: + vlan_port = { 'dev' : po, 'port_index' : [config_port_indices[member] for member in config_portchannels[po]['members']], - 'pvid' : pvid_cycle.next(), - 'permit_vlanid' : { vid : { - 'peer_ip' : '192.168.{}.{}'.format(vid, 2 + config_port_indices.keys().index(port)), - 'remote_ip' : '{}.1.1.{}'.format(vid, 2 + config_port_indices.keys().index(port)) - } for vid in vlan_id_list } - }) - - ports = [port for port in config_ports - if config_port_indices[port] in ptf_ports_available_in_topo - and config_ports[port].get('admin_status', 'down') == 'up' - and port not in config_port_channel_member_ports] - - for port in ports[:4]: - vlan_ports_list.append({ + 'permit_vlanid' : [] + } + # Add 2 portchannels for test + if portchannel_cnt < 2: + portchannel_cnt += 1 + vlan_port['pvid'] = pvid_cycle.next() + vlan_port['permit_vlanid'] = vlan_id_list[:] + if 'pvid' in vlan_port: + work_vlan_ports_list.append(vlan_port) + + for i, port in enumerate(ports_list): + vlan_port = { 'dev' : port, 'port_index' : [config_port_indices[port]], - 'pvid' : pvid_cycle.next(), - 'permit_vlanid' : { vid : { - 'peer_ip' : '192.168.{}.{}'.format(vid, 2 + config_port_indices.keys().index(port)), - 'remote_ip' : '{}.1.1.{}'.format(vid, 2 + config_port_indices.keys().index(port)) - } for vid in vlan_id_list } - }) + 'permit_vlanid' : [] + } + # Add 4 ports for test + if i < 4: + vlan_port['pvid'] = pvid_cycle.next() + vlan_port['permit_vlanid'] = vlan_id_list[:] + if 'pvid' in vlan_port: + work_vlan_ports_list.append(vlan_port) - return vlan_ports_list + return work_vlan_ports_list -def create_vlan_interfaces(vlan_ports_list, ptfhost): +def create_vlan_interfaces(work_vlan_ports_list, ptfhost): logger.info("Create PTF VLAN intfs") - for vlan_port in vlan_ports_list: - for permit_vlanid in vlan_port["permit_vlanid"].keys(): + for vlan_port in work_vlan_ports_list: + for permit_vlanid in vlan_port["permit_vlanid"]: if int(permit_vlanid) != vlan_port["pvid"]: ptfhost.command("ip link add link eth{idx} name eth{idx}.{pvid} type vlan id {pvid}".format( @@ -100,6 +138,7 @@ def create_vlan_interfaces(vlan_ports_list, ptfhost): pvid=permit_vlanid )) + def shutdown_portchannels(duthost, portchannel_interfaces): cmds = [] logger.info("Shutdown lags, flush IP addresses") @@ -110,18 +149,21 @@ def shutdown_portchannels(duthost, portchannel_interfaces): duthost.shell_cmds(cmds=cmds) -def create_test_vlans(duthost, cfg_facts, vlan_ports_list, vlan_intfs_list): + +def create_test_vlans(duthost, cfg_facts, work_vlan_ports_list, vlan_intfs_dict): cmds = [] logger.info("Add vlans, assign IPs") - for vlan in vlan_intfs_list: - cmds.append('config vlan add {}'.format(vlan['vlan_id'])) - cmds.append("config interface ip add Vlan{} {}".format(vlan['vlan_id'], vlan['ip'].upper())) + for k, v in vlan_intfs_dict.items(): + if v['orig'] == True: + continue + cmds.append('config vlan add {}'.format(k)) + cmds.append("config interface ip add Vlan{} {}".format(k, v['ip'].upper())) # Delete untagged vlans from interfaces to avoid error message # when adding untagged vlan to interface that already have one if '201911' not in duthost.os_version: logger.info("Delete untagged vlans 
from interfaces") - for vlan_port in vlan_ports_list: + for vlan_port in work_vlan_ports_list: vlan_members = cfg_facts.get('VLAN_MEMBER', {}) vlan_name, vid = vlan_members.keys()[0], vlan_members.keys()[0].replace("Vlan", '') try: @@ -131,8 +173,10 @@ def create_test_vlans(duthost, cfg_facts, vlan_ports_list, vlan_intfs_list): continue logger.info("Add members to Vlans") - for vlan_port in vlan_ports_list: - for permit_vlanid in vlan_port['permit_vlanid'].keys(): + for vlan_port in work_vlan_ports_list: + for permit_vlanid in vlan_port['permit_vlanid']: + if vlan_intfs_dict[int(permit_vlanid)]['orig'] == True: + continue cmds.append('config vlan member add {tagged} {id} {port}'.format( tagged=('--untagged' if vlan_port['pvid'] == permit_vlanid else ''), id=permit_vlanid, @@ -141,6 +185,7 @@ def create_test_vlans(duthost, cfg_facts, vlan_ports_list, vlan_intfs_list): duthost.shell_cmds(cmds=cmds) + def startup_portchannels(duthost, portchannel_interfaces): cmds =[] logger.info("Bringup lags") @@ -149,21 +194,9 @@ def startup_portchannels(duthost, portchannel_interfaces): duthost.shell_cmds(cmds=cmds) -def add_test_routes(duthost, vlan_ports_list): - cmds = [] - logger.info("Configure route for remote IP") - for item in vlan_ports_list: - for i in vlan_ports_list[0]['permit_vlanid']: - cmds.append('ip route add {} via {}'.format( - item['permit_vlanid'][i]['remote_ip'], - item['permit_vlanid'][i]['peer_ip'] - )) - - duthost.shell_cmds(cmds=cmds) - @pytest.fixture(scope="module", autouse=True) -def setup_vlan(duthosts, rand_one_dut_hostname, ptfhost, vlan_ports_list, vlan_intfs_list, cfg_facts): +def setup_vlan(duthosts, rand_one_dut_hostname, ptfhost, tbinfo, work_vlan_ports_list, vlan_intfs_dict, cfg_facts): duthost = duthosts[rand_one_dut_hostname] # --------------------- Setup ----------------------- try: @@ -171,30 +204,26 @@ def setup_vlan(duthosts, rand_one_dut_hostname, ptfhost, vlan_ports_list, vlan_i shutdown_portchannels(duthost, portchannel_interfaces) - create_vlan_interfaces(vlan_ports_list, ptfhost) - - setUpArpResponder(vlan_ports_list, ptfhost) + create_vlan_interfaces(work_vlan_ports_list, ptfhost) - create_test_vlans(duthost, cfg_facts, vlan_ports_list, vlan_intfs_list) + if tbinfo['topo']['name'] != 't0-56-po2vlan': + create_test_vlans(duthost, cfg_facts, work_vlan_ports_list, vlan_intfs_dict) startup_portchannels(duthost, portchannel_interfaces) - add_test_routes(duthost, vlan_ports_list) # --------------------- Testing ----------------------- yield # --------------------- Teardown ----------------------- finally: - tearDown(vlan_ports_list, duthost, ptfhost) + tearDown(work_vlan_ports_list, duthost, ptfhost) -def tearDown(vlan_ports_list, duthost, ptfhost): +def tearDown(work_vlan_ports_list, duthost, ptfhost): logger.info("VLAN test ending ...") - logger.info("Stop arp_responder") - ptfhost.command('supervisorctl stop arp_responder') logger.info("Delete VLAN intf") - for vlan_port in vlan_ports_list: - for permit_vlanid in vlan_port["permit_vlanid"].keys(): + for vlan_port in work_vlan_ports_list: + for permit_vlanid in vlan_port["permit_vlanid"]: if int(permit_vlanid) != vlan_port["pvid"]: try: ptfhost.command("ip link delete eth{idx}.{pvid}".format( @@ -206,36 +235,6 @@ def tearDown(vlan_ports_list, duthost, ptfhost): config_reload(duthost) -def setUpArpResponder(vlan_ports_list, ptfhost): - logger.info("Copy arp_responder to ptfhost") - d = defaultdict(list) - for vlan_port in vlan_ports_list: - for permit_vlanid in vlan_port["permit_vlanid"].keys(): - if 
int(permit_vlanid) == vlan_port["pvid"]: - iface = "eth{}".format(vlan_port["port_index"][0]) - else: - iface = "eth{}".format(vlan_port["port_index"][0]) - # iface = "eth{}.{}".format(vlan_port["port_index"][0], permit_vlanid) - d[iface].append(vlan_port["permit_vlanid"][permit_vlanid]["peer_ip"]) - - with open('/tmp/from_t1.json', 'w') as file: - json.dump(d, file) - ptfhost.copy(src='/tmp/from_t1.json', dest='/tmp/from_t1.json') - - extra_vars = { - 'arp_responder_args': '' - } - - ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars) - ptfhost.template(src='templates/arp_responder.conf.j2', dest='/tmp') - ptfhost.command("cp /tmp/arp_responder.conf.j2 /etc/supervisor/conf.d/arp_responder.conf") - - ptfhost.command('supervisorctl reread') - ptfhost.command('supervisorctl update') - - logger.info("Start arp_responder") - ptfhost.command('supervisorctl start arp_responder') - def build_icmp_packet(vlan_id, src_mac="00:22:00:00:00:02", dst_mac="ff:ff:ff:ff:ff:ff", src_ip="192.168.0.1", dst_ip="192.168.0.2", ttl=64): @@ -284,7 +283,7 @@ def verify_packets_with_portchannel(test, pkt, ports=[], portchannel_ports=[], d % (device_number, str(port_group))) -def verify_icmp_packets(ptfadapter, vlan_ports_list, vlan_port, vlan_id): +def verify_icmp_packets(ptfadapter, work_vlan_ports_list, vlan_port, vlan_id): untagged_pkt = build_icmp_packet(0) tagged_pkt = build_icmp_packet(vlan_id) untagged_dst_ports = [] @@ -296,7 +295,7 @@ def verify_icmp_packets(ptfadapter, vlan_ports_list, vlan_port, vlan_id): masked_tagged_pkt.set_do_not_care_scapy(scapy.Dot1Q, "prio") logger.info("Verify untagged packets from ports " + str(vlan_port["port_index"][0])) - for port in vlan_ports_list: + for port in work_vlan_ports_list: if vlan_port["port_index"] == port["port_index"]: # Skip src port continue @@ -305,7 +304,7 @@ def verify_icmp_packets(ptfadapter, vlan_ports_list, vlan_port, vlan_id): untagged_dst_pc_ports.append(port["port_index"]) else: untagged_dst_ports += port["port_index"] - elif vlan_id in map(int, port["permit_vlanid"].keys()): + elif vlan_id in map(int, port["permit_vlanid"]): if len(port["port_index"]) > 1: tagged_dst_pc_ports.append(port["port_index"]) else: @@ -321,8 +320,18 @@ def verify_icmp_packets(ptfadapter, vlan_ports_list, vlan_port, vlan_id): portchannel_ports=tagged_dst_pc_ports) +def verify_unicast_packets(ptfadapter, send_pkt, exp_pkt, src_port, dst_ports): + testutils.send(ptfadapter, src_port, send_pkt) + try: + testutils.verify_packets_any(ptfadapter, exp_pkt, ports=dst_ports) + except AssertionError as detail: + if "Did not receive expected packet on any of ports" in str(detail): + logger.error("Expected packet was not received") + raise + + @pytest.mark.bsl -def test_vlan_tc1_send_untagged(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): +def test_vlan_tc1_send_untagged(ptfadapter, work_vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #1 Verify packets egress without tag from ports whose PVID same with ingress port @@ -331,16 +340,26 @@ def test_vlan_tc1_send_untagged(ptfadapter, vlan_ports_list, toggle_all_simulato logger.info("Test case #1 starting ...") - for vlan_port in vlan_ports_list: + for vlan_port in work_vlan_ports_list: pkt = build_icmp_packet(0) logger.info("Send untagged packet from {} ...".format(vlan_port["port_index"][0])) logger.info(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%")) - testutils.send(ptfadapter, vlan_port["port_index"][0], pkt) - 
verify_icmp_packets(ptfadapter, vlan_ports_list, vlan_port, vlan_port["pvid"]) + if vlan_port['pvid'] != 0: + testutils.send(ptfadapter, vlan_port["port_index"][0], pkt) + verify_icmp_packets(ptfadapter, work_vlan_ports_list, vlan_port, vlan_port["pvid"]) + else: + exp_pkt = Mask(pkt) + exp_pkt.set_do_not_care_scapy(scapy.Dot1Q, "vlan") + dst_ports = [] + for port in work_vlan_ports_list: + dst_ports += port["port_index"] if port != vlan_port else [] + testutils.send(ptfadapter, vlan_port["port_index"][0], pkt) + logger.info("Check on " + str(dst_ports) + "...") + testutils.verify_no_packet_any(ptfadapter, exp_pkt, dst_ports) @pytest.mark.bsl -def test_vlan_tc2_send_tagged(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): +def test_vlan_tc2_send_tagged(ptfadapter, work_vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #2 Send tagged packets from each port. @@ -350,17 +369,17 @@ def test_vlan_tc2_send_tagged(ptfadapter, vlan_ports_list, toggle_all_simulator_ logger.info("Test case #2 starting ...") - for vlan_port in vlan_ports_list: - for permit_vlanid in map(int, vlan_port["permit_vlanid"].keys()): + for vlan_port in work_vlan_ports_list: + for permit_vlanid in map(int, vlan_port["permit_vlanid"]): pkt = build_icmp_packet(permit_vlanid) logger.info("Send tagged({}) packet from {} ...".format(permit_vlanid, vlan_port["port_index"][0])) logger.info(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%")) testutils.send(ptfadapter, vlan_port["port_index"][0], pkt) - verify_icmp_packets(ptfadapter, vlan_ports_list, vlan_port, permit_vlanid) + verify_icmp_packets(ptfadapter, work_vlan_ports_list, vlan_port, permit_vlanid) @pytest.mark.bsl -def test_vlan_tc3_send_invalid_vid(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): +def test_vlan_tc3_send_invalid_vid(ptfadapter, work_vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #3 Send packets with invalid VLAN ID @@ -372,11 +391,11 @@ def test_vlan_tc3_send_invalid_vid(ptfadapter, vlan_ports_list, toggle_all_simul invalid_tagged_pkt = build_icmp_packet(4095) masked_invalid_tagged_pkt = Mask(invalid_tagged_pkt) masked_invalid_tagged_pkt.set_do_not_care_scapy(scapy.Dot1Q, "vlan") - for vlan_port in vlan_ports_list: + for vlan_port in work_vlan_ports_list: dst_ports = [] src_port = vlan_port["port_index"][0] - dst_ports += [port["port_index"] for port in vlan_ports_list - if port != vlan_port ] + for port in work_vlan_ports_list: + dst_ports += port["port_index"] if port != vlan_port else [] logger.info("Send invalid tagged packet " + " from " + str(src_port) + "...") logger.info(invalid_tagged_pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%")) testutils.send(ptfadapter, src_port, invalid_tagged_pkt) @@ -385,118 +404,144 @@ def test_vlan_tc3_send_invalid_vid(ptfadapter, vlan_ports_list, toggle_all_simul @pytest.mark.bsl -def test_vlan_tc4_tagged_non_broadcast(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): +def test_vlan_tc4_tagged_unicast(ptfadapter, work_vlan_ports_list, vlan_intfs_dict, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #4 Send packets w/ src and dst specified over tagged ports in vlan Verify that bidirectional communication between two tagged ports work """ - vlan_ids = vlan_ports_list[0]['permit_vlanid'].keys() - tagged_test_vlan = vlan_ids[0] - - ports_for_test = [] + for tagged_test_vlan in vlan_intfs_dict: + ports_for_test = [] - for vlan_port in 
vlan_ports_list: - if vlan_port['pvid'] != tagged_test_vlan: - ports_for_test.append(vlan_port['port_index']) - - #take two tagged ports for test - src_port = ports_for_test[0] - dst_port = ports_for_test[-1] + for vlan_port in work_vlan_ports_list: + if vlan_port['pvid'] != tagged_test_vlan and tagged_test_vlan in vlan_port['permit_vlanid']: + ports_for_test.append(vlan_port['port_index']) + if len(ports_for_test) < 2: + continue - src_mac = ptfadapter.dataplane.get_mac(0, src_port[0]) - dst_mac = ptfadapter.dataplane.get_mac(0, dst_port[0]) + #take two tagged ports for test + src_port = ports_for_test[0] + dst_port = ports_for_test[-1] - transmit_tagged_pkt = build_icmp_packet(vlan_id=tagged_test_vlan, src_mac=src_mac, dst_mac=dst_mac) - return_transmit_tagged_pkt = build_icmp_packet(vlan_id=tagged_test_vlan, src_mac=dst_mac, dst_mac=src_mac) + src_mac = ptfadapter.dataplane.get_mac(0, src_port[0]) + dst_mac = ptfadapter.dataplane.get_mac(0, dst_port[0]) - logger.info("Tagged packet to be sent from port {} to port {}".format(src_port[0], dst_port)) + transmit_tagged_pkt = build_icmp_packet(vlan_id=tagged_test_vlan, src_mac=src_mac, dst_mac=dst_mac) + return_transmit_tagged_pkt = build_icmp_packet(vlan_id=tagged_test_vlan, src_mac=dst_mac, dst_mac=src_mac) - testutils.send(ptfadapter, src_port[0], transmit_tagged_pkt) + logger.info("Tagged({}) packet to be sent from port {} to port {}".format(tagged_test_vlan, src_port, dst_port)) - try: - testutils.verify_packets_any(ptfadapter, transmit_tagged_pkt, ports=dst_port) - except Exception as detail: - if "Did not receive expected packet on any of ports" in str(detail): - logger.error("Expected packet was not received") - raise + verify_unicast_packets(ptfadapter, transmit_tagged_pkt, transmit_tagged_pkt, src_port[0], dst_port) - logger.info("One Way Tagged Packet Transmission Works") - logger.info("Tagged packet successfully sent from port {} to port {}".format(src_port[0], dst_port)) + logger.info("One Way Tagged Packet Transmission Works") + logger.info("Tagged({}) packet successfully sent from port {} to port {}".format(tagged_test_vlan, src_port, dst_port)) - logger.info("Tagged packet to be sent from port {} to port {}".format(dst_port[0], src_port)) + logger.info("Tagged({}) packet to be sent from port {} to port {}".format(tagged_test_vlan, dst_port, src_port)) - testutils.send(ptfadapter, dst_port[0], return_transmit_tagged_pkt) - - try: - testutils.verify_packets_any(ptfadapter, return_transmit_tagged_pkt, ports=src_port) - except Exception as detail: - if "Did not receive expected packet on any of ports" in str(detail): - logger.error("Expected packet was not received") - raise + verify_unicast_packets(ptfadapter, return_transmit_tagged_pkt, return_transmit_tagged_pkt, dst_port[0], src_port) - logger.info("Two Way Tagged Packet Transmission Works") - logger.info("Tagged packet successfully sent from port {} to port {}".format(dst_port[0], src_port)) + logger.info("Two Way Tagged Packet Transmission Works") + logger.info("Tagged({}) packet successfully sent from port {} to port {}".format(tagged_test_vlan, dst_port[0], src_port)) @pytest.mark.bsl -def test_vlan_tc5_untagged_non_broadcast(ptfadapter, vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor): +def test_vlan_tc5_untagged_unicast(ptfadapter, work_vlan_ports_list, vlan_intfs_dict, toggle_all_simulator_ports_to_rand_selected_tor): """ Test case #5 Send packets w/ src and dst specified over untagged ports in vlan Verify that bidirectional communication between two 
untagged ports work """ - vlan_ids = vlan_ports_list[0]['permit_vlanid'].keys() - tagged_test_vlan = vlan_ids[0] + for untagged_test_vlan in vlan_intfs_dict: - ports_for_test = [] + ports_for_test = [] - for vlan_port in vlan_ports_list: - if vlan_port['pvid'] != tagged_test_vlan: - ports_for_test.append(vlan_port['port_index']) + for vlan_port in work_vlan_ports_list: + if vlan_port['pvid'] == untagged_test_vlan: + ports_for_test.append(vlan_port['port_index']) + if len(ports_for_test) < 2: + continue - #take two tagged ports for test - src_port = ports_for_test[0] - dst_port = ports_for_test[-1] + #take two tagged ports for test + src_port = ports_for_test[0] + dst_port = ports_for_test[-1] - src_mac = ptfadapter.dataplane.get_mac(0, src_port[0]) - dst_mac = ptfadapter.dataplane.get_mac(0, dst_port[0]) + src_mac = ptfadapter.dataplane.get_mac(0, src_port[0]) + dst_mac = ptfadapter.dataplane.get_mac(0, dst_port[0]) - transmit_untagged_pkt = build_icmp_packet(vlan_id=0, src_mac=src_mac, dst_mac=dst_mac) - return_transmit_untagged_pkt = build_icmp_packet(vlan_id=0, src_mac=dst_mac, dst_mac=src_mac) + transmit_untagged_pkt = build_icmp_packet(vlan_id=0, src_mac=src_mac, dst_mac=dst_mac) + return_transmit_untagged_pkt = build_icmp_packet(vlan_id=0, src_mac=dst_mac, dst_mac=src_mac) - logger.info("Untagged packet to be sent from port {} to port {}".format(src_port[0], dst_port)) + logger.info("Untagged({}) packet to be sent from port {} to port {}".format(untagged_test_vlan, src_port, dst_port)) - testutils.send(ptfadapter, src_port[0], transmit_untagged_pkt) + verify_unicast_packets(ptfadapter, transmit_untagged_pkt, transmit_untagged_pkt, src_port[0], dst_port) - try: - testutils.verify_packets_any(ptfadapter, transmit_untagged_pkt, ports=dst_port) - except Exception as detail: - if "Did not receive expected packet on any of ports" in str(detail): - logger.error("Expected packet was not received") - raise + logger.info("One Way Untagged Packet Transmission Works") + logger.info("Untagged({}) packet successfully sent from port {} to port {}".format(untagged_test_vlan, src_port, dst_port)) - logger.info("One Way Untagged Packet Transmission Works") - logger.info("Untagged packet successfully sent from port {} to port {}".format(src_port[0], dst_port)) + logger.info("Untagged({}) packet to be sent from port {} to port {}".format(untagged_test_vlan, dst_port, src_port)) - logger.info("Untagged packet to be sent from port {} to port {}".format(dst_port[0], src_port)) + verify_unicast_packets(ptfadapter, return_transmit_untagged_pkt, return_transmit_untagged_pkt, dst_port[0], src_port) - testutils.send(ptfadapter, dst_port[0], return_transmit_untagged_pkt) + logger.info("Two Way Untagged Packet Transmission Works") + logger.info("Untagged({}) packet successfully sent from port {} to port {}".format(untagged_test_vlan, dst_port, src_port)) - try: - testutils.verify_packets_any(ptfadapter, return_transmit_untagged_pkt, ports=src_port) - except Exception as detail: - if "Did not receive expected packet on any of ports" in str(detail): - logger.error("Expected packet was not received") - raise - logger.info("Two Way Untagged Packet Transmission Works") - logger.info("Untagged packet successfully sent from port {} to port {}".format(dst_port[0], src_port)) +@pytest.mark.bsl +def test_vlan_tc6_tagged_untagged_unicast(ptfadapter, work_vlan_ports_list, vlan_intfs_dict, toggle_all_simulator_ports_to_rand_selected_tor): + """ + Test case #6 + Send packets w/ src and dst specified over tagged port and 
untagged port in vlan + Verify that bidirectional communication between tagged port and untagged port work + """ + for test_vlan in vlan_intfs_dict: + untagged_ports_for_test = [] + tagged_ports_for_test = [] + + for vlan_port in work_vlan_ports_list: + if test_vlan not in vlan_port['permit_vlanid']: + continue + if vlan_port['pvid'] == test_vlan: + untagged_ports_for_test.append(vlan_port['port_index']) + else: + tagged_ports_for_test.append(vlan_port['port_index']) + if not untagged_ports_for_test: + continue + if not tagged_ports_for_test: + continue + + #take two ports for test + src_port = untagged_ports_for_test[0] + dst_port = tagged_ports_for_test[0] + + src_mac = ptfadapter.dataplane.get_mac(0, src_port[0]) + dst_mac = ptfadapter.dataplane.get_mac(0, dst_port[0]) + transmit_untagged_pkt = build_icmp_packet(vlan_id=0, src_mac=src_mac, dst_mac=dst_mac) + exp_tagged_pkt = build_icmp_packet(vlan_id=test_vlan, src_mac=src_mac, dst_mac=dst_mac) + exp_tagged_pkt = Mask(exp_tagged_pkt) + exp_tagged_pkt.set_do_not_care_scapy(scapy.Dot1Q, "prio") -def test_vlan_tc6_tagged_qinq_switch_on_outer_tag(ptfadapter, vlan_ports_list, duthost, toggle_all_simulator_ports_to_rand_selected_tor): + return_transmit_tagged_pkt = build_icmp_packet(vlan_id=test_vlan, src_mac=dst_mac, dst_mac=src_mac) + exp_untagged_pkt = build_icmp_packet(vlan_id=0, src_mac=dst_mac, dst_mac=src_mac) + + logger.info("Untagged({}) packet to be sent from port {} to port {}".format(test_vlan, src_port, dst_port)) + + verify_unicast_packets(ptfadapter, transmit_untagged_pkt, exp_tagged_pkt, src_port[0], dst_port) + + logger.info("One Way Untagged Packet Transmission Works") + logger.info("Untagged({}) packet successfully sent from port {} to port {}".format(test_vlan, src_port, dst_port)) + + logger.info("Tagged({}) packet to be sent from port {} to port {}".format(test_vlan, dst_port, src_port)) + + verify_unicast_packets(ptfadapter, return_transmit_tagged_pkt, exp_untagged_pkt, dst_port[0], src_port) + + logger.info("Two Way tagged Packet Transmission Works") + logger.info("Tagged({}) packet successfully sent from port {} to port {}".format(test_vlan, dst_port, src_port)) + + +def test_vlan_tc7_tagged_qinq_switch_on_outer_tag(ptfadapter, work_vlan_ports_list, vlan_intfs_dict, duthost, toggle_all_simulator_ports_to_rand_selected_tor): """ - Test case #6 + Test case #7 Send qinq packets w/ src and dst specified over tagged ports in vlan Verify that the qinq packet is switched based on outer vlan tag + src/dst mac """ @@ -506,24 +551,24 @@ def test_vlan_tc6_tagged_qinq_switch_on_outer_tag(ptfadapter, vlan_ports_list, d if duthost.facts["asic_type"] not in qinq_switching_supported_platforms: pytest.skip("Unsupported platform") - vlan_ids = vlan_ports_list[0]['permit_vlanid'].keys() - tagged_test_vlan = vlan_ids[0] + for tagged_test_vlan in vlan_intfs_dict: + ports_for_test = [] + for vlan_port in work_vlan_ports_list: + if vlan_port['pvid'] != tagged_test_vlan and tagged_test_vlan in vlan_port['permit_vlanid']: + ports_for_test.append(vlan_port['port_index']) + if len(ports_for_test) < 2: + continue - ports_for_test = [] - for vlan_port in vlan_ports_list: - if vlan_port['pvid'] != tagged_test_vlan: - ports_for_test.append(vlan_port['port_index'][0]) + #take two tagged ports for test + src_port = ports_for_test[0] + dst_port = ports_for_test[-1] - #take two tagged ports for test - src_port = ports_for_test[0] - dst_port = ports_for_test[-1] + src_mac = ptfadapter.dataplane.get_mac(0, src_port[0]) + dst_mac = 
ptfadapter.dataplane.get_mac(0, dst_port[0]) - src_mac = ptfadapter.dataplane.get_mac(0, src_port) - dst_mac = ptfadapter.dataplane.get_mac(0, dst_port) + transmit_qinq_pkt = build_qinq_packet(outer_vlan_id=tagged_test_vlan, vlan_id=250, src_mac=src_mac, dst_mac=dst_mac) + logger.info ("QinQ({}) packet to be sent from port {} to port {}".format(tagged_test_vlan, src_port, dst_port)) - transmit_qinq_pkt = build_qinq_packet(outer_vlan_id=tagged_test_vlan, vlan_id=250, src_mac=src_mac, dst_mac=dst_mac) - logger.info ("QinQ packet to be sent from port {} to port {}".format(src_port, dst_port)) - testutils.send(ptfadapter, src_port, transmit_qinq_pkt) + verify_unicast_packets(ptfadapter, transmit_qinq_pkt, transmit_qinq_pkt, src_port[0], dst_port) - testutils.verify_packet(ptfadapter, transmit_qinq_pkt, dst_port) - logger.info ("QinQ packet switching worked successfully...") \ No newline at end of file + logger.info ("QinQ packet switching worked successfully...") diff --git a/tests/voq/conftest.py b/tests/voq/conftest.py index 694a8063cd..6caeeeb722 100644 --- a/tests/voq/conftest.py +++ b/tests/voq/conftest.py @@ -22,7 +22,7 @@ def chassis_facts(duthosts, request): host_vars = get_host_visible_vars(inv_files, a_host.hostname) assert 'slot_num' in host_vars, "Variable 'slot_num' not found in inventory for host {}".format(a_host.hostname) slot_num = host_vars['slot_num'] - a_host.facts['slot_num'] = int(slot_num) + a_host.facts['slot_num'] = int(slot_num[len("slot"):]) @pytest.fixture(scope="module") diff --git a/tests/voq/test_voq_ipfwd.py b/tests/voq/test_voq_ipfwd.py index 2f3e362943..9f59aa6e05 100644 --- a/tests/voq/test_voq_ipfwd.py +++ b/tests/voq/test_voq_ipfwd.py @@ -983,7 +983,7 @@ def test_front_panel_linkflap_port(self, duthosts, all_cfg_facts, self.linkflap_up(fanout, fanport, ports['portA']['dut'], lport) # need bgp to establish - wait_until(200, 20, bgp_established, ports['portA']['dut'], ports['portA']['asic']) + wait_until(200, 20, 0, bgp_established, ports['portA']['dut'], ports['portA']['asic']) # Validate from port A and neighbor A that everything is good after port is up. 
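Note that the `wait_until(200, 20, bgp_established, ...)` call above becomes `wait_until(200, 20, 0, bgp_established, ...)`: the patch threads a new third positional argument through every `wait_until` call site ahead of the condition callable. The helper itself lives in tests/common/utilities.py and is not shown in this diff; the sketch below is only a guess at the shape implied by the call sites, treating the new argument as an initial delay before polling starts.

```python
import time

def wait_until(timeout, interval, delay, condition, *args, **kwargs):
    """Poll condition(*args, **kwargs) until it returns truthy or timeout expires.

    Hypothetical sketch matching the updated call sites in this patch: the
    third positional argument (delay) is assumed to be a pause, in seconds,
    before the first poll. Every caller in the patch passes 0 for it.
    """
    if delay:
        time.sleep(delay)
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition(*args, **kwargs):
            return True
        time.sleep(interval)
    return False

# Matches the updated call site in test_front_panel_linkflap_port:
#   wait_until(200, 20, 0, bgp_established, ports['portA']['dut'], ports['portA']['asic'])
```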
logger.info("=" * 80) diff --git a/tests/voq/test_voq_nbr.py b/tests/voq/test_voq_nbr.py index 26b202eac5..fe9bacdee7 100644 --- a/tests/voq/test_voq_nbr.py +++ b/tests/voq/test_voq_nbr.py @@ -744,10 +744,10 @@ def test_neighbor_hw_mac_change(duthosts, enum_rand_one_per_hwsku_frontend_hostn # Check neighbor on local linecard logger.info("*" * 60) logger.info("Verify initial neighbor: %s, port %s", neighbor, local_port) - pytest_assert(wait_until(60, 2, check_arptable_mac, per_host, asic, neighbor, original_mac, checkstate=False), + pytest_assert(wait_until(60, 2, 0, check_arptable_mac, per_host, asic, neighbor, original_mac, checkstate=False), "MAC {} didn't change in ARP table".format(original_mac)) sonic_ping(asic, neighbor, verbose=True) - pytest_assert(wait_until(60, 2, check_arptable_mac, per_host, asic, neighbor, original_mac), + pytest_assert(wait_until(60, 2, 0, check_arptable_mac, per_host, asic, neighbor, original_mac), "MAC {} didn't change in ARP table".format(original_mac)) dump_and_verify_neighbors_on_asic(duthosts, per_host, asic, nbr_to_test, nbrhosts, all_cfg_facts, nbr_macs) @@ -761,11 +761,11 @@ def test_neighbor_hw_mac_change(duthosts, enum_rand_one_per_hwsku_frontend_hostn if ":" in neighbor: logger.info("Force neighbor solicitation to workaround long IPV6 timer.") asic_cmd(asic, "ndisc6 %s %s" % (neighbor, local_port)) - pytest_assert(wait_until(60, 2, check_arptable_mac, per_host, asic, neighbor, NEW_MAC, checkstate=False), + pytest_assert(wait_until(60, 2, 0, check_arptable_mac, per_host, asic, neighbor, NEW_MAC, checkstate=False), "MAC {} didn't change in ARP table".format(NEW_MAC)) sonic_ping(asic, neighbor, verbose=True) - pytest_assert(wait_until(60, 2, check_arptable_mac, per_host, asic, neighbor, NEW_MAC), + pytest_assert(wait_until(60, 2, 0, check_arptable_mac, per_host, asic, neighbor, NEW_MAC), "MAC {} didn't change in ARP table".format(NEW_MAC)) logger.info("Verify neighbor after mac change: %s, port %s", neighbor, local_port) check_one_neighbor_present(duthosts, per_host, asic, neighbor, nbrhosts, all_cfg_facts) @@ -783,10 +783,10 @@ def test_neighbor_hw_mac_change(duthosts, enum_rand_one_per_hwsku_frontend_hostn logger.info("Force neighbor solicitation to workaround long IPV6 timer.") asic_cmd(asic, "ndisc6 %s %s" % (neighbor, local_port)) pytest_assert( - wait_until(60, 2, check_arptable_mac, per_host, asic, neighbor, original_mac, checkstate=False), + wait_until(60, 2, 0, check_arptable_mac, per_host, asic, neighbor, original_mac, checkstate=False), "MAC {} didn't change in ARP table".format(original_mac)) sonic_ping(asic, neighbor, verbose=True) - pytest_assert(wait_until(60, 2, check_arptable_mac, per_host, asic, neighbor, original_mac), + pytest_assert(wait_until(60, 2, 0, check_arptable_mac, per_host, asic, neighbor, original_mac), "MAC {} didn't change in ARP table".format(original_mac)) dump_and_verify_neighbors_on_asic(duthosts, per_host, asic, nbr_to_test, nbrhosts, all_cfg_facts, nbr_macs) @@ -829,7 +829,7 @@ def linkflap_down(self, fanout, fanport, dut, dut_intf): """ logger.info("Bring down link: %s/%s <-> %s/%s", fanout.hostname, fanport, dut.hostname, dut_intf) fanout.shutdown(fanport) - pytest_assert(wait_until(30, 1, self.check_intf_status, dut, dut_intf, 'down'), + pytest_assert(wait_until(30, 1, 0, self.check_intf_status, dut, dut_intf, 'down'), "dut port {} didn't go down as expected".format(dut_intf)) def linkflap_up(self, fanout, fanport, dut, dut_intf): @@ -848,7 +848,7 @@ def linkflap_up(self, fanout, fanport, dut, dut_intf): """ 
logger.info("Bring up link: %s/%s <-> %s/%s", fanout.hostname, fanport, dut.hostname, dut_intf) fanout.no_shutdown(fanport) - pytest_assert(wait_until(60, 1, self.check_intf_status, dut, dut_intf, 'up'), + pytest_assert(wait_until(60, 1, 0, self.check_intf_status, dut, dut_intf, 'up'), "dut port {} didn't go up as expected".format(dut_intf)) def localport_admindown(self, dut, asic, dut_intf): @@ -865,7 +865,7 @@ def localport_admindown(self, dut, asic, dut_intf): """ logger.info("Admin down port %s/%s", dut.hostname, dut_intf) asic.shutdown_interface(dut_intf) - pytest_assert(wait_until(30, 1, self.check_intf_status, dut, dut_intf, 'down'), + pytest_assert(wait_until(30, 1, 0, self.check_intf_status, dut, dut_intf, 'down'), "dut port {} didn't go down as expected".format(dut_intf)) def localport_adminup(self, dut, asic, dut_intf): @@ -882,7 +882,7 @@ def localport_adminup(self, dut, asic, dut_intf): """ logger.info("Admin up port %s/%s", dut.hostname, dut_intf) asic.startup_interface(dut_intf) - pytest_assert(wait_until(30, 1, self.check_intf_status, dut, dut_intf, 'up'), + pytest_assert(wait_until(30, 1, 0, self.check_intf_status, dut, dut_intf, 'up'), "dut port {} didn't go up as expected".format(dut_intf)) @@ -978,7 +978,7 @@ def test_front_panel_admindown_port(self, duthosts, enum_rand_one_per_hwsku_fron sonic_ping(asic, neighbor) for neighbor in neighbors: - pytest_assert(wait_until(60, 2, check_arptable_state, per_host, asic, neighbor, "REACHABLE"), + pytest_assert(wait_until(60, 2, 0, check_arptable_state, per_host, asic, neighbor, "REACHABLE"), "STATE for neighbor {} did not change to reachable".format(neighbor)) dump_and_verify_neighbors_on_asic(duthosts, per_host, asic, neighbors, nbrhosts, all_cfg_facts, nbr_macs) @@ -1167,7 +1167,7 @@ def test_gratarp_macchange(self, duthosts, enum_rand_one_per_hwsku_frontend_host logger.info("%s port %s is on ptf port: %s", duthost.hostname, local_port, tb_port) logger.info("-" * 60) sonic_ping(asic, neighbor) - pytest_assert(wait_until(60, 2, check_arptable_mac, duthost, asic, neighbor, original_mac), + pytest_assert(wait_until(60, 2, 0, check_arptable_mac, duthost, asic, neighbor, original_mac), "MAC {} didn't change in ARP table".format(original_mac)) check_one_neighbor_present(duthosts, duthost, asic, neighbor, nbrhosts, all_cfg_facts) @@ -1176,13 +1176,13 @@ def test_gratarp_macchange(self, duthosts, enum_rand_one_per_hwsku_frontend_host change_mac(nbrhosts[nbrinfo['vm']], nbrinfo['shell_intf'], NEW_MAC) self.send_grat_pkt(NEW_MAC, neighbor, int(tb_port)) - pytest_assert(wait_until(60, 2, check_arptable_mac, duthost, asic, neighbor, NEW_MAC, checkstate=False), + pytest_assert(wait_until(60, 2, 0, check_arptable_mac, duthost, asic, neighbor, NEW_MAC, checkstate=False), "MAC {} didn't change in ARP table of neighbor {}".format(NEW_MAC, neighbor)) try: sonic_ping(asic, neighbor) except AssertionError: logging.info("No initial response from ping, begin poll to see if ARP table responds.") - pytest_assert(wait_until(60, 2, check_arptable_mac, duthost, asic, neighbor, NEW_MAC, checkstate=True), + pytest_assert(wait_until(60, 2, 0, check_arptable_mac, duthost, asic, neighbor, NEW_MAC, checkstate=True), "MAC {} didn't change in ARP table of neighbor {}".format(NEW_MAC, neighbor)) check_one_neighbor_present(duthosts, duthost, asic, neighbor, nbrhosts, all_cfg_facts) ping_all_neighbors(duthosts, all_cfg_facts, [neighbor]) @@ -1195,10 +1195,10 @@ def test_gratarp_macchange(self, duthosts, enum_rand_one_per_hwsku_frontend_host logger.info("Force 
neighbor solicitation to workaround long IPV6 timer.") asic_cmd(asic, "ndisc6 %s %s" % (neighbor, local_port)) pytest_assert( - wait_until(60, 2, check_arptable_mac, duthost, asic, neighbor, original_mac, checkstate=False), + wait_until(60, 2, 0, check_arptable_mac, duthost, asic, neighbor, original_mac, checkstate=False), "MAC {} didn't change in ARP table".format(original_mac)) sonic_ping(asic, neighbor, verbose=True) - pytest_assert(wait_until(60, 2, check_arptable_mac, duthost, asic, neighbor, original_mac), + pytest_assert(wait_until(60, 2, 0, check_arptable_mac, duthost, asic, neighbor, original_mac), "MAC {} didn't change in ARP table".format(original_mac)) check_one_neighbor_present(duthosts, duthost, asic, neighbor, nbrhosts, all_cfg_facts) diff --git a/tests/vrf/test_vrf.py b/tests/vrf/test_vrf.py index e5b10c1587..ad183a68d9 100644 --- a/tests/vrf/test_vrf.py +++ b/tests/vrf/test_vrf.py @@ -7,6 +7,7 @@ import random import logging import tempfile +import traceback from collections import OrderedDict from natsort import natsorted @@ -14,8 +15,9 @@ import pytest -from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] -from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] +from tests.common.storage_backend.backend_utils import skip_test_module_over_backend_topologies # lgtm[py/unused-import] from tests.ptf_runner import ptf_runner from tests.common.utilities import wait_until from tests.common.reboot import reboot @@ -310,8 +312,9 @@ def cleanup_vlan_peer(ptfhost, vlan_peer_vrf2ns_map): for vrf, ns in vlan_peer_vrf2ns_map.iteritems(): ptfhost.shell("ip netns del {}".format(ns)) -def gen_vrf_fib_file(vrf, tbinfo, ptfhost, dst_intfs, \ - render_file, limited_podset_number=10, limited_tor_number=10): +def gen_vrf_fib_file(vrf, tbinfo, ptfhost, render_file, dst_intfs=None, \ + limited_podset_number=10, limited_tor_number=10): + dst_intfs = dst_intfs if dst_intfs else get_default_vrf_fib_dst_intfs(vrf, tbinfo) extra_vars = { 'testbed_type': tbinfo['topo']['name'], 'props': g_vars['props'], @@ -325,6 +328,23 @@ def gen_vrf_fib_file(vrf, tbinfo, ptfhost, dst_intfs, \ ptfhost.template(src="vrf/vrf_fib.j2", dest=render_file) +def get_default_vrf_fib_dst_intfs(vrf, tbinfo): + ''' + Get default vrf fib destination interfaces(PortChannels) according to the given vrf. + The test configuration is dynamic and can work with 4 and 8 PCs as the number of VMs. + The first half of PCs are related to Vrf1 and the second to Vrf2. 
+ ''' + dst_intfs = [] + vms_num = len(tbinfo['topo']['properties']['topology']['VMs']) + if vrf == 'Vrf1': + dst_intfs_range = list(range(1, int(vms_num / 2) + 1)) + else: + dst_intfs_range = list(range(int(vms_num / 2) + 1, vms_num + 1)) + for intfs_num in dst_intfs_range: + dst_intfs.append('PortChannel000{}'.format(intfs_num)) + + return dst_intfs + def gen_vrf_neigh_file(vrf, ptfhost, render_file): extra_vars = { 'intf_member_indices': g_vars['vrf_intf_member_port_indices'][vrf], @@ -389,7 +409,7 @@ def restore_config_db(localhost, duthost, ptfhost): cleanup_vlan_peer(ptfhost, g_vars['vlan_peer_vrf2ns_map']) @pytest.fixture(scope="module", autouse=True) -def setup_vrf(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, localhost): +def setup_vrf(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, localhost, skip_test_module_over_backend_topologies): duthost = duthosts[rand_one_dut_hostname] # backup config_db.json @@ -582,11 +602,9 @@ class TestVrfFib(): @pytest.fixture(scope="class", autouse=True) def setup_fib_test(self, ptfhost, tbinfo): gen_vrf_fib_file('Vrf1', tbinfo, ptfhost, - dst_intfs=['PortChannel0001', 'PortChannel0002'], render_file='/tmp/vrf1_fib.txt') gen_vrf_fib_file('Vrf2', tbinfo, ptfhost, - dst_intfs=['PortChannel0003', 'PortChannel0004'], render_file='/tmp/vrf2_fib.txt') def test_show_bgp_summary(self, duthosts, rand_one_dut_hostname, cfg_facts): @@ -628,11 +646,9 @@ class TestVrfIsolation(): @pytest.fixture(scope="class", autouse=True) def setup_vrf_isolation(self, ptfhost, tbinfo): gen_vrf_fib_file('Vrf1', tbinfo, ptfhost, - dst_intfs=['PortChannel0001', 'PortChannel0002'], render_file='/tmp/vrf1_fib.txt') gen_vrf_fib_file('Vrf2', tbinfo, ptfhost, - dst_intfs=['PortChannel0003', 'PortChannel0004'], render_file='/tmp/vrf2_fib.txt') gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt") @@ -691,7 +707,7 @@ def is_redirect_supported(self, duthosts, rand_one_dut_hostname): pytest.skip("Switch does not support ACL REDIRECT_ACTION") @pytest.fixture(scope="class", autouse=True) - def setup_acl_redirect(self, duthosts, rand_one_dut_hostname, cfg_facts): + def setup_acl_redirect(self, duthosts, rand_one_dut_hostname, cfg_facts, tbinfo): duthost = duthosts[rand_one_dut_hostname] # -------- Setup ---------- @@ -713,16 +729,16 @@ def setup_acl_redirect(self, duthosts, rand_one_dut_hostname, cfg_facts): pc2_v4_neigh_ips = [ (pc2_if_name, str(ip.ip+1)) for ip in pc2_if_ips['ipv4'] ] pc2_v6_neigh_ips = [ (pc2_if_name, str(ip.ip+1)) for ip in pc2_if_ips['ipv6'] ] - pc4_if_name = 'PortChannel0004' - pc4_if_ips = get_intf_ips(pc4_if_name, cfg_facts) - pc4_v4_neigh_ips = [ (pc4_if_name, str(ip.ip+1)) for ip in pc4_if_ips['ipv4'] ] - pc4_v6_neigh_ips = [ (pc4_if_name, str(ip.ip+1)) for ip in pc4_if_ips['ipv6'] ] + pc_vrf2_if_name = 'PortChannel000{}'.format(len(tbinfo['topo']['properties']['topology']['VMs'])) + pc_vrf2_if_ips = get_intf_ips(pc_vrf2_if_name, cfg_facts) + pc_vrf2_v4_neigh_ips = [ (pc_vrf2_if_name, str(ip.ip+1)) for ip in pc_vrf2_if_ips['ipv4'] ] + pc_vrf2_v6_neigh_ips = [ (pc_vrf2_if_name, str(ip.ip+1)) for ip in pc_vrf2_if_ips['ipv6'] ] - redirect_dst_ips = pc2_v4_neigh_ips + pc4_v4_neigh_ips - redirect_dst_ipv6s = pc2_v6_neigh_ips + pc4_v6_neigh_ips + redirect_dst_ips = pc2_v4_neigh_ips + pc_vrf2_v4_neigh_ips + redirect_dst_ipv6s = pc2_v6_neigh_ips + pc_vrf2_v6_neigh_ips redirect_dst_ports = [] redirect_dst_ports.append(vrf_intf_ports['Vrf1'][pc2_if_name]) - redirect_dst_ports.append(vrf_intf_ports['Vrf2'][pc4_if_name]) + 
redirect_dst_ports.append(vrf_intf_ports['Vrf2'][pc_vrf2_if_name]) self.c_vars['src_ports'] = src_ports self.c_vars['dst_ports'] = dst_ports @@ -980,7 +996,6 @@ class TestVrfWarmReboot(): def setup_vrf_warm_reboot(self, ptfhost, tbinfo): # -------- Setup ---------- gen_vrf_fib_file('Vrf1', tbinfo, ptfhost, - dst_intfs=['PortChannel0001', 'PortChannel0002'], render_file='/tmp/vrf1_fib.txt', limited_podset_number=50, limited_tor_number=16) @@ -1032,11 +1047,11 @@ def test_vrf_swss_warm_reboot(self, duthosts, rand_one_dut_hostname, cfg_facts, "Some components didn't finish reconcile: {} ...".format(tbd_comp_list) # basic check after warm reboot - assert wait_until(300, 20, duthost.critical_services_fully_started), \ + assert wait_until(300, 20, 0, duthost.critical_services_fully_started), \ "All critical services should fully started!{}".format(duthost.critical_services) up_ports = [p for p, v in cfg_facts['PORT'].items() if v.get('admin_status', None) == 'up' ] - assert wait_until(300, 20, check_interface_status, duthost, up_ports), \ + assert wait_until(300, 20, 0, check_interface_status, duthost, up_ports), \ "All interfaces should be up!" def test_vrf_system_warm_reboot(self, duthosts, rand_one_dut_hostname, localhost, cfg_facts, partial_ptf_runner): @@ -1075,10 +1090,10 @@ def test_vrf_system_warm_reboot(self, duthosts, rand_one_dut_hostname, localhost assert len(tbd_comp_list) == 0, "Some components didn't finish reconcile: {} ...".format(tbd_comp_list) # basic check after warm reboot - assert wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started" + assert wait_until(300, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started" up_ports = [p for p, v in cfg_facts['PORT'].items() if v.get('admin_status', None) == 'up' ] - assert wait_until(300, 20, check_interface_status, duthost, up_ports), "Not all interfaces are up" + assert wait_until(300, 20, 0, check_interface_status, duthost, up_ports), "Not all interfaces are up" class TestVrfCapacity(): @@ -1316,7 +1331,7 @@ def setup_vrf_unbindintf(self, duthosts, rand_one_dut_hostname, ptfhost, tbinfo, # -------- Teardown ---------- if self.c_vars['rebind_intf']: self.rebind_intf(duthost) - wait_until(120, 10, check_bgp_facts, duthost, cfg_facts) + wait_until(120, 10, 0, check_bgp_facts, duthost, cfg_facts) def rebind_intf(self, duthost): duthost.shell("config interface vrf bind PortChannel0001 Vrf1") @@ -1331,7 +1346,7 @@ def setup_vrf_rebind_intf(self, duthosts, rand_one_dut_hostname, cfg_facts): self.c_vars['rebind_intf'] = False # Mark to skip rebind interface during teardown # check bgp session state after rebind - assert wait_until(120, 10, check_bgp_facts, duthost, cfg_facts), \ + assert wait_until(120, 10, 0, check_bgp_facts, duthost, cfg_facts), \ "Bgp sessions should be re-estabalished after Portchannel0001 rebind to Vrf" def test_pc1_ip_addr_flushed(self, duthosts, rand_one_dut_hostname): @@ -1426,7 +1441,6 @@ def test_pc1_neigh_after_rebind(self, partial_ptf_runner): @pytest.mark.usefixtures('setup_vrf_rebind_intf') def test_vrf1_fib_after_rebind(self, ptfhost, tbinfo, partial_ptf_runner): gen_vrf_fib_file('Vrf1', tbinfo, ptfhost, - dst_intfs=['PortChannel0001', 'PortChannel0002'], render_file='/tmp/rebindvrf_vrf1_fib.txt') partial_ptf_runner( @@ -1453,11 +1467,9 @@ def setup_vrf_deletion(self, duthosts, rand_one_dut_hostname, ptfhost, tbinfo, c duthost = duthosts[rand_one_dut_hostname] # -------- Setup ---------- gen_vrf_fib_file('Vrf1', 
tbinfo, ptfhost, - dst_intfs=['PortChannel0001', 'PortChannel0002'], render_file="/tmp/vrf1_fib.txt") gen_vrf_fib_file('Vrf2', tbinfo, ptfhost, - dst_intfs=['PortChannel0003', 'PortChannel0004'], render_file="/tmp/vrf2_fib.txt") gen_vrf_neigh_file('Vrf1', ptfhost, render_file="/tmp/vrf1_neigh.txt") @@ -1472,7 +1484,7 @@ def setup_vrf_deletion(self, duthosts, rand_one_dut_hostname, ptfhost, tbinfo, c # -------- Teardown ---------- if self.c_vars['restore_vrf']: self.restore_vrf(duthost) - wait_until(120, 10, check_bgp_facts, duthost, cfg_facts) + wait_until(120, 10, 0, check_bgp_facts, duthost, cfg_facts) @pytest.fixture(scope='class') def setup_vrf_restore(self, duthosts, rand_one_dut_hostname, cfg_facts): @@ -1481,7 +1493,7 @@ def setup_vrf_restore(self, duthosts, rand_one_dut_hostname, cfg_facts): self.c_vars['restore_vrf'] = False # Mark to skip restore vrf during teardown # check bgp session state after restore - assert wait_until(120, 10, check_bgp_facts, duthost, cfg_facts), \ + assert wait_until(120, 10, 0, check_bgp_facts, duthost, cfg_facts), \ "Bgp sessions should be re-estabalished after restore Vrf1" def test_pc1_ip_addr_flushed(self, duthosts, rand_one_dut_hostname): diff --git a/tests/vrf/test_vrf_attr.py b/tests/vrf/test_vrf_attr.py index 12d19130b1..1671a183b3 100644 --- a/tests/vrf/test_vrf_attr.py +++ b/tests/vrf/test_vrf_attr.py @@ -11,7 +11,9 @@ from test_vrf import PTF_TEST_PORT_MAP from tests.ptf_runner import ptf_runner -from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory +from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import] +from tests.common.storage_backend.backend_utils import skip_test_module_over_backend_topologies # lgtm[py/unused-import] + pytestmark = [ pytest.mark.topology('t0') diff --git a/tests/vrf/vrf_config_db.j2 b/tests/vrf/vrf_config_db.j2 index 0caa1cdd31..a8abe650ab 100644 --- a/tests/vrf/vrf_config_db.j2 +++ b/tests/vrf/vrf_config_db.j2 @@ -3,10 +3,19 @@ {% if k == 'BGP_NEIGHBOR' %} "BGP_NEIGHBOR": { {% for neigh in cfg_t0['BGP_NEIGHBOR'] | sort %} -{% if cfg_t0['BGP_NEIGHBOR'][neigh]['name'] in ['ARISTA01T1', 'ARISTA02T1'] %} +{# to detect number of pcs, used multiplier 2, because each neigh have ipv4 and ipv6 key #} +{% if cfg_t0['BGP_NEIGHBOR'] | length == 8 %} +{% if cfg_t0['BGP_NEIGHBOR'][neigh]['name'] in ['ARISTA01T1', 'ARISTA02T1'] %} "Vrf1|{{ neigh }}": {{ cfg_t0['BGP_NEIGHBOR'][neigh] | to_nice_json | indent(width=8) }} +{%- else %} + "Vrf2|{{ neigh }}": {{ cfg_t0['BGP_NEIGHBOR'][neigh] | to_nice_json | indent(width=8) }} +{%- endif %} {%- else %} +{% if cfg_t0['BGP_NEIGHBOR'][neigh]['name'] in ['ARISTA01T1', 'ARISTA02T1', 'ARISTA03T1', 'ARISTA04T1'] %} + "Vrf1|{{ neigh }}": {{ cfg_t0['BGP_NEIGHBOR'][neigh] | to_nice_json | indent(width=8) }} +{%- else %} "Vrf2|{{ neigh }}": {{ cfg_t0['BGP_NEIGHBOR'][neigh] | to_nice_json | indent(width=8) }} +{%- endif %} {%- endif %} {%- if not loop.last %},{%endif %} @@ -55,12 +64,23 @@ {% elif k == 'PORTCHANNEL_INTERFACE' %} "PORTCHANNEL_INTERFACE": { {% for pc in cfg_t0['PORTCHANNEL_INTERFACE'] | sort %} -{% if pc in ['PortChannel0001', 'PortChannel0002'] %} +{# each pc have 3 keys: pc, pc|ipv4 and pc|ipv6 #} +{% if cfg_t0['PORTCHANNEL_INTERFACE'] | length == 12 %} +{% if pc in ['PortChannel0001', 'PortChannel0002'] %} "{{ pc }}": {"vrf_name": "Vrf1"} -{%- elif pc in ['PortChannel0003', 'PortChannel0004'] %} +{%- elif pc in ['PortChannel0003', 'PortChannel0004'] %} "{{ pc }}": {"vrf_name": "Vrf2"} +{%- else %} + "{{ pc }}": {{ 
cfg_t0['PORTCHANNEL_INTERFACE'][pc] }} +{%- endif %} {%- else %} +{% if pc in ['PortChannel0001', 'PortChannel0002', 'PortChannel0003', 'PortChannel0004'] %} + "{{ pc }}": {"vrf_name": "Vrf1"} +{%- elif pc in ['PortChannel0005', 'PortChannel0006', 'PortChannel0007', 'PortChannel0008'] %} + "{{ pc }}": {"vrf_name": "Vrf2"} +{%- else %} "{{ pc }}": {{ cfg_t0['PORTCHANNEL_INTERFACE'][pc] }} +{%- endif %} {%- endif %} {%- if not loop.last %},{% endif %} diff --git a/tests/vrf/vrf_fib.j2 b/tests/vrf/vrf_fib.j2 index 5411555db9..67d76357ad 100644 --- a/tests/vrf/vrf_fib.j2 +++ b/tests/vrf/vrf_fib.j2 @@ -5,7 +5,7 @@ {%- endmacro %} {# defualt route#} -{% if testbed_type == 't0' or testbed_type == 't0-64' %} +{% if testbed_type == 't0' or testbed_type == 't0-64' or testbed_type == 't0-56'%} 0.0.0.0/0 {{ gen_dst_ports(dst_intfs) }} ::/0 {{ gen_dst_ports(dst_intfs) }} diff --git a/tests/vxlan/test_vnet_route_leak.py b/tests/vxlan/test_vnet_route_leak.py index d72ebaace8..c4bf63d877 100644 --- a/tests/vxlan/test_vnet_route_leak.py +++ b/tests/vxlan/test_vnet_route_leak.py @@ -73,7 +73,7 @@ def configure_dut(minigraph_facts, duthosts, rand_one_dut_hostname, vnet_config, logger.info("Restarting BGP and waiting for BGP sessions") duthost.shell(RESTART_BGP_CMD) - if not wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, bgp_connected, duthost): + if not wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, 0, bgp_connected, duthost): logger.warning("BGP sessions not up {} seconds after BGP restart, restoring with `config_reload`".format(BGP_WAIT_TIMEOUT)) config_reload(duthost) else: @@ -189,7 +189,7 @@ def test_vnet_route_leak(configure_dut, duthosts, rand_one_dut_hostname): logger.info("Restarting BGP") duthost.shell(RESTART_BGP_CMD) - pytest_assert(wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, bgp_connected, duthost), BGP_ERROR_TEMPLATE.format(BGP_WAIT_TIMEOUT)) + pytest_assert(wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, 0, bgp_connected, duthost), BGP_ERROR_TEMPLATE.format(BGP_WAIT_TIMEOUT)) leaked_routes = get_leaked_routes(duthost) pytest_assert(not leaked_routes, LEAKED_ROUTES_TEMPLATE.format(leaked_routes)) @@ -198,7 +198,7 @@ def test_vnet_route_leak(configure_dut, duthosts, rand_one_dut_hostname): duthost.shell(CONFIG_SAVE_CMD) config_reload(duthost) - pytest_assert(wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, bgp_connected, duthost), BGP_ERROR_TEMPLATE.format(BGP_WAIT_TIMEOUT)) + pytest_assert(wait_until(BGP_WAIT_TIMEOUT, BGP_POLL_RATE, 0, bgp_connected, duthost), BGP_ERROR_TEMPLATE.format(BGP_WAIT_TIMEOUT)) leaked_routes = get_leaked_routes(duthost) pytest_assert(not leaked_routes, LEAKED_ROUTES_TEMPLATE.format(leaked_routes))
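For reference, the new `get_default_vrf_fib_dst_intfs()` in tests/vrf/test_vrf.py splits the available PortChannels evenly between the two VRFs, which is why the hard-coded `dst_intfs` lists could be dropped from the fixtures above. A standalone sketch of the same arithmetic (the VM counts here are illustrative, not real testbed data):

```python
def default_vrf_fib_dst_intfs(vrf, vms_num):
    # Mirrors get_default_vrf_fib_dst_intfs(): the first half of the
    # PortChannels belong to Vrf1, the second half to Vrf2.
    if vrf == 'Vrf1':
        rng = range(1, vms_num // 2 + 1)
    else:
        rng = range(vms_num // 2 + 1, vms_num + 1)
    return ['PortChannel000{}'.format(n) for n in rng]

# 4-VM topology: Vrf1 -> PortChannel0001-0002, Vrf2 -> PortChannel0003-0004
assert default_vrf_fib_dst_intfs('Vrf1', 4) == ['PortChannel0001', 'PortChannel0002']
assert default_vrf_fib_dst_intfs('Vrf2', 4) == ['PortChannel0003', 'PortChannel0004']
# 8-VM topology (e.g. t0-56): Vrf1 -> PortChannel0001-0004, Vrf2 -> PortChannel0005-0008
assert default_vrf_fib_dst_intfs('Vrf1', 8) == ['PortChannel0001', 'PortChannel0002',
                                                'PortChannel0003', 'PortChannel0004']
assert default_vrf_fib_dst_intfs('Vrf2', 8) == ['PortChannel0005', 'PortChannel0006',
                                                'PortChannel0007', 'PortChannel0008']
```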
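Similarly, the vrf_config_db.j2 change keys its VRF assignment off the table sizes (8 BGP_NEIGHBOR entries, or 12 PORTCHANNEL_INTERFACE keys at 3 keys per PortChannel, indicates a 4-PC topology). Expressed in Python rather than Jinja2, the PortChannel-to-VRF mapping the template produces is roughly:

```python
def vrf_for_portchannel(pc, total_pcs):
    # Rough Python rendering of the PORTCHANNEL_INTERFACE branch of
    # vrf_config_db.j2: with 4 PortChannels the first two go to Vrf1 and
    # the last two to Vrf2; with 8, the split is four and four. Any other
    # PortChannel keeps its original (non-VRF) entry, represented here as None.
    if total_pcs == 4:
        vrf1 = {'PortChannel0001', 'PortChannel0002'}
        vrf2 = {'PortChannel0003', 'PortChannel0004'}
    else:
        vrf1 = {'PortChannel0001', 'PortChannel0002', 'PortChannel0003', 'PortChannel0004'}
        vrf2 = {'PortChannel0005', 'PortChannel0006', 'PortChannel0007', 'PortChannel0008'}
    if pc in vrf1:
        return 'Vrf1'
    if pc in vrf2:
        return 'Vrf2'
    return None

assert vrf_for_portchannel('PortChannel0003', 4) == 'Vrf2'
assert vrf_for_portchannel('PortChannel0003', 8) == 'Vrf1'
```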