From c14ff80337d1c02c78ef2b32dce62b9d02ad0248 Mon Sep 17 00:00:00 2001
From: noaOrMlnx <58519608+noaOrMlnx@users.noreply.github.com>
Date: Tue, 19 May 2020 18:47:14 +0300
Subject: [PATCH] Merge Azure/master to local fork (#8)

* Convert dip_sip ansible test to pytest (#1488)

  * Convert dip_sip ansible test to pytest
  * Fix review comments:
    - Add ansible wrapper to dip_sip.yml
    - Delete unnecessary files
    Signed-off-by: Noa Or
  * Fix review comments: check if the topology is a LAG group by minigraph facts.
  * Update test_dip_sip.py

* [pmon daemon check] refactoring pmon daemon state check code (#1537)

  * [daemon utils] move platform/check_daemon_status.py to common/platform/daemon_utils.py
    Signed-off-by: Ying Xie
  * [pmon daemon] refactoring pmon daemon state checking
    - Refactor get_pmon_daemon_list to get_pmon_daemon_states: return a list of
      daemon states, excluding daemons that are known to be short lived and
      daemons disabled by configuration.
    - Refactor the daemon state check to check the returned states.
    Signed-off-by: Ying Xie

* Fix test_techsupport.py so it can run with the T1 topology (#1538)

* [lgtm] Configure LGTM to analyze test code (#1544)
  Signed-off-by: Danny Allen

* [tests] Fix LGTM errors in tests folder (#1545)
  Signed-off-by: Danny Allen

* Convert vxlan-decap testing to pytest (#1518)
  Signed-off-by: Xin Wang
  Co-authored-by: Xin Wang

* Add the APC PSU support for SNMP PSU controller (#1461)

  Add APC SNMP MIB for PDU control:
    PORT_NAME_BASE_OID = ".1.3.6.1.4.1.318.1.1.4.4.2.1.4"
    PORT_STATUS_BASE_OID = ".1.3.6.1.4.1.318.1.1.12.3.5.1.1.4"
    PORT_CONTROL_BASE_OID = ".1.3.6.1.4.1.318.1.1.12.3.3.1.1.4"

  How did you verify/test it?

    {username}@{sonic-mgmt-docker}:/var/sonic-mgmt-int/tests/common/plugins/psu_controller$ python
    Python 2.7.12 (default, Oct 8 2019, 14:14:10) [GCC 5.4.0 20160609] on linux2
    Type "help", "copyright", "credits" or "license" for more information.
    from snmp_psu_controllers import *
    psucntl = get_psu_controller('',"")
    psucntl.get_psu_status()
    [{'psu_id': 0, 'psu_on': True}]
    psucntl.turn_off_psu('0')
    True
    psucntl.get_psu_status()
    [{'psu_id': 0, 'psu_on': False}]
    psucntl.turn_on_psu('0')
    True
    psucntl.get_psu_status()
    [{'psu_id': 0, 'psu_on': True}]
    print psucntl.psuType
    APC
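  For context on how the base OIDs above drive a PDU outlet, here is a minimal
  sketch using the standard net-snmp command-line tools. The host address,
  community strings, and the on/off command values (1 = immediate on,
  2 = immediate off, per common APC PowerNet MIB usage) are assumptions for
  illustration; this is not the snmp_psu_controllers implementation.

```
import subprocess

# Base OIDs from the APC PowerNet MIB referenced above.
PORT_STATUS_BASE_OID = ".1.3.6.1.4.1.318.1.1.12.3.5.1.1.4"
PORT_CONTROL_BASE_OID = ".1.3.6.1.4.1.318.1.1.12.3.3.1.1.4"

# Assumed command values; verify against your PDU's MIB before use.
OUTLET_ON, OUTLET_OFF = 1, 2

def snmp_get(host, community, oid):
    # Shell out to net-snmp's snmpget; -Oqv prints only the value.
    out = subprocess.check_output(
        ["snmpget", "-v2c", "-c", community, "-Oqv", host, oid])
    return out.decode().strip()

def set_outlet(host, community, outlet, command):
    # Outlet rows are addressed by appending the 1-based outlet index.
    oid = "{}.{}".format(PORT_CONTROL_BASE_OID, outlet)
    subprocess.check_call(
        ["snmpset", "-v2c", "-c", community, host, oid, "i", str(command)])

if __name__ == "__main__":
    pdu = "192.0.2.10"  # hypothetical PDU management address
    print(snmp_get(pdu, "public", PORT_STATUS_BASE_OID + ".1"))
    set_outlet(pdu, "private", 1, OUTLET_OFF)
```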
* [Mellanox] Fix issues for thermal control test cases (#1524)

  * [thermal fix] 1. do not mock PSU fan speed to less than 100; 2. wait 5 seconds after turning on the PSU
  * Add some checks for test case test_thermal_control_psu_absence

* [pytest/hash] add hash key ingress-port test (#1509)

  1. Iterate once for every port when the hash key is ingress port.
  2. In loose mode, there is no need to enter the check_balancing function.

* [advanced reboot] Add Paramiko module for device connection (#1542)

  The Paramiko module provides a fallback mechanism to using username/password.
  This is required if we are rebooting into a new image using the advanced
  reboot test fixture.
  signed-off-by: Tamer Ahmed

* [pytest] Port advanced reboot based test cases (sad path) (#1543)

  This patch ports the remaining sad path advanced reboot test cases to the
  pytest infra.
  signed-off-by: Tamer Ahmed

* Update the importing of conn_graph_facts after platform_fixtures.py is removed (#1547)

  PR #1492 copied the implementation of conn_graph_facts in
  tests/platform/platform_fixtures.py to tests/common/fixtures/conn_graph_facts.py.
  Then PR #1503 removed the file tests/platform/platform_fixtures.py. But not all
  related imports were updated. This change updates all the platform_fixtures
  related imports.
  Signed-off-by: Xin Wang
  Co-authored-by: Xin Wang

* Create README.testbed.cEOS.md (#1549)

* Configure and enable core uploader (#1522)

  * If the core-storage secret key is available, add it to
    /etc/sonic/core_analyzer.rc.json and enable & start the core_uploader
    service. If https_proxy is provided, update /etc/sonic/core_analyzer.rc.json.
  * Check the entire dict path before de-referencing.
  * Improved regex per comments.
  * Fixed syntax error.
  * Add a sample file for the newly introduced ansible facts.
  * Removed a redundant empty line.
  Co-authored-by: Ubuntu

* [tests/common/devices] Add FanoutHost (#1532)

  - Add FanoutHost, which aggregates the different fanout switch hosts
  - Add a fanouthosts fixture shortcut to get a fanout host easily
  - Support selecting the host instance by OS type
  - Support getting the fanout host by both mgmt IP and hostname
  Co-authored-by: Jing Kan

* [tacacs]: replace shell command with service module (#1550)
  Signed-off-by: Guohan Lu

* [tests/lag]: Convert lag_fallback.yml to pytest (#1526)

  - Convert lag_fallback.yml to pytest
  - Reuse the interface shutdown method to shut a member of the LAG
  - Use EOSHost class methods shutdown/no_shutdown
  - Add a post check for EOSHost shutdown/no_shutdown
  - Fix typos
  Co-authored-by: Jing Kan

* [pytest] Fix get_asic_type method and logging (#1557)

  The get_asic_type method dereferences dut, which is not a member of the class.
  The dut is an instance of the SonicHost class and so should reference self.
  signed-off-by: Tamer Ahmed

* Added iptables rules to make sure BGP doesn't ACK back to BGP peer SYN messages (#1561)

  * Added iptables rules to make sure BGP doesn't ACK back to peer messages.
    Otherwise these TX packets can throw egress buffer accounting off.
    Signed-off-by: Abhishek Dosi
  * Address review comments

* [pytest/hash] Generate available send packet ports instead of hard coding src_ports in hash_test.py. (#1511)

* [fanout switch] build fanout switch list from device_conn map (#1566)

  - Build fanout switch from connection map.
  - Build fanout switch DUT port map for tests.
  - Build credential dict according to the DUT's inventory.
  - Set fanout switch password with the right variable name.
  - Return command outputs.
  Signed-off-by: Ying Xie

  * Fix an indentation
  * add comment for the os type

* Stop arp_update before the crm test to prevent the DUT from learning MAC addresses unexpectedly, which caused the fdb test to fail. (#1559)

* [pytest] Add replace fast-reboot script test option (#1563)

  Adding an option to replace the fast-reboot script on the DUT as part of
  testbed preparation.
  signed-off-by: Tamer Ahmed

* [pytest/creds]: load group vars into creds for the dut (#1575)
  Signed-off-by: Guohan Lu

* [pytest/common/devices] Remove immediate checking of shut/no_shut (#1576)

  - The check can be put in a wait_until if the user wants
  Co-authored-by: Blueve
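  The wait_until suggestion in the item above boils down to a generic polling
  loop. A sketch of that pattern follows; the real helper in
  tests/common/utilities.py may differ in name and signature, so treat this as
  illustrative only.

```
import time

def wait_until(timeout, interval, condition, *args, **kwargs):
    # Poll condition(*args, **kwargs) until it returns True or `timeout`
    # seconds elapse. Returns True on success, False on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition(*args, **kwargs):
            return True
        time.sleep(interval)
    return False

# Hypothetical usage: wait up to 30s, polling every 2s, for a LAG member
# to report oper-status down after a shutdown was issued.
# assert wait_until(30, 2, lambda: get_oper_status("Ethernet4") == "down")
```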
* SPyTest documentation initial version (#1565)
  Co-authored-by: Rama Sasthri, Kristipati

* [tests/conftest] Read os_type from ansible inventory vars to construct FanoutHost (#1577)

  - Read the os var from the ansible inventory manager
  Co-authored-by: Jing Kan

* [link flap] add link flap pytest (#1573)

  * Use show interface status to obtain the operational status of interfaces.
  * Filter out operationally down interfaces from the test list.
  * Address a show_interface.py issue with single-interface output.
  Signed-off-by: Ying Xie

* Fix sensor data for 3700 (#1568)

* [Mellanox] Fix issue: remove non-existent sensor sysfs files (#1567)

* [pytest/snmp_lldp]: exclude mgmt interface from active interface list (#1564)

  Align the criteria for checking lldpRemManAddrTable; the mgmt interface
  'eth0' is ruled out from the lldp neighbor list.

* [pytest/features] Add test for show features command (#1546)

* [pytest assertion] introduce assertion handling code (#1592)
  Signed-off-by: Ying Xie

* [advanced-reboot] Refactor prepare ssh keys into ssh_utils (#1571)

  The ssh keys preparation is also required by the wr_arp test suite, so put it
  into a common utils file for sharing.
  signed-off-by: Tamer Ahmed

* [tests/pc] Use test_po_update.py in config.yml directly (#1589)

  - Replace the original logic of config.yml with test_po_update.py
  - Refine the test_po_update test with the recommended fixture usage
  - Remove useless imports
  - Remove useless vars
  Co-authored-by: Blueve

* [pytest] Convert control plane assisted warm-reboot test (wr_arp) (#1572)

  This PR converts the Ansible control plane assisted warm-reboot (wr_arp) test
  case to pytest.
  signed-off-by: Tamer Ahmed

* [recovery] introduce adaptive recover method (#1583)

  - For interfaces-down failures, try to bring up the interfaces.
  - For service-down failures, reload config to bring them back up.
  - For all other failures, reboot to recover.
  - Make adaptive the default recover method.
  - Reboot the DUT to recover if the database service is down.
  - Only allow cover service to override None action.
  Signed-off-by: Ying Xie

* [tests/lag_2] Refactor test_lag_2.py (#1582)

  - Refine the code structure by using a class to share a static context
  - Remove most useless code and defines
  - Rename some vars
  - Use the fanouthosts fixture for the single_lag test
  - Fix an existing LGTM alarm
  - Use wait_until to check if an interface has been shut
  - Fix pytest warnings regarding the test case detector
  Co-authored-by: Jing Kan

* [ansible] Fix symlink to ferret.conf.j2 (#1595)

  The template file was moved to arp/files/ferret.conf.j2 and the symlink
  update was missed.
  signed-off-by: Tamer Ahmed

* [test/procdockerstatsd] Verify daemon is active/running and uploads data successfully (#1548)

* [pfc_storm_icos.j2]: jinja2 template to start PFC storm on ICOS. (#1590)

  [pfc_storm_stop_icos.j2]: jinja2 template to stop PFC storm on ICOS.
  Note: Jinja2 considers the physical-to-logical mapping of interfaces as per
  the ICOS user manual:
    fpti1_0_1 0/1
    fpti1_0_2 0/2
    fpti1_0_3 0/3
  Co-authored-by: Praveen Chaudhary

* [pytest/hash] Add hash keys: 'src-mac', 'dst-mac', 'ip-proto', 'vlan-id'. (#1512)

  * Revise according to the following review comments:
    1. Do not enable the src-mac, dst-mac and vlan-id hash keys by default.
    2. Move getting all untagged vlan ports to a separate function.
    3. Set up vlans for the hash_key vlan-id test when vlan-id is in hash_keys.
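  To make the balancing idea behind these hash-key tests concrete: send flows
  that vary one header field, record the egress port of each packet, and assert
  the distribution stays close to an even split. The sketch below shows that
  arithmetic only; the tolerance value and the even-split model are assumptions,
  not the repository's actual check_balancing logic.

```
from collections import Counter

def check_balancing(egress_ports, tolerance=0.25):
    # Assert traffic is roughly evenly spread across the observed
    # LAG/ECMP members, within a relative tolerance.
    assert egress_ports, "no packets were observed"
    counts = Counter(egress_ports)
    expected = float(len(egress_ports)) / len(counts)
    for port, hits in counts.items():
        deviation = abs(hits - expected) / expected
        assert deviation <= tolerance, (
            "port %s got %d packets, expected ~%d" % (port, hits, expected))

# e.g. check_balancing(["Ethernet0", "Ethernet4", "Ethernet0", "Ethernet4"])
```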
* [pytest/default_route_check]: check various aspects related to the default route learnt from bgp (#1598)

  - check if the default route has the correct src address set
  - check if the ipv6 default route uses a global nexthop or not
  Signed-off-by: Guohan Lu

* Fix the below issue: (#1596)

  add topo gives an error as part of the VEOS task
  TASK [eos : Get VM front panel interface number]
  with error FAILED! => {"msg": "Incorrect sudo password"}.
  Instead of using ansible_sudo_password, make it ansible_become_password,
  based on the thread below:
  https://github.com/ansible/ansible/issues/62042

* [tests/lag_2] Use test_lag_2.py in lag_2.yml directly (#1602)

  - Remove lag_fallback.yml
  - Remove single_lag_lacp_rate_test.yml
  - Remove single_lag_test.yml
  - Use the pytest runner to run the pytest lag_2 test in lag_2.yml
  Co-authored-by: Jing Kan

* [tests/sensors] Convert sensors_check.yml to pytest (#1601)

  - Convert sensors_check.yml to test_sensors.py
  - Skip the test if the platform is not supported
  - Replace the original playbook with the pytest runner
  - Use the pytest_assert helper instead of native assert to reduce noise
  - Refactor the _platform_info method in SonicHost and expose it as a getter function
  Co-authored-by: Jing Kan

* [tests/telemetry] Verify default config parameters (#1530)

* [pytest/test_default_route]: support 4.9 kernel 201911/201811 release (#1600)
  Signed-off-by: Guohan Lu

* [TestbedProcessing] Minor fixes to suit latest changes (#1604)

  * Remove whitespaces
  * Add ptf_bp_ip parsing
  * Fix testbed.csv generation
  * Use ansible_become_pass

* [pytest]: reorg tests into subfolders (#1613)
  Signed-off-by: Guohan Lu

* [sanity_checks]: add critical process check in sanity checks (#1617)

  - First read the critical process list (/etc/supervisord/critical_process)
    from the container, and then check if any of the processes crashed.
  - Add the snmp container to the critical service list
  - Add auto_recover support
  Signed-off-by: Guohan Lu

  * add comment
    Signed-off-by: Guohan Lu
  * add auto_recover support if the process check failed

* [tests/sensors] Move test_sensors.py to domain specific folder (#1616)
  Co-authored-by: Jing Kan

* Fix sensors_check link (#1611)

* [pytest]: add get_ip_route_info in SonicHost (#1618)

  get_ip_route_info returns the ip route info in the kernel for a given
  destination ip. Refactor test_default_route to use the new method.
  Signed-off-by: Guohan Lu

* [Mellanox/platform] Compare CPU's temperature against critical threshold (#1585)

* [loganalyzer] Fail flag support in context manager and callback execution (#1619)
  Signed-off-by: Neetha John

* [Mellanox] add test cases for dynamic minimum fan speed and psu fan speed policy (#1552)

* [pytest/nbrhost]: add neighbor config info in nbrhost class (#1621)

* [vxlan-decap]: Flush the PTF rx buffer before PTF sends packets. (#1608)

* add links to virtual switch testbed setup

* [telemetry certs] deploy certs for telemetry in deploy-mg (#1614)

  * adding server and dsmsroot certs for telemetry
  * adding support for ptfhost copy certs

* [pytest] Add support to populate DUT FDB entries (#1593)

  This adds a new PTF plugin and test case that populates DUT FDB entries. It
  generates n packets and sends them to the DUT vlan IP. The number of distinct
  MACs to distinct IP addresses is configurable.
  signed-off-by: Tamer Ahmed
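  A minimal scapy rendition of that FDB-population idea is sketched below; the
  interface name, VLAN IP, MAC base, and packet format are hypothetical
  stand-ins, not the actual plugin's parameters.

```
from scapy.all import Ether, IP, UDP, sendp

def populate_fdb(iface, vlan_ip, num_macs, mac_base=0x020000000000):
    # Each distinct source MAC sent toward the VLAN IP should become one
    # dynamically learned FDB entry on the DUT.
    for i in range(num_macs):
        src_mac = ":".join("%02x" % (((mac_base + i) >> (8 * b)) & 0xFF)
                           for b in reversed(range(6)))
        pkt = (Ether(src=src_mac, dst="ff:ff:ff:ff:ff:ff") /
               IP(src="10.0.0.%d" % (i % 250 + 1), dst=vlan_ip) /
               UDP(sport=4096, dport=4096))
        sendp(pkt, iface=iface, verbose=False)

# populate_fdb("eth0", "192.168.0.1", num_macs=64)
```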
* Revert "[telemetry certs] deploy certs for telemetry in deploy-mg (#1614)" (#1628)

  This reverts commit 19e92b19c0c7610429402ef15b30288947f6ca13.

* [Mellanox] Fix sensor data for 3700/3700c/3800 (#1627)

* Ignore test in test_turn_on_off_psu_and_check_psustatus (#1606)

* Fixed fixture scope conflict for DUT monitor (#1630)
  Signed-off-by: Yuriy Volynets

* [test plans] Moving test plans from sonic-wiki repo to sonic-mgmt repo (#1631)
  Signed-off-by: Ying Xie

* [pytest/bgp_gr_test]: add bgp graceful restart helper test (#1623)
  Signed-off-by: Guohan Lu

* [Mellanox] Adjust test cases for fan led support (#1580)

* Fixes in test_drop_counters.py (#1610)

  - Added more attempts to read and verify whether the expected drop counter
    changed to the specific value. Delays in counter refreshing were observed,
    so this fix was added.
  - Fixed 'test_not_expected_vlan_tag_drop': removed vlan 1, because port
    channels are members of vlan 1.
  - Skipped 'test_loopback_filter', added description.
  Signed-off-by: Yuriy Volynets

* [swap_syncd] Fix failures in swap syncd (#1632)
  Signed-off-by: Danny Allen

* [pytest]: fix typo in parse default route output (#1634)

* Update README.md

* Update README.md

* [pytest/ansible] Add support for multi-duts (#1432)

  * [pytest/ansible] Add support for multi-duts
  * review comment: use duts instead of dut
  * Fixing various test cases to use duthost instead of testbed_devices['dut']
  * trim spaces, if any, when present in the duts list
  signed-off-by: Tamer Ahmed

* Pytest organization proposal (#1605)
  Signed-off-by: Neetha John

* [test/dir_bcast] Convert dir_bcast.yml to pytest (#1609)

  * [test/dir_bcast] Convert dir_bcast.yml to pytest
  * Force refresh the router before testing and move to the ipfwd folder
  * Fix LGTM alerts
  Co-authored-by: Jing Kan

* [copp] Convert COPP test to pytest (#1633)
  Signed-off-by: Danny Allen

* [doc]: Update document to specify non-empty password file (#1638)

  ansible 2.8 expects a non-empty file

* Revert "[copp] Convert COPP test to pytest (#1633)" (#1643)

  This reverts commit 9a8fc1ed62666f46b8f23b5587860799ace13704.

* [pytest] Fix ptf_runner issue introduced by COPP test conversion (#1644)

  * [copp] Convert COPP test to pytest (#1633)
  * Fix missing space in ptf_runner
  Signed-off-by: Danny Allen

* [fast-reboot]: Fix IP range overlapping. (#1637)
  Signed-off-by: Nazarii Hnydyn

* Add support of specifying multiple inventory files (#1645)

  The pytest-ansible plugin always assumes that the value of the cli argument
  '--inventory' is a single inventory file. With this enhancement, we can pass
  in multiple inventory files using the cli argument '--inventory'. The
  multiple inventory files can be separated by comma ','. For example:
    pytest --inventory "inventory1, inventory2"
    pytest --inventory inventory1,inventory2
  Signed-off-by: Xin Wang
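  The comma splitting itself is a one-liner; a sketch of the parsing,
  tolerating the whitespace the examples above allow (the function name is
  illustrative, not the plugin's actual hook):

```
def parse_inventory_arg(inventory_arg):
    # Split a --inventory value like "inventory1, inventory2" into a
    # clean list of inventory file paths.
    return [path.strip() for path in inventory_arg.split(",") if path.strip()]

assert parse_inventory_arg("inventory1, inventory2") == ["inventory1", "inventory2"]
assert parse_inventory_arg("inventory1,inventory2") == ["inventory1", "inventory2"]
```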
* [pytest] Fix module import issue when running whole test suite (#1642)

  When having multiple conftest.py files, the directory leading to each
  conftest has to be a Python package (presence of __init__.py). Also, add the
  pytest basedir to conftest.py.
  signed-off-by: Tamer Ahmed

* [advanced-reboot] Fix IP range overlapping (#1653)

  This code ports PR 1637 to advanced reboot.
  ported-pr: https://github.com/Azure/sonic-mgmt/pull/1637
  signed-off-by: Tamer Ahmed

* Deprecate ansible_host.py (#1658)

  The functionalities in ansible_host.py, including the AnsibleHost class and
  some exceptions, have been implemented in the common library. This file
  should be deprecated so that people new to sonic-mgmt don't waste time on
  old libraries.
  Changes:
  1. Remove ansible_host.py
  2. Replace all the AnsibleHost related calls with more appropriate fixtures
     or functions.
  3. Remove the duplicated localhost fixture definition
  Signed-off-by: Xin Wang

* [Mellanox] Fix sensor data for SN4700 (#1660)

  * Fix for SN4700 sensors
    Signed-off-by: Shlomi Bitton
  * Edit sensors file with correct labels

* [drop counters] Improve support for combined L2/L3 drop counters (#1649)

  - Get the correct interface for combined L2/L3 counters
  - Add Arista and Dell SKUs to the combined counter list
  Signed-off-by: Danny Allen

* [pytest/ntp] Use local time when behind proxy (#1640)

  When the testbed is behind a proxy, NTP inside the ptf container can't
  synchronize with public NTP servers and the test hangs on the ntpd -gq
  command.

* [recover] improve adaptive recover methods (#1652)

  - When a process is missing, reload_config.
  - When a port channel or vlan link is down, reload_config.
  - Redirect the output of config reload and load minigraph to /dev/null.
  Signed-off-by: Ying Xie

* [sensors] Added support of Montara and Maverics platforms to sku-sensors-data.yml (#1646)
  Signed-off-by: Vitaliy Senchyshyn

* [pytest] PFCWD config test (#1620)

  * Pytest PFCWD config test
    Signed-off-by: Neetha John
  * Address LGTM
    Signed-off-by: Neetha John
  * Rename marker to syslog_marker
    Signed-off-by: Neetha John
  * Separate out verbose comments
    Signed-off-by: Neetha John

* [advanced-reboot] Fix testing hang when doing BGP shutdown on Arista VM (#1579)

  Fix the warm-reboot-sad-bgp testing hang after vEOS was upgraded to 4.20.15M.
  It pops up a confirmation message when doing BGP shutdown.

* [file organization] Rename folder names to allow markers to work again (#1671)

  * Revert "[pytest] Fix module import issue when running whole test suite (#1642)"
    This reverts commit f88cff271a1ab60527a36cbfe22d8c70f3156c31.
  * [file organization] Rename folder names to allow markers to work again
    pytest has an issue dealing with same-named sub-folders being included
    from. In this case, the problem was that several components had 'args' as
    a sub-folder to hold argument parsing utilities. This issue was initially
    caught when we were trying to run the whole test suite. An attempt was
    made to address this issue; however, there is more in pytest that cannot
    tolerate same-name sub-folders. Change 'args' to '_args' and change
    conftest.py accordingly.
    Signed-off-by: Ying Xie

* Rename 'platform' directory to 'platform_tests' to prevent conflicts (#1662)

* Deprecate fixture testbed_devices (#1665)

  The testbed_devices fixture is not a good design. Multiple testbed device
  objects are initialized in this fixture. It is not common that a test script
  needs all the devices, so this fixture may cause unnecessary overhead. It
  would be better for test scripts to use the fixtures for different devices
  on an as-needed basis.
  Changes:
  1. Remove the definition of fixture testbed_devices.
  2. Replace calls to the testbed_devices fixture with other fixtures that fit
     better.
  Signed-off-by: Xin Wang

* [Mellanox] Fix sensor data for 2010 (#1673)

* Update Th buffer params for Alpha change (#1676)
  Signed-off-by: Neetha John

* [QoS] Fix qos issues (#1664)

  Fix some issues found during QoS test.
- Support ARP populate for ptf topo - Add debug info in WRRtest for the purpose of providing detail result in case of failure. Co-authored-by: Ying Xie Co-authored-by: Danny Allen Co-authored-by: Xin Wang Co-authored-by: Xin Wang Co-authored-by: Sujin Kang Co-authored-by: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Co-authored-by: William-zx <47626856+William-zx@users.noreply.github.com> Co-authored-by: Tamer Ahmed Co-authored-by: lguohan Co-authored-by: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Co-authored-by: Ubuntu Co-authored-by: Blueve <672454911@qq.com> Co-authored-by: abdosi <58047199+abdosi@users.noreply.github.com> Co-authored-by: Iris Hsu Co-authored-by: Blueve Co-authored-by: Rama Sasthri, Kristipati Co-authored-by: Rama Sasthri, Kristipati Co-authored-by: Kebo Liu Co-authored-by: pra-moh <49077256+pra-moh@users.noreply.github.com> Co-authored-by: Praveen Chaudhary Co-authored-by: Praveen Chaudhary Co-authored-by: Myron Sosyak <49795530+msosyak@users.noreply.github.com> Co-authored-by: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Co-authored-by: Neetha John Co-authored-by: yvolynets-mlnx <50697593+yvolynets-mlnx@users.noreply.github.com> Co-authored-by: Xin Liu Co-authored-by: Mahesh Maddikayala <10645050+smaheshm@users.noreply.github.com> Co-authored-by: Nazarii Hnydyn Co-authored-by: Xin Wang Co-authored-by: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Co-authored-by: Vitaliy Senchyshyn <43479243+vsenchyshyn@users.noreply.github.com> Co-authored-by: Vincent Chiang <47546216+vincentchiang-ec@users.noreply.github.com> Co-authored-by: Joe LeVeque --- README.md | 3 +- ansible/README.testbed.md | 1 + ansible/TestbedProcessing.py | 50 +- ansible/config_sonic_basedon_testbed.yml | 50 +- ansible/doc/README.testbed.VsSetup.md | 2 +- ansible/doc/README.testbed.cEOS.md | 131 +++++ ansible/group_vars/all/corefile_uploader.yml | 7 + ansible/group_vars/sonic/sku-sensors-data.yml | 512 ++++++++++------ ansible/group_vars/vm_host/creds.yml | 3 +- ansible/library/show_interface.py | 2 +- ansible/library/test_facts.py | 6 +- .../sonic-common/tasks/sensors_check.yml | 27 +- ansible/roles/test/files/helpers/ferret.py | 336 +---------- .../test/files/ptftests/advanced-reboot.py | 16 +- ansible/roles/test/files/ptftests/arista.py | 21 +- .../test/files/ptftests/device_connection.py | 63 ++ ansible/roles/test/files/ptftests/dip_sip.py | 199 ------- .../roles/test/files/ptftests/hash_test.py | 82 ++- .../roles/test/files/ptftests/populate_fdb.py | 176 ++++++ ansible/roles/test/files/ptftests/sad_path.py | 49 +- .../roles/test/files/ptftests/vxlan-decap.py | 4 + .../test/files/saitests/sai_qos_tests.py | 28 +- ansible/roles/test/tasks/bgp_speaker.yml | 2 +- ansible/roles/test/tasks/config.yml | 142 +---- .../test/tasks/crm/crm_test_fdb_entry.yml | 9 + ansible/roles/test/tasks/dhcp_relay.yml | 2 +- ansible/roles/test/tasks/dip_sip.yml | 133 +---- ansible/roles/test/tasks/everflow_testbed.yml | 2 +- ansible/roles/test/tasks/lag_2.yml | 82 +-- ansible/roles/test/tasks/lag_fallback.yml | 95 --- ansible/roles/test/tasks/lldp.yml | 2 +- ansible/roles/test/tasks/mtu.yml | 2 +- .../roles/test/tasks/ptf_runner_reboot.yml | 5 +- ansible/roles/test/tasks/qos_sai.yml | 45 +- .../test/tasks/single_lag_lacp_rate_test.yml | 99 ---- ansible/roles/test/tasks/single_lag_test.yml | 56 -- ansible/roles/test/tasks/sonic.yml | 7 +- ansible/roles/test/tasks/vxlan-decap.yml | 119 +--- ansible/roles/test/templates/ferret.conf.j2 | 11 +- 
.../roles/test/templates/pfc_storm_icos.j2 | 5 + .../test/templates/pfc_storm_stop_icos.j2 | 5 + .../test/templates/vxlan_db.maps.json.j2 | 9 - .../test/templates/vxlan_db.tunnel.json.j2 | 8 - .../roles/test/templates/vxlan_decap.json.j2 | 9 - ansible/testbed-cli.sh | 77 +-- ansible/vars/qos.yml | 78 +-- ansible/vtestbed.csv | 6 +- docs/ACL-test-plan.md | 426 ++++++++++++++ docs/BGP-GR-helper-mode-test-plan.md | 90 +++ docs/BGP-MP-test-plan.md | 55 ++ docs/CRM-test-plan.md | 221 +++++++ docs/Everflow-test-plan.md | 351 +++++++++++ docs/IPv4-Decapsulation-test.md | 187 ++++++ ...CN-WRED-configuration-utility-test-plan.md | 137 +++++ docs/VLAN-trunk-test-plan.md | 260 +++++++++ lgtm.yml | 7 + spytest/Doc/arch.jpg | Bin 0 -> 38295 bytes spytest/Doc/install.md | 56 ++ spytest/Doc/intro.md | 525 +++++++++++++++++ spytest/Doc/ptf.jpg | Bin 0 -> 53676 bytes spytest/Doc/scapy.gif | Bin 0 -> 60829 bytes spytest/Doc/tgen.jpg | Bin 0 -> 25549 bytes spytest/Doc/topo.png | Bin 0 -> 18747 bytes tests/acl/test_acl.py | 3 +- tests/ansible_host.py | 43 -- .../args => arp/arp_args}/__init__.py | 0 tests/arp/arp_args/wr_arp_args.py | 19 + tests/arp/conftest.py | 14 + tests/arp/files/ferret.conf.j2 | 10 + tests/arp/files/ferret.py | 335 +++++++++++ tests/arp/test_wr_arp.py | 216 +++++++ tests/bgp/conftest.py | 58 ++ tests/{ => bgp}/test_bgp_fact.py | 9 +- tests/bgp/test_bgp_gr_helper.py | 103 ++++ tests/{ => bgp}/test_bgp_speaker.py | 0 tests/common/devices.py | 391 ++++++++++++- tests/common/errors.py | 22 +- tests/common/fixtures/advanced_reboot.py | 93 +-- tests/common/fixtures/conn_graph_facts.py | 24 +- tests/common/fixtures/populate_fdb.py | 128 ++++ tests/common/helpers/assertions.py | 6 + tests/common/platform/daemon_utils.py | 23 + tests/common/platform/device_utils.py | 23 + tests/common/platform/ssh_utils.py | 45 ++ tests/common/plugins/ansible_fixtures.py | 14 - .../plugins/dut_monitor/pytest_dut_monitor.py | 12 +- .../common/plugins/loganalyzer/loganalyzer.py | 15 +- .../psu_controller/snmp_psu_controllers.py | 213 ++++--- tests/common/plugins/ptfadapter/__init__.py | 1 - tests/common/plugins/sanity_check/README.md | 2 +- tests/common/plugins/sanity_check/__init__.py | 21 +- tests/common/plugins/sanity_check/checks.py | 44 ++ .../common/plugins/sanity_check/constants.py | 13 +- tests/common/plugins/sanity_check/recover.py | 79 ++- tests/common/plugins/tacacs.py | 8 +- tests/common/reboot.py | 6 +- tests/common/system_utils/docker.py | 115 ++-- tests/common/utilities.py | 2 +- tests/conftest.py | 136 +++-- tests/{fdb/conftest.py => copp/__init__.py} | 0 tests/copp/conftest.py | 31 + tests/copp/copp_utils.py | 118 ++++ tests/copp/scripts/update_copp_config.py | 53 ++ tests/copp/test_copp.py | 201 +++++++ tests/{ => dhcp_relay}/test_dhcp_relay.py | 0 tests/docs/pytest.org.md | 128 ++++ .../drop_counters/combined_drop_counters.yml | 6 +- .../fanout/mellanox/mellanox_fanout.py | 7 +- tests/drop_counters/test_drop_counters.py | 116 ++-- tests/{ => everflow}/test_everflow_testbed.py | 23 +- tests/fdb/test_fdb.py | 6 +- tests/fib/test_fib.py | 79 ++- tests/ipfwd/test_dip_sip.py | 166 ++++++ tests/ipfwd/test_dir_bcast.py | 39 ++ tests/{ => ipfwd}/test_mtu.py | 0 tests/{ => lldp}/test_lldp.py | 33 +- tests/ntp/test_ntp.py | 16 +- tests/pc/test_lag_2.py | 261 +++++++++ tests/pc/test_po_update.py | 44 +- tests/pfcwd/conftest.py | 75 +++ .../conftest.py => pfcwd/files/__init__.py} | 0 tests/pfcwd/files/pfcwd_helper.py | 237 ++++++++ .../templates/config_test_ignore_messages | 8 + 
tests/pfcwd/templates/pfc_config_params.json | 42 ++ tests/pfcwd/test_pfc_config.py | 257 ++++++++ tests/platform/check_daemon_status.py | 31 - tests/platform/test_advanced_reboot.py | 17 - .../api/conftest.py | 3 +- .../api/test_watchdog.py | 11 +- .../api/watchdog.yml | 0 .../broadcom}/conftest.py | 0 .../broadcom/files/ser_injector.py | 0 .../broadcom/test_ser.py | 0 .../check_all_interface_info.py | 0 .../check_critical_services.py | 0 .../check_interface_status.py | 0 .../check_transceiver_status.py | 0 .../{platform => platform_tests}/conftest.py | 6 +- .../files/getportmap.py | 0 .../files/invalid_format_policy.json | 0 .../files/invalid_value_policy.json | 3 + .../files/valid_policy.json | 3 + .../mellanox/check_hw_mgmt_service.py | 0 .../mellanox/check_sysfs.py | 18 +- tests/platform_tests/mellanox/conftest.py | 0 .../mellanox_thermal_control_test_helper.py | 116 +++- .../platform_tests/mellanox/minimum_table.py | 90 +++ .../mellanox/test_check_sfp_presence.py | 13 +- .../mellanox/test_check_sfp_using_ethtool.py | 15 +- .../mellanox/test_check_sysfs.py | 13 +- .../mellanox/test_hw_management_service.py | 5 +- .../mellanox/test_thermal_control.py | 137 +++++ .../platform_tests/platform_args/__init__.py | 0 .../platform_args}/advanced_reboot_args.py | 8 + tests/platform_tests/test_advanced_reboot.py | 203 +++++++ tests/platform_tests/test_link_flap.py | 81 +++ .../test_platform_info.py | 214 +++---- .../test_reboot.py | 56 +- .../test_reload_config.py | 19 +- tests/platform_tests/test_sensors.py | 21 + .../test_sequential_restart.py | 14 +- .../{platform => platform_tests}/test_sfp.py | 62 +- .../test_xcvr_info_in_db.py | 8 +- .../thermal_control_test_helper.py | 14 +- tests/ptf_runner.py | 26 +- tests/pytest.ini | 2 + tests/pytest.org.md | 128 ++++ tests/qos/qos_fixtures.py | 46 +- tests/qos/qos_helpers.py | 129 ++--- tests/qos/test_pfc_counters.py | 74 ++- tests/qos/test_pfc_pause.py | 257 ++++---- tests/route/test_default_route.py | 51 ++ tests/scripts/add_ip.sh | 8 + tests/scripts/fast-reboot | 547 ++++++++++++++++++ tests/scripts/remove_ip.sh | 11 +- tests/{ => sflow}/test_sflow.py | 11 +- tests/show_techsupport/test_techsupport.py | 18 +- tests/snmp/conftest.py | 5 +- tests/snmp/test_snmp_cpu.py | 25 +- tests/snmp/test_snmp_interfaces.py | 10 +- tests/snmp/test_snmp_lldp.py | 26 +- tests/snmp/test_snmp_pfc_counters.py | 10 +- tests/snmp/test_snmp_psu.py | 17 +- tests/snmp/test_snmp_queue.py | 11 +- tests/tacacs/test_ro_user.py | 3 +- tests/telemetry/test_telemetry.py | 62 ++ tests/templates/ptf_nn_agent.conf.dut.j2 | 10 + tests/templates/ptf_nn_agent.conf.ptf.j2 | 10 + tests/test_features.py | 34 ++ tests/test_interfaces.py | 12 +- tests/test_lag_2.py | 220 ------- tests/test_mgmtvrf.py | 78 ++- tests/test_nbr_health.py | 5 +- tests/test_procdockerstatsd.py | 28 + tests/test_vrf.py | 34 +- tests/testbed_setup/conftest.py | 15 + tests/testbed_setup/setup_args/__init__.py | 0 .../setup_args/populate_fdb_args.py | 35 ++ tests/testbed_setup/test_populate_fdb.py | 15 + tests/veos.vtb | 5 + tests/vlan/test_vlan.py | 29 +- tests/vtestbed.csv | 6 +- tests/vxlan/test_vxlan_decap.py | 149 +++++ 203 files changed, 9492 insertions(+), 3136 deletions(-) create mode 100644 ansible/doc/README.testbed.cEOS.md create mode 100644 ansible/group_vars/all/corefile_uploader.yml mode change 100644 => 120000 ansible/roles/test/files/helpers/ferret.py create mode 100644 ansible/roles/test/files/ptftests/device_connection.py delete mode 100644 ansible/roles/test/files/ptftests/dip_sip.py create mode 
100644 ansible/roles/test/files/ptftests/populate_fdb.py delete mode 100644 ansible/roles/test/tasks/lag_fallback.yml delete mode 100644 ansible/roles/test/tasks/single_lag_lacp_rate_test.yml delete mode 100644 ansible/roles/test/tasks/single_lag_test.yml mode change 100644 => 120000 ansible/roles/test/templates/ferret.conf.j2 create mode 100644 ansible/roles/test/templates/pfc_storm_icos.j2 create mode 100644 ansible/roles/test/templates/pfc_storm_stop_icos.j2 delete mode 100644 ansible/roles/test/templates/vxlan_db.maps.json.j2 delete mode 100644 ansible/roles/test/templates/vxlan_db.tunnel.json.j2 delete mode 100644 ansible/roles/test/templates/vxlan_decap.json.j2 create mode 100644 docs/ACL-test-plan.md create mode 100644 docs/BGP-GR-helper-mode-test-plan.md create mode 100644 docs/BGP-MP-test-plan.md create mode 100644 docs/CRM-test-plan.md create mode 100644 docs/Everflow-test-plan.md create mode 100644 docs/IPv4-Decapsulation-test.md create mode 100644 docs/QoS-configuration-in-Config-DB.-ECN-WRED-configuration-utility-test-plan.md create mode 100644 docs/VLAN-trunk-test-plan.md create mode 100644 lgtm.yml create mode 100755 spytest/Doc/arch.jpg create mode 100755 spytest/Doc/install.md create mode 100755 spytest/Doc/intro.md create mode 100755 spytest/Doc/ptf.jpg create mode 100755 spytest/Doc/scapy.gif create mode 100755 spytest/Doc/tgen.jpg create mode 100755 spytest/Doc/topo.png delete mode 100644 tests/ansible_host.py rename tests/{platform/args => arp/arp_args}/__init__.py (100%) create mode 100644 tests/arp/arp_args/wr_arp_args.py create mode 100644 tests/arp/conftest.py create mode 100644 tests/arp/files/ferret.conf.j2 create mode 100644 tests/arp/files/ferret.py create mode 100644 tests/arp/test_wr_arp.py create mode 100644 tests/bgp/conftest.py rename tests/{ => bgp}/test_bgp_fact.py (69%) create mode 100644 tests/bgp/test_bgp_gr_helper.py rename tests/{ => bgp}/test_bgp_speaker.py (100%) create mode 100644 tests/common/fixtures/populate_fdb.py create mode 100644 tests/common/helpers/assertions.py create mode 100644 tests/common/platform/daemon_utils.py create mode 100644 tests/common/platform/device_utils.py create mode 100644 tests/common/platform/ssh_utils.py rename tests/{fdb/conftest.py => copp/__init__.py} (100%) create mode 100644 tests/copp/conftest.py create mode 100644 tests/copp/copp_utils.py create mode 100644 tests/copp/scripts/update_copp_config.py create mode 100644 tests/copp/test_copp.py rename tests/{ => dhcp_relay}/test_dhcp_relay.py (100%) create mode 100644 tests/docs/pytest.org.md rename tests/{ => everflow}/test_everflow_testbed.py (97%) create mode 100644 tests/ipfwd/test_dip_sip.py create mode 100644 tests/ipfwd/test_dir_bcast.py rename tests/{ => ipfwd}/test_mtu.py (100%) rename tests/{ => lldp}/test_lldp.py (71%) create mode 100644 tests/pc/test_lag_2.py create mode 100644 tests/pfcwd/conftest.py rename tests/{platform/broadcom/conftest.py => pfcwd/files/__init__.py} (100%) create mode 100644 tests/pfcwd/files/pfcwd_helper.py create mode 100644 tests/pfcwd/templates/config_test_ignore_messages create mode 100644 tests/pfcwd/templates/pfc_config_params.json create mode 100644 tests/pfcwd/test_pfc_config.py delete mode 100644 tests/platform/check_daemon_status.py delete mode 100644 tests/platform/test_advanced_reboot.py rename tests/{platform => platform_tests}/api/conftest.py (96%) rename tests/{platform => platform_tests}/api/test_watchdog.py (95%) rename tests/{platform => platform_tests}/api/watchdog.yml (100%) rename tests/{platform/mellanox 
=> platform_tests/broadcom}/conftest.py (100%) rename tests/{platform => platform_tests}/broadcom/files/ser_injector.py (100%) rename tests/{platform => platform_tests}/broadcom/test_ser.py (100%) rename tests/{platform => platform_tests}/check_all_interface_info.py (100%) rename tests/{platform => platform_tests}/check_critical_services.py (100%) rename tests/{platform => platform_tests}/check_interface_status.py (100%) rename tests/{platform => platform_tests}/check_transceiver_status.py (100%) rename tests/{platform => platform_tests}/conftest.py (67%) rename tests/{platform => platform_tests}/files/getportmap.py (100%) rename tests/{platform => platform_tests}/files/invalid_format_policy.json (100%) rename tests/{platform => platform_tests}/files/invalid_value_policy.json (89%) rename tests/{platform => platform_tests}/files/valid_policy.json (96%) rename tests/{platform => platform_tests}/mellanox/check_hw_mgmt_service.py (100%) rename tests/{platform => platform_tests}/mellanox/check_sysfs.py (94%) create mode 100644 tests/platform_tests/mellanox/conftest.py rename tests/{platform => platform_tests}/mellanox/mellanox_thermal_control_test_helper.py (88%) create mode 100644 tests/platform_tests/mellanox/minimum_table.py rename tests/{platform => platform_tests}/mellanox/test_check_sfp_presence.py (76%) rename tests/{platform => platform_tests}/mellanox/test_check_sfp_using_ethtool.py (69%) rename tests/{platform => platform_tests}/mellanox/test_check_sysfs.py (61%) rename tests/{platform => platform_tests}/mellanox/test_hw_management_service.py (75%) create mode 100644 tests/platform_tests/mellanox/test_thermal_control.py create mode 100644 tests/platform_tests/platform_args/__init__.py rename tests/{platform/args => platform_tests/platform_args}/advanced_reboot_args.py (89%) create mode 100644 tests/platform_tests/test_advanced_reboot.py create mode 100644 tests/platform_tests/test_link_flap.py rename tests/{platform => platform_tests}/test_platform_info.py (70%) rename tests/{platform => platform_tests}/test_reboot.py (78%) rename tests/{platform => platform_tests}/test_reload_config.py (75%) create mode 100644 tests/platform_tests/test_sensors.py rename tests/{platform => platform_tests}/test_sequential_restart.py (80%) rename tests/{platform => platform_tests}/test_sfp.py (83%) rename tests/{platform => platform_tests}/test_xcvr_info_in_db.py (70%) rename tests/{platform => platform_tests}/thermal_control_test_helper.py (97%) create mode 100644 tests/pytest.org.md create mode 100644 tests/route/test_default_route.py create mode 100755 tests/scripts/add_ip.sh create mode 100755 tests/scripts/fast-reboot mode change 120000 => 100755 tests/scripts/remove_ip.sh rename tests/{ => sflow}/test_sflow.py (98%) create mode 100644 tests/telemetry/test_telemetry.py create mode 100644 tests/templates/ptf_nn_agent.conf.dut.j2 create mode 100644 tests/templates/ptf_nn_agent.conf.ptf.j2 create mode 100644 tests/test_features.py delete mode 100644 tests/test_lag_2.py create mode 100644 tests/test_procdockerstatsd.py create mode 100644 tests/testbed_setup/conftest.py create mode 100644 tests/testbed_setup/setup_args/__init__.py create mode 100644 tests/testbed_setup/setup_args/populate_fdb_args.py create mode 100644 tests/testbed_setup/test_populate_fdb.py create mode 100644 tests/vxlan/test_vxlan_decap.py diff --git a/README.md b/README.md index 61e8009fce..d6c2310ac3 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,10 @@ # Description Tools for managing, configuring and monitoring SONiC +# CII 
Best Practices +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3933/badge)](https://bestpractices.coreinfrastructure.org/projects/3933) # Contribution guide - All contributors must sign a contribution license agreement before contributions can be accepted. [How to become a contributer](https://github.com/Azure/SONiC/wiki/Becoming-a-contributor) diff --git a/ansible/README.testbed.md b/ansible/README.testbed.md index 6fa93a1e82..d2b030f280 100644 --- a/ansible/README.testbed.md +++ b/ansible/README.testbed.md @@ -2,6 +2,7 @@ - [Overview](doc/README.testbed.Overview.md) - [Setup](doc/README.testbed.Setup.md) + - [Virtual Switch Testbed Setup](doc/README.testbed.VsSetup.md) - [Topology](doc/README.testbed.Topology.md) - [Configuration](doc/README.testbed.Config.md) - [Minigraph](doc/README.testbed.Minigraph.md) diff --git a/ansible/TestbedProcessing.py b/ansible/TestbedProcessing.py index 204d1bf629..c43737b8cf 100644 --- a/ansible/TestbedProcessing.py +++ b/ansible/TestbedProcessing.py @@ -9,14 +9,14 @@ Requirement: python version: 2.X - python package: PyYAML 3.12 (or later) - + python package: PyYAML 3.12 (or later) + PyYaml Install Instructions: - [1] Download PyYAML from https://pyyaml.org/wiki/PyYAML + [1] Download PyYAML from https://pyyaml.org/wiki/PyYAML [2] Unpack the archive [3] Install the package by executing (python setup.py install) - [4] Test if installation was successful (python setup.py test) - + [4] Test if installation was successful (python setup.py test) + Usage: put TestbedProcessing.py and testbed.yaml under sonic-mgmt/ansible python TestbedProcessing.py @@ -25,12 +25,12 @@ Arguments: -i : the testbed.yaml file to parse -basedir : the basedir for the project - -backupdir : the backup directory for the files + -backupdir : the backup directory for the files Script Procedure - [1] Backup the files we will be copying + [1] Backup the files we will be copying [2] Load testbed.yaml into dictionaries for easy processing - [3] Generate the files via methods defined below + [3] Generate the files via methods defined below """ # ARGUMENTS TO PARSE @@ -86,7 +86,7 @@ """ represent_none(self, _) -modifies yaml to replace null values with blanks +modifies yaml to replace null values with blanks SOURCE: https://stackoverflow.com/questions/37200150/can-i-dump-blank-instead-of-null-in-yaml-pyyaml/37201633#3720163 """ def represent_none(self, _): @@ -98,7 +98,7 @@ def represent_none(self, _): generateDictionary(data, result, category) @:parameter data - the dictionary to iterate through @:parameter result - the resulting dictionary -Generates the dictionaries that are used when creating csv, yml, or text files +Generates the dictionaries that are used when creating csv, yml, or text files """ def generateDictionary(data, result, category): for key, value in data[category].items(): @@ -108,7 +108,7 @@ def generateDictionary(data, result, category): """ makeMain(data, outfile) @:parameter data - the dictionary to look through -@:parameter outfile - the file to write to +@:parameter outfile - the file to write to makeMain generates the vm_host/main.yml file it pulls two sets of information; dictionary data and proxy data """ @@ -122,7 +122,9 @@ def makeMain(data, outfile): "skip_image_downloading": veos.get("skip_image_downloading"), "vm_console_base": veos.get("vm_console_base"), "memory": veos.get("memory"), - "max_fp_num": veos.get("max_fp_num") + "max_fp_num": veos.get("max_fp_num"), + "ptf_bp_ip": veos.get("ptf_bp_ip"), + "ptf_bp_ipv6": 
veos.get("ptf_bp_ipv6") } proxy = { "proxy_env": { @@ -141,21 +143,21 @@ def makeMain(data, outfile): @:parameter data - the dictionary to look for (in this case: veos) @:parameter outfile - the file to write to generates /group_vars/vm_host/creds.yml -pulls ansible_user, ansible_password, ansible_sudo_pass from vm_host_ansible into a dictionary +pulls ansible_user, ansible_password, ansible_become_pass from vm_host_ansible into a dictionary """ def makeVMHostCreds(data, outfile): veos = data result = { "ansible_user": veos.get("vm_host_ansible").get("ansible_user"), "ansible_password": veos.get("vm_host_ansible").get("ansible_password"), - "ansible_sudo_password": veos.get("vm_host_ansible").get("ansible_sudo_pass") + "ansible_become_pass": veos.get("vm_host_ansible").get("ansible_become_pass") } with open(outfile, "w") as toWrite: toWrite.write("---\n") yaml.dump(result, stream=toWrite, default_flow_style=False) """ -makeSonicLabDevices(data, outfile) +makeSonicLabDevices(data, outfile) @:parameter data - the dictionary to look through (devices dictionary) @:parameter outfile - the file to write to generates files/sonic_lab_devices.csv by pulling hostname, managementIP, hwsku, and type @@ -190,14 +192,14 @@ def makeSonicLabDevices(data, outfile): """ -makeTestbed(data, outfile) +makeTestbed(data, outfile) @:parameter data - the dictionary to look through (devices dictionary) @:parameter outfile - the file to write to generates /testbed.csv by pulling confName, groupName, topo, ptf_image_name, ptf_ip, server, vm_base, dut, and comment error handling: checks if attribute values are None type or string "None" """ def makeTestbed(data, outfile): - csv_columns = "# conf-name,group-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,ptf,comment" + csv_columns = "# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,server,vm_base,dut,comment" topology = data csv_file = outfile @@ -236,7 +238,7 @@ def makeTestbed(data, outfile): if not comment: comment = "" - row = confName + "," + groupName + "," + topo + "," + ptf_image_name + "," + ptf_ip + "," + server + "," + vm_base + "," + dut + "," + ptf + "," + comment + row = confName + "," + groupName + "," + topo + "," + ptf_image_name + "," + ptf + "," + ptf_ip + "," + server + "," + vm_base + "," + dut + "," + comment f.write(row + "\n") except IOError: print("I/O error: issue creating testbed.csv") @@ -245,9 +247,9 @@ def makeTestbed(data, outfile): """ makeSonicLabLinks(data, outfile) @:parameter data - the dictionary to look through (devices dictionary) -@:parameter outfile - the file to write to +@:parameter outfile - the file to write to generates /files/sonic_lab_links.csv by pulling startPort, endPort, bandWidth, vlanID, vlanMode -error handling: checks if attribute values are None type or string "None" +error handling: checks if attribute values are None type or string "None" """ def makeSonicLabLinks(data, outfile): csv_columns = "StartDevice,StartPort,EndDevice,EndPort,BandWidth,VlanID,VlanMode" @@ -308,7 +310,7 @@ def makeEOSCreds(data, outfile): """ makeFanout_secrets(data, outfile) @:parameter data - reads from devices dictionary -@:parameter outfile - the file to write to +@:parameter outfile - the file to write to Makes /group_vars/fanout/secrets.yml Finds the fanout secret credentials by using "fanout" as the value to search for under device_type Under github and personal topology configuration, there is only one designated fanout switch credential @@ -428,7 +430,7 @@ def makeLab(data, devices, testbed, outfile): """ makeVeos(data, 
veos, devices, outfile) @:parameter data - reads from either veos-groups, this helps separate the function into 3 components; children, host, vars -@:parameter veos - reads from either veos +@:parameter veos - reads from either veos @:parameter devices - reads from devices @:parameter outfile - writes to veos """ @@ -487,8 +489,8 @@ def makeHostVar(data): """ updateDockerRegistry -@:parameter outfile - the file to write to -hard codes the docker registry to search locally rather than externally +@:parameter outfile - the file to write to +hard codes the docker registry to search locally rather than externally """ def updateDockerRegistry(docker_registry, outfile): if (not docker_registry.get("docker_registry_host")) or (not docker_registry.get("docker_registry_username")) or (not docker_registry.get("docker_registry_password")): diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml index 0c41728bd9..099e5dfc0a 100644 --- a/ansible/config_sonic_basedon_testbed.yml +++ b/ansible/config_sonic_basedon_testbed.yml @@ -41,12 +41,17 @@ testbed_file: testbed.csv when: testbed_file is not defined + - name: Set default dut index + set_fact: + dut_index: 0 + when: dut_index is not defined + - name: Gathering testbed information test_facts: testbed_name="{{ testbed_name }}" testbed_file="{{ testbed_file }}" delegate_to: localhost - fail: msg="The DUT you are trying to run test does not belongs to this testbed" - when: testbed_facts['dut'] != inventory_hostname + when: testbed_facts['duts'][dut_index] != inventory_hostname - name: set testbed_type set_fact: @@ -144,6 +149,49 @@ become: true when: stat_result.stat.exists is defined and stat_result.stat.exists + - name: Init account key and proxy + set_fact: + core_key: "" + core_proxy: "" + + - name: read account key + set_fact: + core_key: "{{ corefile_uploader['azure_sonic_core_storage']['account_key'] }}" + when: corefile_uploader['azure_sonic_core_storage']['account_key'] is defined + + - name: read https proxy + set_fact: + core_proxy: "{{ corefile_uploader['env']['https_proxy'] }}" + when: corefile_uploader['env']['https_proxy'] is defined + + - name: Put secret in core_analyzer.rc.json + lineinfile: + name: /etc/sonic/core_analyzer.rc.json + regexp: '(^.*)account_key' + line: '\1account_key": "{{ core_key }}",' + backrefs: yes + become: true + when: core_key != "" + + - name: Put https-proxy in core_analyzer.rc.json + lineinfile: + name: /etc/sonic/core_analyzer.rc.json + regexp: '(^.*)https_proxy' + line: '\1https_proxy": "{{ core_proxy }}"' + backrefs: yes + become: true + when: core_proxy != "" + + - name: enable core uploader service + become: true + command: systemctl enable core_uploader.service + when: core_key != "" + + - name: start core uploader service + become: true + command: systemctl start core_uploader.service + when: core_key != "" + - name: Replace snmp community string lineinfile: name: /etc/sonic/snmp.yml diff --git a/ansible/doc/README.testbed.VsSetup.md b/ansible/doc/README.testbed.VsSetup.md index 86fcbed9bc..6c25473862 100644 --- a/ansible/doc/README.testbed.VsSetup.md +++ b/ansible/doc/README.testbed.VsSetup.md @@ -126,7 +126,7 @@ from the ```sonic-mgmt``` container. Then, test you can sudo without password pr ``` $ ./testbed-cli.sh -m veos.vtb -n 4 start-vms server_1 password.txt ``` - - please note: Here "password.txt" is the ansible vault password file name/path. Ansible allows user use ansible vault to encrypt password files. 
By default, this shell script require a password file. If you are not using ansible vault, just create an empty file and pass the filename to the command line. The file name and location is created and maintained by user.
+ - please note: Here "password.txt" is the ansible vault password file name/path. Ansible allows users to use ansible vault to encrypt password files. By default, this shell script requires a password file. If you are not using ansible vault, just create a file with a dummy password and pass the filename to the command line. The file name and location is created and maintained by user.
 
 Check that all VMs are up and running, and the passwd is ```123456```
 ```
diff --git a/ansible/doc/README.testbed.cEOS.md b/ansible/doc/README.testbed.cEOS.md
new file mode 100644
index 0000000000..07fb01a6de
--- /dev/null
+++ b/ansible/doc/README.testbed.cEOS.md
@@ -0,0 +1,131 @@
+# cEOS
+
+This document discusses how to use cEOS as the DUT neighbor device.
+
+cEOS is the container-based EOS. All the software runs inside
+the container. Compared with vEOS, cEOS has a much smaller memory
+footprint.
+
+Follow the [instructions](README.testbed.VsSetup.md) to set up a cEOS testbed.
+
+In the example below, there are four cEOS containers.
+
+```
+lgh@jenkins-worker-15:~$ docker ps
+CONTAINER ID   IMAGE                 COMMAND                  CREATED      STATUS      PORTS   NAMES
+fe48c207a51c   ceosimage:4.23.2F-1   "/sbin/init systemd.…"   8 days ago   Up 8 days           ceos_vms6-1_VM0103
+52297010e66a   ceosimage:4.23.2F-1   "/sbin/init systemd.…"   8 days ago   Up 8 days           ceos_vms6-1_VM0102
+8dd95269b312   ceosimage:4.23.2F-1   "/sbin/init systemd.…"   8 days ago   Up 8 days           ceos_vms6-1_VM0101
+3a50dd481bfb   ceosimage:4.23.2F-1   "/sbin/init systemd.…"   8 days ago   Up 8 days           ceos_vms6-1_VM0100
+b91b48145def   debian:jessie         "bash"                   8 days ago   Up 8 days           net_vms6-1_VM0103
+d1ff26d84249   debian:jessie         "bash"                   8 days ago   Up 8 days           net_vms6-1_VM0102
+1489f52b9617   debian:jessie         "bash"                   8 days ago   Up 8 days           net_vms6-1_VM0101
+ce1214a008ed   debian:jessie         "bash"                   8 days ago   Up 8 days           net_vms6-1_VM0100
+```
+
+## Resource consumption
+
+A cEOS container consumes around 1G of memory.
+
+```
+lgh@jenkins-worker-15:~$ docker stats --no-stream
+CONTAINER ID   NAME                 CPU %   MEM USAGE / LIMIT     MEM %   NET I/O   BLOCK I/O        PIDS
+fe48c207a51c   ceos_vms6-1_VM0103   2.04%   970.9MiB / 125.9GiB   0.75%   0B / 0B   365MB / 55.8GB   138
+52297010e66a   ceos_vms6-1_VM0102   2.19%   965.4MiB / 125.9GiB   0.75%   0B / 0B   237MB / 55.6GB   139
+8dd95269b312   ceos_vms6-1_VM0101   1.93%   980.9MiB / 125.9GiB   0.76%   0B / 0B   300MB / 55.9GB   138
+3a50dd481bfb   ceos_vms6-1_VM0100   2.05%   970.2MiB / 125.9GiB   0.75%   0B / 0B   365MB / 56.1GB   138
+```
+
+## Network Setup
+
+We first create a base container `net_${testbed_name}_${vm_name}`, inject six ethernet ports into the base container,
+and then start the cEOS `ceos_${testbed_name}_${vm_name}` container on top of the base container. The six ethernet ports
The six ethernet ports +are used for +- 1 management port +- 4 front panel ports to DUT +- 1 backplane port to PTF docker + +``` + +------------+ +----+ + | cEOS Ma0 +--------- VM0100-m ---+ br | + | | +----+ + | | + | | +--------------+ + | Et1 +----------VM0100-t0---+ br-VM0100-0 | + | | +--------------+ + | | + | | +--------------+ + | Et2 +----------VM0100-t1---+ br-VM0100-1 | + | | +--------------+ + | | + | | +--------------+ + | Et3 +----------VM0100-t2---+ br-VM0100-2 | + | | +--------------+ + | | + | | +--------------+ + | Et4 +----------VM0100-t3---+ br-VM0100-3 | + | | +--------------+ + | | + | | +--------------+ + | Et5 +----------VM0100-back--+ br-b-vms6-1 | + | | +--------------+ + +------------+ +``` + +## Configuration + +The `/mnt/flash` in cEOS container is mount to `/data/ceos/ceos_${testbed_name}_${vm_name}` on the host. The `/mnt/flash` +contiains the configuration file and logs. + +``` +lgh@jenkins-worker-15:~$ ls -l /data/ceos/ceos_vms6-1_VM0100/ +total 40 +-rw-rw-r--+ 1 root root 924 Mar 31 07:35 AsuFastPktTransmit.log +drwxrwxr-x+ 2 root root 4096 Mar 31 03:31 Fossil +-rw-rw-r--+ 1 root root 568 Mar 31 07:35 SsuRestore.log +-rw-rw-r--+ 1 root root 568 Mar 31 07:35 SsuRestoreLegacy.log +drwxr-xr-x+ 4 897 88 4096 Mar 31 07:35 archive +drwxrwx---+ 3 root root 4096 Mar 18 06:12 debug +drwxrwxr-x+ 2 root root 4096 Mar 18 06:12 fastpkttx.backup +-rw-rw-r--+ 1 root root 180 Mar 31 07:35 kickstart-config +drwxrwxr-x+ 3 root root 4096 Apr 8 09:11 persist +-rw-rwxr--+ 1 root root 1915 Mar 18 06:12 startup-config +``` + +## Login + +There are two ways to get into cEOS container + +1. docker exec +``` +lgh@jenkins-worker-15:~$ docker exec -it ceos_vms6-1_VM0100 Cli +ARISTA01T1>show int status +Port Name Status Vlan Duplex Speed Type Flags Encapsulation +Et1 connected in Po1 full unconf EbraTestPhyPort +Et2 connected 1 full unconf EbraTestPhyPort +Et3 connected 1 full unconf EbraTestPhyPort +Et4 connected 1 full unconf EbraTestPhyPort +Et5 backplane connected routed full unconf EbraTestPhyPort +Ma0 connected routed full 10G 10/100/1000 +Po1 connected routed full unconf N/A + +ARISTA01T1> +``` + +2. 
ssh +``` +lgh@jenkins-worker-15:~$ ssh admin@10.250.0.51 +Password: +ARISTA01T1>show int status +Port Name Status Vlan Duplex Speed Type Flags Encapsulation +Et1 connected in Po1 full unconf EbraTestPhyPort +Et2 connected 1 full unconf EbraTestPhyPort +Et3 connected 1 full unconf EbraTestPhyPort +Et4 connected 1 full unconf EbraTestPhyPort +Et5 backplane connected routed full unconf EbraTestPhyPort +Ma0 connected routed full 10G 10/100/1000 +Po1 connected routed full unconf N/A + +ARISTA01T1> +``` + diff --git a/ansible/group_vars/all/corefile_uploader.yml b/ansible/group_vars/all/corefile_uploader.yml new file mode 100644 index 0000000000..c2c57b86d5 --- /dev/null +++ b/ansible/group_vars/all/corefile_uploader.yml @@ -0,0 +1,7 @@ +# Configure core file storage secret key and https-proxy as required +# +#corefile_uploader: +# azure_sonic_core_storage: +# account_key: "Your Secret" +# env: +# https_proxy: "http://10.10.10.10:8000" diff --git a/ansible/group_vars/sonic/sku-sensors-data.yml b/ansible/group_vars/sonic/sku-sensors-data.yml index 29ed5be68c..2e76c5dd23 100644 --- a/ansible/group_vars/sonic/sku-sensors-data.yml +++ b/ansible/group_vars/sonic/sku-sensors-data.yml @@ -785,20 +785,22 @@ sensors_checks: alarms: fan: [] power: - - tps53679-i2c-5-70/vin/in1_alarm - - tps53679-i2c-5-70/vout1/in2_lcrit_alarm - - tps53679-i2c-5-70/vout1/in2_crit_alarm - - tps53679-i2c-5-70/vout2/in3_lcrit_alarm - - tps53679-i2c-5-70/vout2/in3_crit_alarm + - tps53679-i2c-5-70/vin1/in1_alarm + - tps53679-i2c-5-70/vin2/in2_alarm + - tps53679-i2c-5-70/vout1/in3_lcrit_alarm + - tps53679-i2c-5-70/vout1/in3_crit_alarm + - tps53679-i2c-5-70/vout2/in4_lcrit_alarm + - tps53679-i2c-5-70/vout2/in4_crit_alarm - tps53679-i2c-5-70/iout1/curr1_max_alarm - tps53679-i2c-5-70/iout1/curr1_crit_alarm - tps53679-i2c-5-70/iout2/curr2_max_alarm - tps53679-i2c-5-70/iout2/curr2_crit_alarm - - tps53679-i2c-5-71/vin/in1_alarm - - tps53679-i2c-5-71/vout1/in2_lcrit_alarm - - tps53679-i2c-5-71/vout1/in2_crit_alarm - - tps53679-i2c-5-71/vout2/in3_lcrit_alarm - - tps53679-i2c-5-71/vout2/in3_crit_alarm + - tps53679-i2c-5-71/vin1/in1_alarm + - tps53679-i2c-5-71/vin2/in2_alarm + - tps53679-i2c-5-71/vout1/in3_lcrit_alarm + - tps53679-i2c-5-71/vout1/in3_crit_alarm + - tps53679-i2c-5-71/vout2/in4_lcrit_alarm + - tps53679-i2c-5-71/vout2/in4_crit_alarm - tps53679-i2c-5-71/iout1/curr1_max_alarm - tps53679-i2c-5-71/iout1/curr1_crit_alarm - tps53679-i2c-5-71/iout2/curr2_max_alarm @@ -859,37 +861,41 @@ sensors_checks: - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail Curr (out)/curr1_max_alarm - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in2_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_crit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_crit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_lcrit_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail Curr 
(out)/curr1_max_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in2_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in3_crit_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in4_crit_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in4_lcrit_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail Curr (out)/curr1_max_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in2_crit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in2_lcrit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in4_crit_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in4_lcrit_alarm - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in2_crit_alarm - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in2_lcrit_alarm + - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in3_lcrit_alarm - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_crit_alarm - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_max_alarm @@ -1061,37 +1067,41 @@ sensors_checks: - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail Curr (out)/curr1_max_alarm - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in2_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_crit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_crit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_lcrit_alarm - 
tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail Curr (out)/curr1_max_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in2_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in3_crit_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 3.3V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in4_crit_alarm + - tps53679-i2c-5-71/PMIC-2 ASIC 1.8V Rail (out)/in4_lcrit_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail Curr (out)/curr1_max_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in2_crit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in2_lcrit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-58/PMIC-3 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.8V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in4_crit_alarm + - tps53679-i2c-15-58/PMIC-3 COMEX 1.05V Rail (out)/in4_lcrit_alarm - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in2_crit_alarm - - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in2_lcrit_alarm + - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-61/PMIC-4 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-61/PMIC-4 COMEX 1.2V Rail (out)/in3_lcrit_alarm - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_crit_alarm - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_max_alarm @@ -1252,53 +1262,59 @@ sensors_checks: - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail Curr (out)/curr1_max_alarm - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in2_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-5-70/PMIC-1 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_crit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 0.8V VCORE Rail (out)/in3_lcrit_alarm + - 
tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_crit_alarm + - tps53679-i2c-5-70/PMIC-1 ASIC 1.2V Rail (out)/in4_lcrit_alarm - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail Curr (out)/curr1_max_alarm - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail (out)/in2_crit_alarm - - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-5-71/PMIC-2 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail (out)/in3_crit_alarm + - tps53679-i2c-5-71/PMIC-2 GB 0.8V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail (out)/in4_crit_alarm + - tps53679-i2c-5-71/PMIC-2 GB 1.125V Rail (out)/in4_lcrit_alarm - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-72/PMIC-3 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail (out)/in2_crit_alarm - - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail (out)/in2_lcrit_alarm + - tps53679-i2c-5-72/PMIC-3 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-5-72/PMIC-3 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail (out)/in3_crit_alarm + - tps53679-i2c-5-72/PMIC-3 ASIC 1.8V Rail (out)/in3_lcrit_alarm - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail Curr (out)/curr1_max_alarm - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-73/PMIC-4 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail (out)/in2_crit_alarm - - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail (out)/in3_crit_alarm - - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-73/PMIC-4 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-5-73/PMIC-4 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail (out)/in3_crit_alarm + - tps53679-i2c-5-73/PMIC-4 GB 0.8V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail (out)/in4_crit_alarm + - tps53679-i2c-5-73/PMIC-4 GB 1.125V Rail (out)/in4_lcrit_alarm - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail Curr (out)/curr1_max_alarm - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-15-58/PMIC-5 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail (out)/in2_crit_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail (out)/in2_lcrit_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-15-58/PMIC-5 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-58/PMIC-5 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-58/PMIC-5 COMEX 1.8V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V Rail 
(out)/in4_crit_alarm + - tps53679-i2c-15-58/PMIC-5 COMEX 1.05V Rail (out)/in4_lcrit_alarm - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-61/PMIC-6 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail (out)/in2_crit_alarm - - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail (out)/in2_lcrit_alarm + - tps53679-i2c-15-61/PMIC-6 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-61/PMIC-6 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-61/PMIC-6 COMEX 1.2V Rail (out)/in3_lcrit_alarm - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_crit_alarm - dps460-i2c-4-58/PSU-1 220V Rail Curr (in)/curr1_max_alarm @@ -1522,87 +1538,127 @@ sensors_checks: - mlxreg_fan-isa-0000/Chassis Fan Drawer-6 Tach 1/fan11_fault - mlxreg_fan-isa-0000/Chassis Fan Drawer-6 Tach 2/fan12_fault power: - - tps53679-i2c-5-62/PMIC-1 ASIC 0.8V VCORE_MAIN Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-62/PMIC-1 ASIC 0.8V VCORE_MAIN Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-62/PMIC-1 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-62/PMIC-1 ASIC 0.8V VCORE_MAIN Rail (out)/in2_crit_alarm - - tps53679-i2c-5-62/PMIC-1 ASIC 0.8V VCORE_MAIN Rail (out)/in2_lcrit_alarm - - - tps53679-i2c-5-64/PMIC-2 ASIC 1.8V MAIN Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-64/PMIC-2 ASIC 1.8V MAIN Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-64/PMIC-2 ASIC 1.2V MAIN Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-64/PMIC-2 ASIC 1.2V MAIN Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-64/PMIC-2 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-64/PMIC-2 ASIC 1.8V MAIN Rail (out)/in2_crit_alarm - - tps53679-i2c-5-64/PMIC-2 ASIC 1.8V MAIN Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-64/PMIC-2 ASIC 1.2V MAIN Rail (out)/in3_crit_alarm - - tps53679-i2c-5-64/PMIC-2 ASIC 1.2V MAIN Rail (out)/in3_lcrit_alarm - - - tps53679-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T0_1 Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T0_1 Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-66/PMIC-3 ASIC 1.8V T0_1 Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-66/PMIC-3 ASIC 1.8V T0_1 Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-66/PMIC-3 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T0_1 Rail (out)/in2_crit_alarm - - tps53679-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T0_1 Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-66/PMIC-3 ASIC 1.8V T0_1 Rail (out)/in3_crit_alarm - - tps53679-i2c-5-66/PMIC-3 ASIC 1.8V T0_1 Rail (out)/in3_lcrit_alarm - - - tps53679-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T2_3 Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T2_3 Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-68/PMIC-4 ASIC 1.8V T2_3 Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-68/PMIC-4 ASIC 1.8V T2_3 Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-68/PMIC-4 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T2_3 Rail (out)/in2_crit_alarm - - tps53679-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T2_3 Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-68/PMIC-4 ASIC 1.8V T2_3 Rail (out)/in3_crit_alarm - - tps53679-i2c-5-68/PMIC-4 ASIC 1.8V T2_3 Rail (out)/in3_lcrit_alarm - - - tps53679-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T4_5 Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T4_5 Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-6a/PMIC-5 ASIC 1.8V 
T4_5 Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-6a/PMIC-5 ASIC 1.8V T4_5 Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-6a/PMIC-5 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T4_5 Rail (out)/in2_crit_alarm - - tps53679-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T4_5 Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-6a/PMIC-5 ASIC 1.8V T4_5 Rail (out)/in3_crit_alarm - - tps53679-i2c-5-6a/PMIC-5 ASIC 1.8V T4_5 Rail (out)/in3_lcrit_alarm - - - tps53679-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-6c/PMIC-6 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail (out)/in2_crit_alarm - - tps53679-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail (out)/in3_crit_alarm - - tps53679-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail (out)/in3_lcrit_alarm - - - tps53679-i2c-5-6e/PMIC-7 ASIC 1.2V T0_3 Rail Curr (out)/curr1_crit_alarm - - tps53679-i2c-5-6e/PMIC-7 ASIC 1.2V T0_3 Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-5-6e/PMIC-7 ASIC 1.2V T4_7 Rail Curr (out)/curr2_crit_alarm - - tps53679-i2c-5-6e/PMIC-7 ASIC 1.2V T4_7 Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-5-6e/PMIC-7 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-5-6e/PMIC-7 ASIC 1.2V T0_3 Rail (out)/in2_crit_alarm - - tps53679-i2c-5-6e/PMIC-7 ASIC 1.2V T0_3 Rail (out)/in2_lcrit_alarm - - tps53679-i2c-5-6e/PMIC-7 ASIC 1.2V T4_7 Rail (out)/in3_crit_alarm - - tps53679-i2c-5-6e/PMIC-7 ASIC 1.2V T4_7 Rail (out)/in3_lcrit_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm + - xdpe12284-i2c-5-62/PMIC-1 PSU 12V Rail (in1)/in1_lcrit_alarm + - xdpe12284-i2c-5-62/PMIC-1 PSU 12V Rail (in1)/in1_crit_alarm + - xdpe12284-i2c-5-62/PMIC-1 PSU 12V Rail (in2)/in2_lcrit_alarm + - xdpe12284-i2c-5-62/PMIC-1 PSU 12V Rail (in2)/in2_crit_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm + - xdpe12284-i2c-5-62/PMIC-1 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm + + - xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm + - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm + - xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm + - xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm + - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm + - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm + - xdpe12284-i2c-5-64/PMIC-2 PSU 12V Rail (in1)/in1_lcrit_alarm + - xdpe12284-i2c-5-64/PMIC-2 PSU 12V Rail (in1)/in1_crit_alarm + - xdpe12284-i2c-5-64/PMIC-2 PSU 12V Rail (in2)/in2_lcrit_alarm + - xdpe12284-i2c-5-64/PMIC-2 PSU 12V Rail 
(in2)/in2_crit_alarm + - xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm + - xdpe12284-i2c-5-64/PMIC-2 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm + - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm + - xdpe12284-i2c-5-64/PMIC-2 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm + + - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm + - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm + - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm + - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm + - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm + - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm + - xdpe12284-i2c-5-66/PMIC-3 PSU 12V Rail (in1)/in1_lcrit_alarm + - xdpe12284-i2c-5-66/PMIC-3 PSU 12V Rail (in1)/in1_crit_alarm + - xdpe12284-i2c-5-66/PMIC-3 PSU 12V Rail (in2)/in2_lcrit_alarm + - xdpe12284-i2c-5-66/PMIC-3 PSU 12V Rail (in2)/in2_crit_alarm + - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm + - xdpe12284-i2c-5-66/PMIC-3 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm + - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm + - xdpe12284-i2c-5-66/PMIC-3 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm + + - xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm + - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm + - xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm + - xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm + - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm + - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm + - xdpe12284-i2c-5-68/PMIC-4 PSU 12V Rail (in1)/in1_lcrit_alarm + - xdpe12284-i2c-5-68/PMIC-4 PSU 12V Rail (in1)/in1_crit_alarm + - xdpe12284-i2c-5-68/PMIC-4 PSU 12V Rail (in2)/in2_lcrit_alarm + - xdpe12284-i2c-5-68/PMIC-4 PSU 12V Rail (in2)/in2_crit_alarm + - xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm + - xdpe12284-i2c-5-68/PMIC-4 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm + - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm + - xdpe12284-i2c-5-68/PMIC-4 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm + + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 PSU 12V Rail (in1)/in1_lcrit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 PSU 12V Rail (in1)/in1_crit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 PSU 12V Rail (in2)/in2_lcrit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 PSU 12V Rail (in2)/in2_crit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm + + - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm + - 
xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm + - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm + - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm + - xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 PSU 12V Rail (in1)/in1_lcrit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 PSU 12V Rail (in1)/in1_crit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 PSU 12V Rail (in2)/in2_lcrit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 PSU 12V Rail (in2)/in2_crit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm + + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail Curr (in)/curr1_max_alarm + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail Curr (in)/curr2_max_alarm + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_max_alarm + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail Curr (out)/curr3_crit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail Curr (out)/curr4_max_alarm + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail Curr (out)/curr4_crit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 PSU 12V Rail (in1)/in1_lcrit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 PSU 12V Rail (in1)/in1_crit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 PSU 12V Rail (in2)/in2_lcrit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 PSU 12V Rail (in2)/in2_crit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_crit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 0.85V VCORE_T6_7 Rail (out)/in3_lcrit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail (out)/in4_crit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 ASIC 1.8V T6_7 Rail (out)/in4_lcrit_alarm - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail Curr (out)/curr1_max_alarm - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail Curr (out)/curr2_crit_alarm - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail Curr (out)/curr2_max_alarm - - tps53679-i2c-15-58/PMIC-8 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail (out)/in2_crit_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail (out)/in2_lcrit_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail (out)/in3_crit_alarm - - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail (out)/in3_lcrit_alarm + - tps53679-i2c-15-58/PMIC-8 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-58/PMIC-8 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.8V Rail (out)/in3_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail (out)/in4_crit_alarm + - tps53679-i2c-15-58/PMIC-8 COMEX 1.05V Rail (out)/in4_lcrit_alarm - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail Curr (out)/curr1_crit_alarm - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail Curr (out)/curr1_max_alarm - - tps53679-i2c-15-61/PMIC-9 PSU 12V Rail (in)/in1_alarm - - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail (out)/in2_crit_alarm - - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail (out)/in2_lcrit_alarm + - tps53679-i2c-15-61/PMIC-9 PSU 12V Rail (in1)/in1_alarm + - tps53679-i2c-15-61/PMIC-9 PSU 12V Rail (in2)/in2_alarm + - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail (out)/in3_crit_alarm + - tps53679-i2c-15-61/PMIC-9 COMEX 1.2V Rail (out)/in3_lcrit_alarm - dps460-i2c-4-58/PSU-1(L) 220V 
Rail Curr (in)/curr1_crit_alarm - dps460-i2c-4-58/PSU-1(L) 220V Rail Curr (in)/curr1_max_alarm @@ -1675,40 +1731,40 @@ sensors_checks: - mlxsw-i2c-2-48/front panel 031/temp32_fault - mlxsw-i2c-2-48/front panel 032/temp33_fault - - tps53679-i2c-5-62/PMIC-1 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-62/PMIC-1 Temp 1/temp1_max_alarm - - tps53679-i2c-5-62/PMIC-1 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-62/PMIC-1 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-64/PMIC-2 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-64/PMIC-2 Temp 1/temp1_max_alarm - - tps53679-i2c-5-64/PMIC-2 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-64/PMIC-2 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-66/PMIC-3 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-66/PMIC-3 Temp 1/temp1_max_alarm - - tps53679-i2c-5-66/PMIC-3 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-66/PMIC-3 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-68/PMIC-4 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-68/PMIC-4 Temp 1/temp1_max_alarm - - tps53679-i2c-5-68/PMIC-4 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-68/PMIC-4 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-6a/PMIC-5 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-6a/PMIC-5 Temp 1/temp1_max_alarm - - tps53679-i2c-5-6a/PMIC-5 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-6a/PMIC-5 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-6c/PMIC-6 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-6c/PMIC-6 Temp 1/temp1_max_alarm - - tps53679-i2c-5-6c/PMIC-6 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-6c/PMIC-6 Temp 2/temp2_max_alarm - - - tps53679-i2c-5-6e/PMIC-7 Temp 1/temp1_crit_alarm - - tps53679-i2c-5-6e/PMIC-7 Temp 1/temp1_max_alarm - - tps53679-i2c-5-6e/PMIC-7 Temp 2/temp2_crit_alarm - - tps53679-i2c-5-6e/PMIC-7 Temp 2/temp2_max_alarm + - xdpe12284-i2c-5-62/PMIC-1 Temp 1/temp1_crit_alarm + - xdpe12284-i2c-5-62/PMIC-1 Temp 1/temp1_max_alarm + - xdpe12284-i2c-5-62/PMIC-1 Temp 2/temp2_crit_alarm + - xdpe12284-i2c-5-62/PMIC-1 Temp 2/temp2_max_alarm + + - xdpe12284-i2c-5-64/PMIC-2 Temp 1/temp1_crit_alarm + - xdpe12284-i2c-5-64/PMIC-2 Temp 1/temp1_max_alarm + - xdpe12284-i2c-5-64/PMIC-2 Temp 2/temp2_crit_alarm + - xdpe12284-i2c-5-64/PMIC-2 Temp 2/temp2_max_alarm + + - xdpe12284-i2c-5-66/PMIC-3 Temp 1/temp1_crit_alarm + - xdpe12284-i2c-5-66/PMIC-3 Temp 1/temp1_max_alarm + - xdpe12284-i2c-5-66/PMIC-3 Temp 2/temp2_crit_alarm + - xdpe12284-i2c-5-66/PMIC-3 Temp 2/temp2_max_alarm + + - xdpe12284-i2c-5-68/PMIC-4 Temp 1/temp1_crit_alarm + - xdpe12284-i2c-5-68/PMIC-4 Temp 1/temp1_max_alarm + - xdpe12284-i2c-5-68/PMIC-4 Temp 2/temp2_crit_alarm + - xdpe12284-i2c-5-68/PMIC-4 Temp 2/temp2_max_alarm + + - xdpe12284-i2c-5-6a/PMIC-5 Temp 1/temp1_crit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 Temp 1/temp1_max_alarm + - xdpe12284-i2c-5-6a/PMIC-5 Temp 2/temp2_crit_alarm + - xdpe12284-i2c-5-6a/PMIC-5 Temp 2/temp2_max_alarm + + - xdpe12284-i2c-5-6c/PMIC-6 Temp 1/temp1_crit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 Temp 1/temp1_max_alarm + - xdpe12284-i2c-5-6c/PMIC-6 Temp 2/temp2_crit_alarm + - xdpe12284-i2c-5-6c/PMIC-6 Temp 2/temp2_max_alarm + + - xdpe12284-i2c-5-6e/PMIC-7 Temp 1/temp1_crit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 Temp 1/temp1_max_alarm + - xdpe12284-i2c-5-6e/PMIC-7 Temp 2/temp2_crit_alarm + - xdpe12284-i2c-5-6e/PMIC-7 Temp 2/temp2_max_alarm - tps53679-i2c-15-58/PMIC-8 Temp 1/temp1_crit_alarm - tps53679-i2c-15-58/PMIC-8 Temp 1/temp1_max_alarm @@ -1721,30 +1777,18 @@ sensors_checks: - tps53679-i2c-15-61/PMIC-9 Temp 2/temp2_max_alarm - dps460-i2c-4-58/PSU-1(L) Temp 1/temp1_crit_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 1/temp1_lcrit_alarm - dps460-i2c-4-58/PSU-1(L) Temp 
1/temp1_max_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 1/temp1_min_alarm - dps460-i2c-4-58/PSU-1(L) Temp 2/temp2_crit_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 2/temp2_lcrit_alarm - dps460-i2c-4-58/PSU-1(L) Temp 2/temp2_max_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 2/temp2_min_alarm - dps460-i2c-4-58/PSU-1(L) Temp 3/temp3_crit_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 3/temp3_lcrit_alarm - dps460-i2c-4-58/PSU-1(L) Temp 3/temp3_max_alarm - - dps460-i2c-4-58/PSU-1(L) Temp 3/temp3_min_alarm - dps460-i2c-4-59/PSU-2(R) Temp 1/temp1_crit_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 1/temp1_lcrit_alarm - dps460-i2c-4-59/PSU-2(R) Temp 1/temp1_max_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 1/temp1_min_alarm - dps460-i2c-4-59/PSU-2(R) Temp 2/temp2_crit_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 2/temp2_lcrit_alarm - dps460-i2c-4-59/PSU-2(R) Temp 2/temp2_max_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 2/temp2_min_alarm - dps460-i2c-4-59/PSU-2(R) Temp 3/temp3_crit_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 3/temp3_lcrit_alarm - dps460-i2c-4-59/PSU-2(R) Temp 3/temp3_max_alarm - - dps460-i2c-4-59/PSU-2(R) Temp 3/temp3_min_alarm compares: power: [] temp: @@ -2187,6 +2231,92 @@ sensors_checks: temp: [] psu_skips: {} + x86_64-accton_wedge100bf_65x-r0: + alarms: + fan: [] + power: [] + temp: [] + + compares: + fan: [] + power: [] + temp: + - - tmp75-i2c-3-48/Outlet Middle Temp/temp1_input + - tmp75-i2c-3-48/Outlet Middle Temp/temp1_max + - - tmp75-i2c-3-49/Inlet Middle Temp/temp1_input + - tmp75-i2c-3-49/Inlet Middle Temp/temp1_max + - - tmp75-i2c-3-4a/Inlet Left Temp/temp1_input + - tmp75-i2c-3-4a/Inlet Left Temp/temp1_max + - - tmp75-i2c-3-4b/Switch Temp/temp1_input + - tmp75-i2c-3-4b/Switch Temp/temp1_max + - - tmp75-i2c-3-4c/Inlet Right Temp/temp1_input + - tmp75-i2c-3-4c/Inlet Right Temp/temp1_max + - - tmp75-i2c-8-48/Outlet Right Temp/temp1_input + - tmp75-i2c-8-48/Outlet Right Temp/temp1_max + - - tmp75-i2c-8-49/Outlet Left Temp/temp1_input + - tmp75-i2c-8-49/Outlet Left Temp/temp1_max + + non_zero: + fan: + - fancpld-i2c-8-33/Fan 1 front/fan1_input + - fancpld-i2c-8-33/Fan 1 rear/fan2_input + - fancpld-i2c-8-33/Fan 2 front/fan3_input + - fancpld-i2c-8-33/Fan 2 rear/fan4_input + - fancpld-i2c-8-33/Fan 3 front/fan5_input + - fancpld-i2c-8-33/Fan 3 rear/fan6_input + - fancpld-i2c-8-33/Fan 4 front/fan7_input + - fancpld-i2c-8-33/Fan 4 rear/fan8_input + - fancpld-i2c-8-33/Fan 5 front/fan9_input + - fancpld-i2c-8-33/Fan 5 rear/fan10_input + + power: [] + temp: [] + + psu_skips: {} + + x86_64-accton_wedge100bf_32x-r0: + alarms: + fan: [] + power: [] + temp: [] + + compares: + fan: [] + power: [] + temp: + - - tmp75-i2c-3-48/Outlet Middle Temp/temp1_input + - tmp75-i2c-3-48/Outlet Middle Temp/temp1_max + - - tmp75-i2c-3-49/Inlet Middle Temp/temp1_input + - tmp75-i2c-3-49/Inlet Middle Temp/temp1_max + - - tmp75-i2c-3-4a/Inlet Left Temp/temp1_input + - tmp75-i2c-3-4a/Inlet Left Temp/temp1_max + - - tmp75-i2c-3-4b/Switch Temp/temp1_input + - tmp75-i2c-3-4b/Switch Temp/temp1_max + - - tmp75-i2c-3-4c/Inlet Right Temp/temp1_input + - tmp75-i2c-3-4c/Inlet Right Temp/temp1_max + - - tmp75-i2c-8-48/Outlet Right Temp/temp1_input + - tmp75-i2c-8-48/Outlet Right Temp/temp1_max + - - tmp75-i2c-8-49/Outlet Left Temp/temp1_input + - tmp75-i2c-8-49/Outlet Left Temp/temp1_max + + non_zero: + fan: + - fancpld-i2c-8-33/Fan 1 front/fan1_input + - fancpld-i2c-8-33/Fan 1 rear/fan2_input + - fancpld-i2c-8-33/Fan 2 front/fan3_input + - fancpld-i2c-8-33/Fan 2 rear/fan4_input + - fancpld-i2c-8-33/Fan 3 front/fan5_input + - fancpld-i2c-8-33/Fan 3 
rear/fan6_input + - fancpld-i2c-8-33/Fan 4 front/fan7_input + - fancpld-i2c-8-33/Fan 4 rear/fan8_input + - fancpld-i2c-8-33/Fan 5 front/fan9_input + - fancpld-i2c-8-33/Fan 5 rear/fan10_input + + power: [] + temp: [] + + psu_skips: {} + x86_64-arista_7060_cx32s: alarms: fan: diff --git a/ansible/group_vars/vm_host/creds.yml b/ansible/group_vars/vm_host/creds.yml index cfda73cbb0..029ab9a68a 100644 --- a/ansible/group_vars/vm_host/creds.yml +++ b/ansible/group_vars/vm_host/creds.yml @@ -1,5 +1,4 @@ --- ansible_user: use_own_value ansible_password: use_own_value -ansible_sudo_pass: use_own_value - +ansible_become_password: use_own_value diff --git a/ansible/library/show_interface.py b/ansible/library/show_interface.py index e56b9852a6..4ca76d456a 100644 --- a/ansible/library/show_interface.py +++ b/ansible/library/show_interface.py @@ -93,7 +93,7 @@ def collect_interface_status(self): rc, self.out, err = self.module.run_command(command, executable='/bin/bash', use_unsafe_shell=True) for line in self.out.split("\n"): line = line.strip() - if regex_int.match(line): + if regex_int.match(line) and interface == regex_int.match(line).group(1): self.int_status[interface]['name'] = regex_int.match(line).group(1) self.int_status[interface]['speed'] = regex_int.match(line).group(2) self.int_status[interface]['alias'] = regex_int.match(line).group(4) diff --git a/ansible/library/test_facts.py b/ansible/library/test_facts.py index 8bdfb2be13..a8696fe0e1 100644 --- a/ansible/library/test_facts.py +++ b/ansible/library/test_facts.py @@ -3,6 +3,7 @@ import traceback import ipaddr as ipaddress import csv +import string from operator import itemgetter from itertools import groupby import yaml @@ -109,7 +110,7 @@ def __init__(self, testbed_file): def read_testbed_topo(self): CSV_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'server', 'vm_base', 'dut', 'comment') with open(self.testbed_filename) as f: - topo = csv.DictReader(f, fieldnames=CSV_FIELDS) + topo = csv.DictReader(f, fieldnames=CSV_FIELDS, delimiter=',') # Validate all field are in the same order and are present header = next(topo) @@ -125,6 +126,9 @@ def read_testbed_topo(self): line['ptf_ip'] = str(ptfaddress.ip) line['ptf_netmask'] = str(ptfaddress.netmask) + line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';') + del line['dut'] + self.testbed_topo[line['conf-name']] = line return diff --git a/ansible/roles/sonic-common/tasks/sensors_check.yml b/ansible/roles/sonic-common/tasks/sensors_check.yml index 067686367d..0178df17b9 100644 --- a/ansible/roles/sonic-common/tasks/sensors_check.yml +++ b/ansible/roles/sonic-common/tasks/sensors_check.yml @@ -1,25 +1,4 @@ -- name: Get platform monitor docker name - shell: docker ps -a --format '{{'{{'}}.Image{{'}} {{'}}.Names{{'}}'}}' | grep 'platform' | awk '{print $2}' - register: pmon_ps - -- name: Get platform name - shell: show platform summary | grep Platform | awk '{print $2}' - register: platform - -- include_tasks: add_container_to_inventory.yml +- name: run test + include_tasks: roles/test/tasks/pytest_runner.yml vars: - container_name: "{{ pmon_ps.stdout }}" - -- name: Gather sensors - sensors_facts: checks={{ sensors_checks[platform.stdout] }} - delegate_to: "{{ ansible_host }}_{{ pmon_ps.stdout }}" - -- name: Output of sensors information - debug: var=vars['sensors'] - -- name: Assert no alarm - assert: { that: "{{ vars['sensors']['alarm'] }} == False" } - -- name: Show warnings - debug: var=vars['sensors']['warnings'] - when: 
vars['sensors']['warning'] + test_node: platform/test_sensors.py \ No newline at end of file diff --git a/ansible/roles/test/files/helpers/ferret.py b/ansible/roles/test/files/helpers/ferret.py deleted file mode 100644 index 954d558e27..0000000000 --- a/ansible/roles/test/files/helpers/ferret.py +++ /dev/null @@ -1,335 +0,0 @@ -#/usr/bin/env python - -# python t.py -f /tmp/vxlan_decap.json -s 192.168.8.1 - -import SimpleHTTPServer -import SocketServer -import select -import shutil -import json -import BaseHTTPServer -import time -import socket -import ctypes -import ssl -import struct -import binascii -import itertools -import argparse -import os - -from pprint import pprint - -from cStringIO import StringIO -from functools import partial -from collections import namedtuple - - -Record = namedtuple('Record', ['hostname', 'family', 'expired', 'lo', 'mac', 'vxlan_id']) - -ASIC_TYPE=None - - -class Ferret(BaseHTTPServer.BaseHTTPRequestHandler): - server_version = "FerretHTTP/0.1" - - def do_POST(self): - if not self.path.startswith('/Ferret/NeighborAdvertiser/Slices/'): - self.send_error(404, "URL is not supported") - else: - info = self.extract_info() - self.update_db(info) - self.send_resp(info) - - def extract_info(self): - c_len = int(self.headers.getheader('content-length', 0)) - body = self.rfile.read(c_len) - j = json.loads(body) - return j - - def generate_entries(self, hostname, family, expire, lo, info, mapping_family): - for i in info['vlanInterfaces']: - vxlan_id = int(i['vxlanId']) - for j in i[mapping_family]: - mac = str(j['macAddr']).replace(':', '') - addr = str(j['ipAddr']) - r = Record(hostname=hostname, family=family, expired=expire, lo=lo, mac=mac, vxlan_id=vxlan_id) - self.db[addr] = r - - return - - def update_db(self, info): - hostname = str(info['switchInfo']['name']) - lo_ipv4 = str(info['switchInfo']['ipv4Addr']) - lo_ipv6 = str(info['switchInfo']['ipv6Addr']) - duration = int(info['respondingSchemes']['durationInSec']) - expired = time.time() + duration - - self.generate_entries(hostname, 'ipv4', expired, lo_ipv4, info, 'ipv4AddrMappings') - self.generate_entries(hostname, 'ipv6', expired, lo_ipv6, info, 'ipv6AddrMappings') - - return - - def send_resp(self, info): - result = { - 'ipv4Addr': self.src_ip - } - f, l = self.generate_response(result) - self.send_response(200) - self.send_header("Content-type", "application/json") - self.send_header("Content-Length", str(l)) - self.send_header("Last-Modified", self.date_time_string()) - self.end_headers() - shutil.copyfileobj(f, self.wfile) - f.close() - return - - def generate_response(self, response): - f = StringIO() - json.dump(response, f) - l = f.tell() - f.seek(0) - return f, l - - -class RestAPI(object): - PORT = 448 - - def __init__(self, obj, db, src_ip): - self.httpd = SocketServer.TCPServer(("", self.PORT), obj) - self.context = ssl.SSLContext(ssl.PROTOCOL_TLS) - self.context.verify_mode = ssl.CERT_NONE - self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) - self.context.load_cert_chain(certfile="/opt/test.pem", keyfile="/opt/test.key") - self.httpd.socket=self.context.wrap_socket(self.httpd.socket, server_side=True) - self.db = db - obj.db = db - obj.src_ip = src_ip - - def handler(self): - return self.httpd.fileno() - - def handle(self): - return self.httpd.handle_request() - - -class Interface(object): - ETH_P_ALL = 0x03 - RCV_TIMEOUT = 1000 - RCV_SIZE = 4096 - SO_ATTACH_FILTER = 26 - - def __init__(self, iface, bpf_src): - self.iface = iface - self.socket = socket.socket(socket.AF_PACKET, 
socket.SOCK_RAW, socket.htons(self.ETH_P_ALL)) - if bpf_src is not None: - blob = ctypes.create_string_buffer(''.join(struct.pack("HBBI", *e) for e in bpf_src)) - address = ctypes.addressof(blob) - bpf = struct.pack('HL', len(bpf_src), address) - self.socket.setsockopt(socket.SOL_SOCKET, self.SO_ATTACH_FILTER, bpf) - self.socket.bind((self.iface, 0)) - self.socket.settimeout(self.RCV_TIMEOUT) - - def __del__(self): - self.socket.close() - - def handler(self): - return self.socket.fileno() - - def recv(self): - return self.socket.recv(self.RCV_SIZE) - - def send(self, data): - self.socket.send(data) - - -class Poller(object): - def __init__(self, httpd, interfaces, responder): - self.responder = responder - self.mapping = {interface.handler(): interface for interface in interfaces} - self.httpd = httpd - - def poll(self): - handlers = self.mapping.keys() + [self.httpd.handler()] - while True: - (rdlist, _, _) = select.select(handlers, [], []) - for handler in rdlist: - if handler == self.httpd.handler(): - self.httpd.handle() - else: - self.responder.action(self.mapping[handler]) - - -class Responder(object): - ARP_PKT_LEN = 60 - ARP_OP_REQUEST = 1 - def __init__(self, db): - self.arp_chunk = binascii.unhexlify('08060001080006040002') # defines a part of the packet for ARP Reply - self.arp_pad = binascii.unhexlify('00' * 18) - self.db = db - - def hexdump(self, data): - print " ".join("%02x" % ord(d) for d in data) - - def action(self, interface): - data = interface.recv() - - ext_dst_mac = data[0x00:0x06] - ext_src_mac = data[0x06:0x0c] - ext_eth_type = data[0x0c:0x0e] - if ext_eth_type != binascii.unhexlify('0800'): - print "Not 0x800 eth type" - self.hexdump(data) - print - return - src_ip = data[0x001a:0x001e] - dst_ip = data[0x1e:0x22] - gre_flags = data[0x22:0x24] - gre_type = data[0x24:0x26] - - gre_type_r = struct.unpack('!H', gre_type)[0] - self.hexdump(data) - if gre_type_r == 0x88be: # Broadcom - arp_request = data[0x26:] - if ASIC_TYPE == "barefoot": - # ERSPAN type 2 - # Ethernet(14) + IP(20) + GRE(4) + ERSPAN(8) = 46 = 0x2e - # Note: Count GRE as 4 byte, only mandatory fields. 
- # References: https://tools.ietf.org/html/rfc1701 - # https://tools.ietf.org/html/draft-foschiano-erspan-00 - arp_request = data[0x2E:] - - elif gre_type_r == 0x8949: # Mellanox - arp_request = data[0x3c:] - else: - print "GRE type 0x%x is not supported" % gre_type_r - self.hexdump(data) - print - return - - if len(arp_request) > self.ARP_PKT_LEN: - print "Too long packet" - self.hexdump(data) - print - return - - remote_mac, remote_ip, request_ip, op_type = self.extract_arp_info(arp_request) - # Don't send ARP response if the ARP op code is not request - if op_type != self.ARP_OP_REQUEST: - return - - request_ip_str = socket.inet_ntoa(request_ip) - - if request_ip_str not in self.db: - print "Not in db" - return - - r = self.db[request_ip_str] - if r.expired < time.time(): - print "Expired row in db" - del self.db[request_ip_str] - return - - if r.family == 'ipv4': - new_pkt = ext_src_mac + ext_dst_mac + ext_eth_type # outer eth frame - ipv4 = binascii.unhexlify('45000060977e400040110000') + dst_ip + src_ip # ip - crc = self.calculate_header_crc(ipv4) - ipv4 = ipv4[0:10] + crc + ipv4[12:] - new_pkt += ipv4 - new_pkt += binascii.unhexlify('c00012b5004c1280') # udp - new_pkt += binascii.unhexlify('08000000%06x00' % r.vxlan_id) # vxlan - - arp_reply = self.generate_arp_reply(binascii.unhexlify(r.mac), remote_mac, request_ip, remote_ip) - new_pkt += arp_reply - else: - print 'Support of family %s is not implemented' % r.family - return - - interface.send(new_pkt) - - return - - def calculate_header_crc(self, ipv4): - s = 0 - for l,r in zip(ipv4[::2], ipv4[1::2]): - l_u = struct.unpack("B", l)[0] - r_u = struct.unpack("B", r)[0] - s += (l_u << 8) + r_u - - c = s >> 16 - s = s & 0xffff - - while c != 0: - s += c - c = s >> 16 - s = s & 0xffff - - s = 0xffff - s - - return binascii.unhexlify("%x" % s) - - def extract_arp_info(self, data): - # remote_mac, remote_ip, request_ip, op_type - return data[6:12], data[28:32], data[38:42], (ord(data[20]) * 256 + ord(data[21])) - - def generate_arp_reply(self, local_mac, remote_mac, local_ip, remote_ip): - eth_hdr = remote_mac + local_mac - return eth_hdr + self.arp_chunk + local_mac + local_ip + remote_mac + remote_ip + self.arp_pad - -def get_bpf_for_bgp(): - bpf_src = [ - (0x28, 0, 0, 0x0000000c), # (000) ldh [12] - (0x15, 0, 2, 0x00000800), # (001) jeq #0x800 jt 2 jf 4 - (0x30, 0, 0, 0x00000017), # (002) ldb [23] - (0x15, 6, 7, 0x0000002f), # (003) jeq #0x2f jt 10 jf 11 - (0x15, 0, 6, 0x000086dd), # (004) jeq #0x86dd jt 5 jf 11 - (0x30, 0, 0, 0x00000014), # (005) ldb [20] - (0x15, 3, 0, 0x0000002f), # (006) jeq #0x2f jt 10 jf 7 - (0x15, 0, 3, 0x0000002c), # (007) jeq #0x2c jt 8 jf 11 - (0x30, 0, 0, 0x00000036), # (008) ldb [54] - (0x15, 0, 1, 0x0000002f), # (009) jeq #0x2f jt 10 jf 11 - (0x6, 0, 0, 0x00040000), # (010) ret #262144 - (0x6, 0, 0, 0x00000000), # (011) ret #0 - ] - return bpf_src - - -def extract_iface_names(config_file): - with open(config_file) as fp: - graph = json.load(fp) - - net_ports = [] - for name, val in graph['minigraph_portchannels'].items(): - members = ['eth%d' % graph['minigraph_port_indices'][member] for member in val['members']] - net_ports.extend(members) - - return net_ports - -def parse_args(): - parser = argparse.ArgumentParser(description='Ferret VXLAN API') - parser.add_argument('-f', '--config-file', help='file with configuration', required=True) - parser.add_argument('-s', '--src-ip', help='Ferret endpoint ip', required=True) - parser.add_argument('-a', '--asic-type', help='ASIC vendor name', type=str, 
required=False) - args = parser.parse_args() - if not os.path.isfile(args.config_file): - print "Can't open config file '%s'" % args.config_file - exit(1) - - global ASIC_TYPE - ASIC_TYPE = args.asic_type - return args.config_file, args.src_ip - -def main(): - db = {} - - config_file, src_ip = parse_args() - iface_names = extract_iface_names(config_file) - rest = RestAPI(Ferret, db, src_ip) - bpf_src = get_bpf_for_bgp() - ifaces = [Interface(iface_name, bpf_src) for iface_name in iface_names] - responder = Responder(db) - p = Poller(rest, ifaces, responder) - p.poll() - -if __name__ == '__main__': - main() diff --git a/ansible/roles/test/files/helpers/ferret.py b/ansible/roles/test/files/helpers/ferret.py new file mode 120000 index 0000000000..4d68dcb9ad --- /dev/null +++ b/ansible/roles/test/files/helpers/ferret.py @@ -0,0 +1 @@ +../../../../../tests/arp/files/ferret.py \ No newline at end of file diff --git a/ansible/roles/test/files/ptftests/advanced-reboot.py b/ansible/roles/test/files/ptftests/advanced-reboot.py index 2c6c2e1af9..c63a117d35 100644 --- a/ansible/roles/test/files/ptftests/advanced-reboot.py +++ b/ansible/roles/test/files/ptftests/advanced-reboot.py @@ -57,12 +57,12 @@ import re from collections import defaultdict import json -import paramiko import Queue import pickle from operator import itemgetter import scapy.all as scapyall import itertools +from device_connection import DeviceConnection from arista import Arista import sad_path as sp @@ -125,6 +125,7 @@ def __init__(self): self.test_params = testutils.test_params_get() self.check_param('verbose', False, required=False) self.check_param('dut_username', '', required=True) + self.check_param('dut_password', '', required=True) self.check_param('dut_hostname', '', required=True) self.check_param('reboot_limit_in_seconds', 30, required=False) self.check_param('reboot_type', 'fast-reboot', required=False) @@ -217,6 +218,12 @@ def __init__(self): self.allow_vlan_flooding = bool(self.test_params['allow_vlan_flooding']) + self.dut_connection = DeviceConnection( + self.test_params['dut_hostname'], + self.test_params['dut_username'], + password=self.test_params['dut_password'] + ) + return def read_json(self, name): @@ -411,7 +418,7 @@ def get_sad_info(self): def init_sad_oper(self): if self.sad_oper: self.log("Preboot/Inboot Operations:") - self.sad_handle = sp.SadTest(self.sad_oper, self.ssh_targets, self.portchannel_ports, self.vm_dut_map, self.test_params, self.dut_ssh, self.vlan_ports) + self.sad_handle = sp.SadTest(self.sad_oper, self.ssh_targets, self.portchannel_ports, self.vm_dut_map, self.test_params, self.vlan_ports) (self.ssh_targets, self.portchannel_ports, self.neigh_vm, self.vlan_ports), (log_info, fails) = self.sad_handle.setup() self.populate_fail_info(fails) for log in log_info: @@ -480,7 +487,6 @@ def setUp(self): self.reboot_type = self.test_params['reboot_type'] if self.reboot_type not in ['fast-reboot', 'warm-reboot']: raise ValueError('Not supported reboot_type %s' % self.reboot_type) - self.dut_ssh = self.test_params['dut_username'] + '@' + self.test_params['dut_hostname'] self.dut_mac = self.test_params['dut_mac'] # get VM info @@ -509,7 +515,7 @@ def setUp(self): self.from_server_dst_ports = self.portchannel_ports self.log("Test params:") - self.log("DUT ssh: %s" % self.dut_ssh) + self.log("DUT ssh: %s@%s" % (self.test_params['dut_username'], self.test_params['dut_hostname'])) self.log("DUT reboot limit in seconds: %s" % self.limit) self.log("DUT mac address: %s" % self.dut_mac) @@ -1004,7 +1010,7 @@ 
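The hunks above wire the new Paramiko-based DeviceConnection into advanced-reboot.py, and the reboot_dut() hunk just below completes the switch away from the raw `ssh` subprocess call. A minimal usage sketch (Python 2, to match the ptftests code; the hostname and credentials here are hypothetical, not taken from this patch):

```python
# Sketch: drive a DUT through DeviceConnection. Paramiko first tries
# key-based auth and falls back to username/password if the key fails.
from device_connection import DeviceConnection

conn = DeviceConnection('10.0.0.100', 'admin', password='password')  # hypothetical credentials
stdout, stderr, rc = conn.execCommand('sudo fast-reboot', timeout=30)
if rc != 0:
    print 'reboot command failed: %s' % str(stderr)
```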
def reboot_dut(self): time.sleep(self.reboot_delay) self.log("Rebooting remote side") - stdout, stderr, return_code = self.cmd(["ssh", "-oStrictHostKeyChecking=no", self.dut_ssh, "sudo " + self.reboot_type]) + stdout, stderr, return_code = self.dut_connection.execCommand("sudo " + self.reboot_type) if stdout != []: self.log("stdout from %s: %s" % (self.reboot_type, str(stdout))) if stderr != []: diff --git a/ansible/roles/test/files/ptftests/arista.py b/ansible/roles/test/files/ptftests/arista.py index 7bab31fff5..2117d945df 100644 --- a/ansible/roles/test/files/ptftests/arista.py +++ b/ansible/roles/test/files/ptftests/arista.py @@ -64,6 +64,9 @@ def connect(self): self.do_cmd('enable') self.do_cmd('terminal length 0') + version_output = self.do_cmd('show version') + self.veos_version = self.parse_version(version_output) + return self.shell def get_arista_prompt(self, first_prompt): @@ -373,7 +376,16 @@ def change_bgp_neigh_state(self, asn, is_up=True): state = ['shut', 'no shut'] self.do_cmd('configure') self.do_cmd('router bgp %s' % asn) - self.do_cmd('%s' % state[is_up]) + if self.veos_version < (4, 20): + self.do_cmd('%s' % state[is_up]) + else: + if is_up: + self.do_cmd('%s' % state[is_up]) + else: + # shutting down BGP pops up a confirmation prompt: + # "You are attempting to shutdown BGP. Are you sure you want to shutdown? [confirm]" + self.do_cmd('%s' % state[is_up], prompt = '[confirm]') + self.do_cmd('y') self.do_cmd('exit') self.do_cmd('exit') @@ -525,3 +537,13 @@ def check_change_time(self, output, entity, what): # Note: the first item is a placeholder return 0, change_count + + def parse_version(self, output): + # EOS versions such as 4.20.1F must not be compared as floats (4.9 would compare greater than 4.20), so keep (major, minor) as an integer tuple. + version = (0, 0) + for line in output.split('\n'): + if 'Software image version: ' in line: + match = re.search('(\d+)\.(\d+)', line) + if match: + version = (int(match.group(1)), int(match.group(2))) + return version diff --git a/ansible/roles/test/files/ptftests/device_connection.py b/ansible/roles/test/files/ptftests/device_connection.py new file mode 100644 index 0000000000..a29ea493b0 --- /dev/null +++ b/ansible/roles/test/files/ptftests/device_connection.py @@ -0,0 +1,63 @@ +import paramiko +import logging +from paramiko.ssh_exception import BadHostKeyException, AuthenticationException, SSHException + +logger = logging.getLogger(__name__) + +DEFAULT_CMD_EXECUTION_TIMEOUT_SEC = 10 + +class DeviceConnection: + ''' + DeviceConnection uses the Paramiko module to connect to devices + + Paramiko uses a fallback mechanism: it first tries the ssh key and, + if that fails, attempts the username/password combination + ''' + def __init__(self, hostname, username, password=None): + ''' + Class constructor + + @param hostname: hostname of device to connect to + @param username: username for device connection + @param password: password for device connection + ''' + self.hostname = hostname + self.username = username + self.password = password + + def execCommand(self, cmd, timeout=DEFAULT_CMD_EXECUTION_TIMEOUT_SEC): + ''' + Executes a command on the remote device + + @param cmd: command to be run on remote device + @param timeout: timeout for command run session + @return: stdout, stderr, value + stdout is a list of lines of the remote stdout gathered during command execution + stderr is a list of lines of the remote stderr gathered during command execution + value: 0 if command execution raised no exception + nonzero if exception is raised + ''' + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + if isinstance(cmd, list): + cmd = ' '.join(cmd) + + 
stdOut = stdErr = [] + retValue = 1 + try: + client.connect(self.hostname, username=self.username, password=self.password, allow_agent=False) + si, so, se = client.exec_command(cmd, timeout=timeout) + stdOut = so.readlines() + stdErr = se.readlines() + retValue = 0 + except SSHException as sshException: + logger.error('SSH Command failed with message: %s' % sshException) + except AuthenticationException as authenticationException: + logger.error('SSH Authentication failure with message: %s' % authenticationException) + except BadHostKeyException as badHostKeyException: + logger.error('SSH Authentication failure with message: %s' % badHostKeyException) + finally: + client.close() + + return stdOut, stdErr, retValue diff --git a/ansible/roles/test/files/ptftests/dip_sip.py b/ansible/roles/test/files/ptftests/dip_sip.py deleted file mode 100644 index eb6ab52c10..0000000000 --- a/ansible/roles/test/files/ptftests/dip_sip.py +++ /dev/null @@ -1,199 +0,0 @@ -''' -Description: - This file contains the DIP=SIP test for SONiC - - This test uses UDP packets to validate that HW supports routing of L3 packets with DIP=SIP - -Topologies: - Supports t0, t0-16, t0-56, t0-64, t0-64-32, t0-116, t1, t1-lag t1-64-lag and t1-64-lag-clet topology - -Parameters: - testbed_type - testbed type - dst_host_mac - destination host MAC address - src_host_mac - source host MAC address - dst_router_mac - destination router MAC address - src_router_mac - source router MAC address - dst_router_ipv4 - destination router IPv4 address - src_router_ipv4 - source router IPv4 address - dst_router_ipv6 - destination router IPv6 address - src_router_ipv6 - source router IPv6 address - dst_port_ids - destination port array of indices (when router has a members) - src_port_ids - source port array of indices (when router has a members) - -Usage: - Example of how to start this script: - ptf --test-dir ptftests dip_sip.DipSipTest --platform-dir ptftests --platform remote \ - -t "testbed_type=''; \ - dst_host_mac=''; \ - src_host_mac=''; \ - dst_router_mac=''; \ - src_router_mac=''; \ - dst_router_ipv4=''; \ - src_router_ipv4=''; \ - dst_router_ipv6=''; \ - src_router_ipv6=''; \ - dst_port_ids=''; \ - src_port_ids=''" \ - --relax --debug info --log-file /tmp/dip_sip.DipSipTest.log \ - --disable-vxlan --disable-geneve --disable-erspan --disable-mpls --disable-nvgre - -Notes: - Please check the dip_sip.yml file to see the details of how this test works -''' - -#------------------------------------------------------------------------------- -# Global imports -#------------------------------------------------------------------------------- - -import logging -import ptf - -from ipaddress import ip_address -from ptf.base_tests import BaseTest - -from ptf.testutils import test_params_get -from ptf.testutils import simple_udp_packet -from ptf.testutils import simple_udpv6_packet -from ptf.testutils import send -from ptf.testutils import verify_packet_any_port - -#------------------------------------------------------------------------------- -# Testcase -#------------------------------------------------------------------------------- - -class PortLagRouterBasedTest: - def __init__(self, dipSipTest): - self.test = dipSipTest - self.testParams = dipSipTest.test_params - #-------------------------------------------------------------------------- - - def logParams(self): - self.test.log("Destination router mac is: " + self.dstRouterMac) - self.test.log("Destination router ipv4 is: " + self.dstRouterIpv4) - self.test.log("Destination router ipv6 
is: " + self.dstRouterIpv6) - - self.test.log("Destination host mac is: " + self.dstHostMac) - self.test.log("Destination host ipv4 is: " + self.dstHostIpv4) - self.test.log("Destination host ipv6 is: " + self.dstHostIpv6) - - self.test.log("Source router mac is: " + self.srcRouterMac) - self.test.log("Source router ipv4 is: " + self.srcRouterIpv4) - self.test.log("Source router ipv6 is: " + self.srcRouterIpv6) - - self.test.log("Source host mac is: " + self.srcHostMac) - self.test.log("Source host ipv4 is: " + self.srcHostIpv4) - self.test.log("Source host ipv6 is: " + self.srcHostIpv6) - - self.test.log("Destination port ids is: " + str([int(portId) for portId in self.dstPortIds])) - self.test.log("Source port ids is: " + str([int(portId) for portId in self.srcPortIds])) - - self.test.log("Packet TTL/HL is: " + str(self.pktTtlHlim)) - #-------------------------------------------------------------------------- - - def setUpParams(self): - self.dstRouterMac = self.testParams['dst_router_mac'] - self.dstRouterIpv4 = self.testParams['dst_router_ipv4'] - self.dstRouterIpv6 = self.testParams['dst_router_ipv6'] - - self.dstHostMac = self.testParams['dst_host_mac'] - self.dstHostIpv4 = str(ip_address(unicode(self.testParams['dst_router_ipv4'])) + 1) - self.dstHostIpv6 = str(ip_address(unicode(self.testParams['dst_router_ipv6'])) + 1) - - self.srcRouterMac = self.testParams['src_router_mac'] - self.srcRouterIpv4 = self.testParams['src_router_ipv4'] - self.srcRouterIpv6 = self.testParams['src_router_ipv6'] - - self.srcHostMac = self.testParams['src_host_mac'] - self.srcHostIpv4 = str(ip_address(unicode(self.testParams['src_router_ipv4'])) + 1) - self.srcHostIpv6 = str(ip_address(unicode(self.testParams['src_router_ipv6'])) + 1) - - self.dstPortIds = self.testParams['dst_port_ids'] - self.srcPortIds = self.testParams['src_port_ids'] - - self.pktTtlHlim = 64 # Default packet TTL/HL value - #-------------------------------------------------------------------------- - - def runTestIpv6(self): - self.test.log("Run IPv6 based test") - - pkt = simple_udpv6_packet(eth_dst=self.srcRouterMac, - eth_src=self.srcHostMac, - ipv6_src=self.dstHostIpv6, - ipv6_dst=self.dstHostIpv6, - ipv6_hlim=self.pktTtlHlim) - send(self.test, int(self.srcPortIds[0]), pkt) - - pkt = simple_udpv6_packet(eth_dst=self.dstHostMac, - eth_src=self.dstRouterMac, - ipv6_src=self.dstHostIpv6, - ipv6_dst=self.dstHostIpv6, - ipv6_hlim=self.pktTtlHlim-1) - - verify_packet_any_port(self.test, pkt, [int(port) for port in self.dstPortIds]) - - self.test.log("IPv6 based test: done") - #-------------------------------------------------------------------------- - - def runTestIpv4(self): - self.test.log("Run IPv4 based test") - - pkt = simple_udp_packet(eth_dst=self.srcRouterMac, - eth_src=self.srcHostMac, - ip_src=self.dstHostIpv4, - ip_dst=self.dstHostIpv4, - ip_ttl=self.pktTtlHlim) - send(self.test, int(self.srcPortIds[0]), pkt) - - pkt = simple_udp_packet(eth_dst=self.dstHostMac, - eth_src=self.dstRouterMac, - ip_src=self.dstHostIpv4, - ip_dst=self.dstHostIpv4, - ip_ttl=self.pktTtlHlim-1) - - verify_packet_any_port(self.test, pkt, [int(port) for port in self.dstPortIds]) - - self.test.log("IPv4 based test: done") - #-------------------------------------------------------------------------- - - def runTest(self): - self.setUpParams() - self.logParams() - - self.runTestIpv4() - self.runTestIpv6() - #-------------------------------------------------------------------------- - -class DipSipTest(BaseTest): - def __init__(self): - 
BaseTest.__init__(self) - #-------------------------------------------------------------------------- - - def log(self, message): - logging.info(message) - #-------------------------------------------------------------------------- - - def setUp(self): - self.log("SetUp testbed") - - self.dataplane = ptf.dataplane_instance - self.test_params = test_params_get() - self.testbed_type = self.test_params['testbed_type'] - #-------------------------------------------------------------------------- - - def tearDown(self): - self.log("TearDown testbed") - #-------------------------------------------------------------------------- - - def runTest(self): - if self.testbed_type in ['t0', 't0-16', 't0-56', 't0-64', 't0-64-32', 't0-116', 't1', 't1-lag', 't1-64-lag', 't1-64-lag-clet']: - self.log("Run PORT/LAG-router based test") - - test = PortLagRouterBasedTest(self) - test.runTest() - - self.log("PORT/LAG-router based test: done") - - return - - self.fail("Unexpected testbed type %s!" % (self.testbed_type)) - #-------------------------------------------------------------------------- diff --git a/ansible/roles/test/files/ptftests/hash_test.py b/ansible/roles/test/files/ptftests/hash_test.py index 76735bf2c0..43efb2a64c 100644 --- a/ansible/roles/test/files/ptftests/hash_test.py +++ b/ansible/roles/test/files/ptftests/hash_test.py @@ -5,11 +5,8 @@ #--------------------------------------------------------------------- # Global imports #--------------------------------------------------------------------- -import ipaddress import logging import random -import socket -import sys from ipaddress import ip_address, ip_network @@ -48,29 +45,20 @@ def setUp(self): ''' self.dataplane = ptf.dataplane_instance self.fib = fib.Fib(self.test_params['fib_info']) + self.testbed_type = self.test_params['testbed_type'] self.router_mac = self.test_params['router_mac'] + self.in_ports = self.test_params['in_ports'] self.src_ip_range = [unicode(x) for x in self.test_params['src_ip_range'].split(',')] self.dst_ip_range = [unicode(x) for x in self.test_params['dst_ip_range'].split(',')] self.src_ip_interval = lpm.LpmDict.IpInterval(ip_address(self.src_ip_range[0]), ip_address(self.src_ip_range[1])) self.dst_ip_interval = lpm.LpmDict.IpInterval(ip_address(self.dst_ip_range[0]), ip_address(self.dst_ip_range[1])) + self.vlan_ids = self.test_params.get('vlan_ids', []) self.hash_keys = self.test_params.get('hash_keys', ['src-ip', 'dst-ip', 'src-port', 'dst-port']) + self.dst_macs = self.test_params.get('dst_macs', []) # TODO self.balancing_range = self.test_params.get('balancing_range', self.DEFAULT_BALANCING_RANGE) - # Provide the list of all UP interfaces with index in sequence order starting from 0 - if self.test_params['testbed_type'] == 't1' or self.test_params['testbed_type'] == 't1-lag': - self.src_ports = range(0, 32) - if self.test_params['testbed_type'] == 't1-64-lag' or self.test_params['testbed_type'] == 't1-64-lag-clet': - self.src_ports = [0, 1, 4, 5, 16, 17, 20, 21, 34, 36, 37, 38, 39, 42, 44, 45, 46, 47, 50, 52, 53, 54, 55, 58, 60, 61, 62, 63] - if self.test_params['testbed_type'] == 't0': - self.src_ports = range(1, 25) + range(28, 32) - if self.test_params['testbed_type'] == 't0-56': - self.src_ports = [0, 1, 4, 5, 8, 9] + range(12, 18) + [20, 21, 24, 25, 28, 29, 32, 33, 36, 37] + range(40, 46) + [48, 49, 52, 53] - if self.test_params['testbed_type'] == 't0-64': - self.src_ports = range(0, 2) + range(4, 18) + range(20, 33) + range(36, 43) + range(48, 49) + range(52, 59) - if self.test_params['testbed_type'] == 
't0-116': - self.src_ports = range(0, 120) #--------------------------------------------------------------------- def check_hash(self, hash_key): @@ -81,15 +69,24 @@ def check_hash(self, hash_key): if exp_port_list <= 1: logging.warning("{} has only {} nexthop".format(dst_ip, exp_port_list)) assert False - in_port = random.choice([port for port in self.src_ports if port not in exp_port_list]) + in_port = random.choice([port for port in self.in_ports if port not in exp_port_list]) hit_count_map = {} - for _ in range(0, self.BALANCING_TEST_TIMES): - logging.info("in_port: {}".format(in_port)) - (matched_index, _) = self.check_ip_route(hash_key, in_port, dst_ip, exp_port_list) - hit_count_map[matched_index] = hit_count_map.get(matched_index, 0) + 1 - logging.info("hit count map: {}".format(hit_count_map)) - self.check_balancing(next_hop.get_next_hop(), hit_count_map) + if hash_key == 'ingress-port': # The sample size is too small for hash_key ingress-port, so check it loosely (just verify that the ASIC actually uses the hash field as a load-balancing factor) + for in_port in [port for port in self.in_ports if port not in exp_port_list]: + logging.info("in_port: {}".format(in_port)) + (matched_index, _) = self.check_ip_route(hash_key, in_port, dst_ip, exp_port_list) + hit_count_map[matched_index] = hit_count_map.get(matched_index, 0) + 1 + logging.info("hit count map: {}".format(hit_count_map)) + assert len(hit_count_map.keys()) > 1 + else: + for _ in range(0, self.BALANCING_TEST_TIMES): + logging.info("in_port: {}".format(in_port)) + (matched_index, _) = self.check_ip_route(hash_key, in_port, dst_ip, exp_port_list) + hit_count_map[matched_index] = hit_count_map.get(matched_index, 0) + 1 + logging.info("hit count map: {}".format(hit_count_map)) + + self.check_balancing(next_hop.get_next_hop(), hit_count_map) def check_ip_route(self, hash_key, in_port, dst_ip, dst_port_list): if ip_network(unicode(dst_ip)).version == 4: @@ -111,27 +108,38 @@ def check_ipv4_route(self, hash_key, in_port, dst_port_list): @param in_port: index of port to use for sending packet to switch @param dst_port_list: list of ports on which to expect packet to come back from the switch ''' - src_mac = self.dataplane.get_mac(0, 0) + base_mac = self.dataplane.get_mac(0, 0) ip_src = self.src_ip_interval.get_random_ip() if hash_key == 'src-ip' else self.src_ip_interval.get_first_ip() ip_dst = self.dst_ip_interval.get_random_ip() if hash_key == 'dst-ip' else self.dst_ip_interval.get_first_ip() sport = random.randint(0, 65535) if hash_key == 'src-port' else 1234 dport = random.randint(0, 65535) if hash_key == 'dst-port' else 80 + src_mac = (base_mac[:-5] + "%02x" % random.randint(0, 255) + ":" + "%02x" % random.randint(0, 255)) if hash_key == 'src-mac' else base_mac + dst_mac = random.choice(self.dst_macs) if hash_key == 'dst-mac' else self.router_mac + vlan_id = random.choice(self.vlan_ids) if hash_key == 'vlan-id' else 0 + ip_proto = random.randint(100, 200) if hash_key == 'ip-proto' else None - pkt = simple_tcp_packet( - eth_dst=self.router_mac, + pkt = simple_tcp_packet(pktlen=100 if vlan_id == 0 else 104, + eth_dst=dst_mac, eth_src=src_mac, + dl_vlan_enable=False if vlan_id == 0 else True, + vlan_vid=vlan_id, + vlan_pcp=0, ip_src=ip_src, ip_dst=ip_dst, tcp_sport=sport, tcp_dport=dport, ip_ttl=64) exp_pkt = simple_tcp_packet( - eth_src=self.router_mac, + eth_src=dst_mac, ip_src=ip_src, ip_dst=ip_dst, tcp_sport=sport, tcp_dport=dport, ip_ttl=63) + + if hash_key == 'ip-proto': + pkt['IP'].proto = ip_proto + 
exp_pkt['IP'].proto = ip_proto masked_exp_pkt = Mask(exp_pkt) masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") @@ -149,27 +157,39 @@ def check_ipv6_route(self, hash_key, in_port, dst_port_list): @param dst_port_list: list of ports on which to expect packet to come back from the switch @return Boolean ''' - src_mac = self.dataplane.get_mac(0, 0) + base_mac = self.dataplane.get_mac(0, 0) ip_src = self.src_ip_interval.get_random_ip() if hash_key == 'src-ip' else self.src_ip_interval.get_first_ip() ip_dst = self.dst_ip_interval.get_random_ip() if hash_key == 'dst-ip' else self.dst_ip_interval.get_first_ip() sport = random.randint(0, 65535) if hash_key == 'src-port' else 1234 dport = random.randint(0, 65535) if hash_key == 'dst-port' else 80 + src_mac = (base_mac[:-5] + "%02x" % random.randint(0, 255) + ":" + "%02x" % random.randint(0, 255)) if hash_key == 'src-mac' else base_mac + dst_mac = random.choice(self.dst_macs) if hash_key == 'dst-mac' else self.router_mac + vlan_id = random.choice(self.vlan_ids) if hash_key == 'vlan-id' else 0 + ip_proto = random.randint(100, 200) if hash_key == "ip-proto" else None - pkt = simple_tcpv6_packet( - eth_dst=self.router_mac, + pkt = simple_tcpv6_packet(pktlen=100 if vlan_id == 0 else 104, + eth_dst=dst_mac, eth_src=src_mac, + dl_vlan_enable=False if vlan_id == 0 else True, + vlan_vid=vlan_id, + vlan_pcp=0, ipv6_dst=ip_dst, ipv6_src=ip_src, tcp_sport=sport, tcp_dport=dport, ipv6_hlim=64) exp_pkt = simple_tcpv6_packet( - eth_src=self.router_mac, + eth_src=dst_mac, ipv6_dst=ip_dst, ipv6_src=ip_src, tcp_sport=sport, tcp_dport=dport, ipv6_hlim=63) + + if hash_key == 'ip-proto': + pkt['IPv6'].nh = ip_proto + exp_pkt['IPv6'].nh = ip_proto + masked_exp_pkt = Mask(exp_pkt) masked_exp_pkt.set_do_not_care_scapy(scapy.Ether,"dst") diff --git a/ansible/roles/test/files/ptftests/populate_fdb.py b/ansible/roles/test/files/ptftests/populate_fdb.py new file mode 100644 index 0000000000..5899b41692 --- /dev/null +++ b/ansible/roles/test/files/ptftests/populate_fdb.py @@ -0,0 +1,176 @@ +import ipaddress +import json +import logging +import os + +# Packet Test Framework imports +import ptf +import ptf.packet as scapy +import ptf.testutils as testutils +from ptf import config +from ptf.base_tests import BaseTest + +logger = logging.getLogger(__name__) + +class PopulateFdb(BaseTest): + """ + Populate DUT FDB entries + """ + TCP_DST_PORT = 5000 + TCP_SRC_PORT = 6000 + + def __init__(self): + """ + class constructor + + Args: + None + + Returns: + None + """ + BaseTest.__init__(self) + + def setUp(self): + """ + Sets up Populate FDB instance data + + Args: + None + + Returns: + None + """ + self.dataplane = ptf.dataplane_instance + self.dataplane.flush() + + self.testParams = testutils.test_params_get() + self.packetCount = self.testParams["packet_count"] + self.startMac = self.testParams["start_mac"] + + self.configFile = self.testParams["config_data"] + with open(self.configFile) as fp: + self.configData = json.load(fp) + + self.dutMac = self.configData["dut_mac"] + self.macToIpRatio = [int(i) for i in self.testParams["mac_to_ip_ratio"].split(':')] + self.assertTrue( + len(self.macToIpRatio) == 2 and self.macToIpRatio[0] > 0 and self.macToIpRatio[1] > 0, + "Invalid MAC to IP ratio: {0}".format(self.testParams["mac_to_ip_ratio"]) + ) + + if config["log_dir"] is not None: + filename = os.path.join(config["log_dir"], str(self)) + ".pcap" + self.dataplane.start_pcap(filename) + + def tearDown(self): + """ + Tears down FDB instance data + + Args: + None + + Returns: + None 
+ """ + if config["log_dir"] is not None: + self.dataplane.stop_pcap() + + def __convertMacToInt(self, mac): + """ + Converts MAC address to integer + + Args: + mac (str): MAC Address + + Returns: + mac (int): integer representation of MAC address + """ + return int(mac.translate(None, ":.- "), 16) + + def __convertMacToStr(self, mac): + """ + Converts MAC address to string + + Args: + mac (int): MAC Address + + Returns: + mac (str): string representation of MAC address + """ + mac = "{:012x}".format(mac) + return ":".join(mac[i : i + 2] for i in range(0, len(mac), 2)) + + def __prepareVmIp(self): + """ + Prepares VM IP addresses + + Args: + None + + Returns: + vmIp (dict): Map containing vlan to VM IP address + """ + vmIp = {} + for vlan, vlanConfig in self.configData["vlan_interfaces"].items(): + prefixLen = self.configData["vlan_interfaces"][vlan]["prefixlen"] + ipCount = 2**(32 - prefixLen) - 3 + numDistinctIp = self.packetCount * self.macToIpRatio[1] / self.macToIpRatio[0] + self.assertTrue( + ipCount >= numDistinctIp, + "Vlan network supports '{0}' IPs, which is less than the requested number of distinct IPs '{1}'".format( + ipCount, + numDistinctIp + ) + ) + vmIp[vlan] = ipaddress.ip_address(unicode(vlanConfig["addr"])) + 1 + + return vmIp + + def __populateDutFdb(self): + """ + Populates DUT FDB entries + + It accepts a MAC to IP ratio and a packet count. It generates packets with the provided ratio of distinct MAC addresses + to distinct IP addresses. The IP addresses start from the VLAN address pool. + + Args: + None + + Returns: + None + """ + packet = testutils.simple_tcp_packet( + eth_dst=self.dutMac, + tcp_sport=self.TCP_SRC_PORT, + tcp_dport=self.TCP_DST_PORT + ) + vmIp = self.__prepareVmIp() + macInt = self.__convertMacToInt(self.startMac) + numMac = numIp = 0 + for i in range(self.packetCount): + port = i % len(self.configData["vlan_ports"]) + vlan = self.configData["vlan_ports"][port]["vlan"] + + if i % self.macToIpRatio[1] == 0: + mac = self.__convertMacToStr(macInt + i) + numMac += 1 + if i % self.macToIpRatio[0] == 0: + vmIp[vlan] = ipaddress.ip_address(unicode(vmIp[vlan])) + 1 + numIp += 1 + + packet[scapy.Ether].src = mac + packet[scapy.IP].src = str(vmIp[vlan]) + packet[scapy.IP].dst = self.configData["vlan_interfaces"][vlan]["addr"] + testutils.send(self, self.configData["vlan_ports"][port]["index"], packet) + + logger.info( + "Generated {0} packets with distinct {1} MAC addresses and {2} IP addresses".format( + self.packetCount, + numMac, + numIp + ) + ) + + def runTest(self): + self.__populateDutFdb() diff --git a/ansible/roles/test/files/ptftests/sad_path.py b/ansible/roles/test/files/ptftests/sad_path.py index 8fcb5b7db5..85e61d20e5 100644 --- a/ansible/roles/test/files/ptftests/sad_path.py +++ b/ansible/roles/test/files/ptftests/sad_path.py @@ -1,25 +1,24 @@ import datetime import ipaddress import re -import subprocess import time from arista import Arista +from device_connection import DeviceConnection class SadTest(object): - def __init__(self, oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, dut_ssh, vlan_ports): + def __init__(self, oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, vlan_ports): self.oper_type = oper_type self.vm_list = vm_list self.portchannel_ports = portchannel_ports self.vm_dut_map = vm_dut_map self.test_args = test_args - self.dut_ssh = dut_ssh self.vlan_ports = vlan_ports self.fails_vm = set() self.fails_dut = set() self.log = [] - self.shandle = SadOper(self.oper_type, self.vm_list, self.portchannel_ports, self.vm_dut_map, self.test_args, 
self.dut_ssh, self.vlan_ports) + self.shandle = SadOper(self.oper_type, self.vm_list, self.portchannel_ports, self.vm_dut_map, self.test_args, self.vlan_ports) def setup(self): self.shandle.sad_setup(is_up=False) @@ -55,6 +54,7 @@ def __init__(self, oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, self.portchannel_ports = portchannel_ports self.vm_dut_map = vm_dut_map self.test_args = test_args + self.dut_connection = DeviceConnection(test_args['dut_hostname'], test_args['dut_username'], password=test_args['dut_password']) self.vlan_ports = vlan_ports self.vlan_if_port = self.test_args['vlan_if_port'] self.neigh_vms = [] @@ -97,16 +97,6 @@ def extract_oper_info(self, oper_type): else: self.oper_type = oper_type - def cmd(self, cmds): - process = subprocess.Popen(cmds, - shell=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - return_code = process.returncode - - return stdout, stderr, return_code - def select_vm(self): self.vm_list.sort() vm_len = len(self.vm_list) @@ -203,9 +193,8 @@ def retreive_logs(self): class SadOper(SadPath): - def __init__(self, oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, dut_ssh, vlan_ports): + def __init__(self, oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, vlan_ports): super(SadOper, self).__init__(oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, vlan_ports) - self.dut_ssh = dut_ssh self.dut_needed = dict() self.lag_members_down = dict() self.neigh_lag_members_down = dict() @@ -335,7 +324,7 @@ def get_bgp_route_cnt(self, is_up=True, v4=True): else: cmd = 'show ipv6 bgp summary | sed \'1,/Neighbor/d;/^$/,$d\' | sed \'s/\s\s*/ /g\' | cut -d\' \' -f 1,10' - stdout, stderr, return_code = self.cmd(['ssh', '-oStrictHostKeyChecking=no', self.dut_ssh, cmd]) + stdout, stderr, return_code = self.dut_connection.execCommand(cmd) if return_code != 0: self.fails['dut'].add('%s: Failed to retreive BGP route info from DUT' % self.msg_prefix[1 - is_up]) self.fails['dut'].add('%s: Return code: %d' % (self.msg_prefix[1 - is_up], return_code)) @@ -345,15 +334,15 @@ def get_bgp_route_cnt(self, is_up=True, v4=True): def build_neigh_rt_map(self, neigh_rt_info): # construct neigh to route cnt map self.neigh_rt_map = dict() - for line in neigh_rt_info.strip().split('\n'): - key, value = line.split(' ') + for line in neigh_rt_info: + key, value = line.strip().split(' ') self.neigh_rt_map.update({key:value}) def verify_route_cnt(self, rt_incr, is_up=True, v4=True): neigh_rt_info, ret = self.get_bgp_route_cnt(is_up=is_up, v4=v4) if not ret: - for line in neigh_rt_info.strip().split('\n'): - neigh_ip, rt_cnt = line.split(' ') + for line in neigh_rt_info: + neigh_ip, rt_cnt = line.strip().split(' ') exp_cnt = int(self.neigh_rt_map[neigh_ip]) + rt_incr if int(rt_cnt) != exp_cnt: self.fails['dut'].add('%s: Route cnt incorrect for neighbor %s Expected: %d Obtained: %d' % (self.msg_prefix[is_up], neigh_ip, exp_cnt, int(rt_cnt))) @@ -386,7 +375,7 @@ def change_vlan_port_state(self, is_up=True): for intf, port in self.down_vlan_info: if not re.match('Ethernet\d+', intf): continue self.log.append('Changing state of %s from DUT side to %s' % (intf, state[is_up])) - stdout, stderr, return_code = self.cmd(['ssh', '-oStrictHostKeyChecking=no', self.dut_ssh, 'sudo config interface %s %s' % (state[is_up], intf)]) + stdout, stderr, return_code = self.dut_connection.execCommand('sudo config interface %s %s' % (state[is_up], intf)) if return_code != 0: self.fails['dut'].add('%s: State change not 
successful from DUT side for %s' % (self.msg_prefix[1 - is_up], intf)) self.fails['dut'].add('%s: Return code: %d' % (self.msg_prefix[1 - is_up], return_code)) @@ -400,9 +389,9 @@ def verify_vlan_port_state(self, state='down', pre_check=True): # extract the admin status pat = re.compile('(\S+\s+){7}%s' % state) for intf, port in self.down_vlan_info: - stdout, stderr, return_code = self.cmd(['ssh', '-oStrictHostKeyChecking=no', self.dut_ssh, 'show interfaces status %s' % intf]) + stdout, stderr, return_code = self.dut_connection.execCommand('show interfaces status %s' % intf) if return_code == 0: - for line in stdout.split('\n'): + for line in stdout: if intf in line: is_match = pat.match(line.strip()) if is_match: @@ -426,7 +415,7 @@ def change_bgp_dut_state(self, is_up=True): continue self.log.append('Changing state of BGP peer %s from DUT side to %s' % (self.neigh_bgps[vm][key], state[is_up])) - stdout, stderr, return_code = self.cmd(['ssh', '-oStrictHostKeyChecking=no', self.dut_ssh, 'sudo config bgp %s neighbor %s' % (state[is_up], self.neigh_bgps[vm][key])]) + stdout, stderr, return_code = self.dut_connection.execCommand('sudo config bgp %s neighbor %s' % (state[is_up], self.neigh_bgps[vm][key])) if return_code != 0: self.fails['dut'].add('State change not successful from DUT side for peer %s' % self.neigh_bgps[vm][key]) self.fails['dut'].add('Return code: %d' % return_code) @@ -442,9 +431,9 @@ def verify_bgp_dut_state(self, state='Idle'): if key not in ['v4', 'v6']: continue self.log.append('Verifying if the DUT side BGP peer %s is %s' % (self.neigh_bgps[vm][key], states)) - stdout, stderr, return_code = self.cmd(['ssh', '-oStrictHostKeyChecking=no', self.dut_ssh, 'show ip bgp neighbor %s' % self.neigh_bgps[vm][key]]) + stdout, stderr, return_code = self.dut_connection.execCommand('show ip bgp neighbor %s' % self.neigh_bgps[vm][key]) if return_code == 0: - for line in stdout.split('\n'): + for line in stdout: if 'BGP state' in line: curr_state = re.findall('BGP state = (\w+)', line)[0] bgp_state[vm][key] = (curr_state in states) @@ -507,7 +496,7 @@ def change_dut_lag_state(self, is_up=True): for intf in down_intfs: if not re.match('(PortChannel|Ethernet)\d+', intf): continue self.log.append('Changing state of %s from DUT side to %s' % (intf, state[is_up])) - stdout, stderr, return_code = self.cmd(['ssh', '-oStrictHostKeyChecking=no', self.dut_ssh, 'sudo config interface %s %s' % (state[is_up], intf)]) + stdout, stderr, return_code = self.dut_connection.execCommand('sudo config interface %s %s' % (state[is_up], intf)) if return_code != 0: self.fails['dut'].add('%s: State change not successful from DUT side for %s' % (self.msg_prefix[1 - is_up], intf)) self.fails['dut'].add('%s: Return code: %d' % (self.msg_prefix[1 - is_up], return_code)) @@ -549,9 +538,9 @@ def verify_dut_lag_state(self, pre_check=True): po_list.append(po_name) self.po_neigh_map[po_name] = self.neigh_names[vm] - stdout, stderr, return_code = self.cmd(['ssh', '-oStrictHostKeyChecking=no', self.dut_ssh, 'show interfaces portchannel']) + stdout, stderr, return_code = self.dut_connection.execCommand('show interfaces portchannel') if return_code == 0: - for line in stdout.split('\n'): + for line in stdout: for po_name in po_list: if po_name in line: is_match = pat.match(line) diff --git a/ansible/roles/test/files/ptftests/vxlan-decap.py b/ansible/roles/test/files/ptftests/vxlan-decap.py index 0b6548100a..fcee135260 100644 --- a/ansible/roles/test/files/ptftests/vxlan-decap.py +++ 
b/ansible/roles/test/files/ptftests/vxlan-decap.py @@ -307,6 +307,7 @@ def checkRegularRegularVLANtoLAG(self, acc_port, pc_ports, dst_ip, test): exp_packet = Mask(exp_packet) exp_packet.set_do_not_care_scapy(scapy.Ether, "dst") + self.dataplane.flush() for i in xrange(self.nr): testutils.send_packet(self, acc_port, packet) nr_rcvd = testutils.count_matched_packets_all_ports(self, exp_packet, pc_ports, timeout=0.5) @@ -339,6 +340,7 @@ def checkRegularRegularLAGtoVLAN(self, acc_port, net_port, test): ip_ttl = 63, ) + self.dataplane.flush() for i in xrange(self.nr): testutils.send_packet(self, net_port, packet) nr_rcvd = testutils.count_matched_packets(self, exp_packet, acc_port, timeout=0.5) @@ -376,6 +378,8 @@ def checkVxlan(self, acc_port, net_port, test): vxlan_vni=test['vni'], inner_frame=inpacket ) + + self.dataplane.flush() for i in xrange(self.nr): testutils.send_packet(self, net_port, packet) nr_rcvd = testutils.count_matched_packets(self, inpacket, acc_port, timeout=0.5) diff --git a/ansible/roles/test/files/saitests/sai_qos_tests.py b/ansible/roles/test/files/saitests/sai_qos_tests.py index 55e8edcd61..5f346bfda0 100644 --- a/ansible/roles/test/files/saitests/sai_qos_tests.py +++ b/ansible/roles/test/files/saitests/sai_qos_tests.py @@ -122,6 +122,24 @@ def runTest(self): send_packet(self, self.dst_port_3_id, arpreq_pkt) time.sleep(8) + +class ARPpopulatePTF(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + ## ARP Populate + index = 0 + for port in ptf_ports(): + arpreq_pkt = simple_arp_packet( + eth_dst='ff:ff:ff:ff:ff:ff', + eth_src=self.dataplane.get_mac(port[0],port[1]), + arp_op=1, + ip_snd='10.0.0.%d' % (index * 2 + 1), + ip_tgt='10.0.0.%d' % (index * 2), + hw_snd=self.dataplane.get_mac(port[0], port[1]), + hw_tgt='ff:ff:ff:ff:ff:ff') + send_packet(self, port[1], arpreq_pkt) + index += 1 + + class ReleaseAllPorts(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): switch_init(self.client) @@ -1268,6 +1286,8 @@ def runTest(self): queue_num_of_pkts[48] = queue_6_num_of_pkts total_pkts = 0 + diff_list = [] + for pkt_to_inspect in pkts: dscp_of_pkt = pkt_to_inspect.payload.tos >> 2 total_pkts += 1 @@ -1276,10 +1296,16 @@ def runTest(self): queue_pkt_counters[dscp_of_pkt] += 1 if queue_pkt_counters[dscp_of_pkt] == queue_num_of_pkts[dscp_of_pkt]: - assert((queue_0_num_of_pkts + queue_1_num_of_pkts + queue_2_num_of_pkts + queue_3_num_of_pkts + queue_4_num_of_pkts + queue_5_num_of_pkts + queue_6_num_of_pkts) - total_pkts < limit) + diff_list.append((dscp_of_pkt, (queue_0_num_of_pkts + queue_1_num_of_pkts + queue_2_num_of_pkts + queue_3_num_of_pkts + queue_4_num_of_pkts + queue_5_num_of_pkts + queue_6_num_of_pkts) - total_pkts)) print >> sys.stderr, queue_pkt_counters + print >> sys.stderr, "Difference for each dscp: " + print >> sys.stderr, diff_list + + for dscp, diff in diff_list: + assert diff < limit, "Difference for %d is %d which exceeds limit %d" % (dscp, diff, limit) + # Read counters print "DST port counters: " port_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) diff --git a/ansible/roles/test/tasks/bgp_speaker.yml b/ansible/roles/test/tasks/bgp_speaker.yml index 4bed8ce473..7dca4b45e0 100644 --- a/ansible/roles/test/tasks/bgp_speaker.yml +++ b/ansible/roles/test/tasks/bgp_speaker.yml @@ -1,4 +1,4 @@ - name: run test include_tasks: roles/test/tasks/pytest_runner.yml vars: - test_node: test_bgp_speaker.py + test_node: bgp/test_bgp_speaker.py diff --git a/ansible/roles/test/tasks/config.yml 
b/ansible/roles/test/tasks/config.yml index c0aa4c3ab3..1dae600ef2 100644 --- a/ansible/roles/test/tasks/config.yml +++ b/ansible/roles/test/tasks/config.yml @@ -1,140 +1,4 @@ -- debug: msg="Configuration Test" - -- name: Gather minigraph facts - minigraph_facts: host={{inventory_hostname}} - -- name: Gather interface facts - interface_facts: - -- name: Initialize portchannel - set_fact: - portchannel: "{{minigraph_portchannels | first}}" - tmp_portchannel: "PortChannel999" - -- name: Initialize portchannel_ip and portchannel_member - set_fact: - portchannel_ip: "{{ansible_interface_facts[portchannel]['ipv4']['address']}}" - portchannel_members: "{{minigraph_portchannels[portchannel]['members']}}" - -- name: Print variables +- name: run test + include_tasks: roles/test/tasks/pytest_runner.yml vars: - msg: | - portchannel: {{ portchannel }} - portchannel_ip: {{ portchannel_ip }} - portchannel_members: {{ portchannel_members }} - debug: - msg: "{{ msg.split('\n') }}" - -- name: Initialize flags - set_fact: - remove_portchannel_members: false - remove_portchannel_ip: false - create_tmp_portchannel: false - add_tmp_portchannel_members: false - add_tmp_portchannel_ip: false - -- block: - - name: Step 1 Remove {{ portchannel_members }} from {{ portchannel }} - shell: config portchannel member del {{ portchannel }} {{ item }} - become: yes - with_items: "{{portchannel_members}}" - - set_fact: - remove_portchannel_members: true - - - name: Step 2 Remove {{ portchannel_ip }} from {{ portchannel }} - shell: config interface ip remove {{ portchannel }} {{ portchannel_ip }}/31 - become: yes - - set_fact: - remove_portchannel_ip: true - - - pause: seconds=30 - - - interface_facts: - - assert: - that: - - "{{ansible_interface_facts[portchannel]['link']}} == False" - - - bgp_facts: - - assert: - that: - - "{{bgp_statistics['ipv4_idle']}} == 1" - - - name: Step 3 Create {{ tmp_portchannel }} - shell: config portchannel add {{ tmp_portchannel }} - become: yes - - set_fact: - create_tmp_portchannel: true - - - name: Step 4 Add {{ portchannel_members }} to {{ tmp_portchannel }} - shell: config portchannel member add {{ tmp_portchannel }} {{ item }} - become: yes - with_items: "{{portchannel_members}}" - - set_fact: - add_tmp_portchannel_members: true - - - name: Step 5 Add {{ portchannel_ip }} to {{ tmp_portchannel }} - shell: config interface ip add {{ tmp_portchannel }} {{ portchannel_ip }}/31 - become: yes - - set_fact: - add_tmp_portchannel_ip: true - - - interface_facts: - - assert: - that: - - "'{{ansible_interface_facts[tmp_portchannel].ipv4.address}}' == '{{portchannel_ip}}'" - - - pause: seconds=30 - - - interface_facts: - - assert: - that: - - "{{ansible_interface_facts[tmp_portchannel]['link']}} == True" - - - bgp_facts: - - assert: - that: - - "{{bgp_statistics['ipv4_idle']}} == 0" - - always: - - name: Remove {{ portchannel_ip }} from {{ tmp_portchannel }} - shell: config interface ip remove {{ tmp_portchannel }} {{ portchannel_ip }}/31 - become: yes - when: add_tmp_portchannel_ip - - - pause: seconds=5 - - - name: Remove {{ portchannel_members }} from {{ tmp_portchannel }} - shell: config portchannel member del {{ tmp_portchannel }} {{ item }} - become: yes - when: add_tmp_portchannel_members - with_items: "{{portchannel_members}}" - - - pause: seconds=5 - - - name: Remove {{ tmp_portchannel }} - shell: config portchannel del {{ tmp_portchannel }} - become: yes - when: create_tmp_portchannel - - - name: Add {{ portchannel_ip }} to {{ portchannel }} - shell: config interface ip add {{ 
portchannel }} {{ portchannel_ip }}/31 - become: yes - when: remove_portchannel_ip - - - name: Add {{ portchannel_members }} to {{ portchannel }} - shell: config portchannel member add {{ portchannel }} {{ item }} - become: yes - when: remove_portchannel_members - with_items: "{{portchannel_members}}" - - - pause: seconds=30 - - - interface_facts: - - assert: - that: - - "{{ansible_interface_facts[portchannel]['link']}} == True" - - - bgp_facts: - - assert: - that: - - "{{bgp_statistics['ipv4_idle']}} == 0" + test_node: pc/test_po_update.py \ No newline at end of file diff --git a/ansible/roles/test/tasks/crm/crm_test_fdb_entry.yml b/ansible/roles/test/tasks/crm/crm_test_fdb_entry.yml index ff3c91b4df..cb0e394b6e 100644 --- a/ansible/roles/test/tasks/crm/crm_test_fdb_entry.yml +++ b/ansible/roles/test/tasks/crm/crm_test_fdb_entry.yml @@ -1,5 +1,11 @@ - block: + - name: Stop arp_update + command: docker exec -i swss supervisorctl stop arp_update + + - name: Remove FDB entry + command: fdbclear + - name: Get "crm_stats_fdb_entry" used and available counter value command: redis-cli --raw -n 2 HMGET CRM:STATS crm_stats_fdb_entry_used crm_stats_fdb_entry_available register: out @@ -84,3 +90,6 @@ - name: Remove FDB JSON config from SWSS container command: docker exec -i swss rm /fdb.json + + - name: Restart arp_update + command: docker exec -i swss supervisorctl start arp_update diff --git a/ansible/roles/test/tasks/dhcp_relay.yml b/ansible/roles/test/tasks/dhcp_relay.yml index 839434b4f4..eb93fea9bd 100644 --- a/ansible/roles/test/tasks/dhcp_relay.yml +++ b/ansible/roles/test/tasks/dhcp_relay.yml @@ -1,4 +1,4 @@ - name: Run DHCP relay test (pytest-ansible) include_tasks: roles/test/tasks/pytest_runner.yml vars: - test_node: test_dhcp_relay.py + test_node: dhcp_relay/test_dhcp_relay.py diff --git a/ansible/roles/test/tasks/dip_sip.yml b/ansible/roles/test/tasks/dip_sip.yml index d064187be3..2881dfe5a4 100644 --- a/ansible/roles/test/tasks/dip_sip.yml +++ b/ansible/roles/test/tasks/dip_sip.yml @@ -1,131 +1,4 @@ -- fail: msg="testbed_type is not defined" - when: testbed_type is not defined - -- fail: msg="testbed_type {{ test_type }} is invalid" - when: testbed_type not in ['t0', 't0-16', 't0-56', 't0-64', 't0-64-32', 't0-116', 't1', 't1-lag', 't1-64-lag', 't1-64-lag-clet'] - -- include_vars: "vars/topo_{{ testbed_type }}.yml" - -- name: "Expand properties into props" - set_fact: props="{{ configuration_properties['common'] }}" - -- name: "Gather minigraph facts about the device" - minigraph_facts: host={{ inventory_hostname }} - -- name: "Remove existing IPs from PTF host" - script: roles/test/files/helpers/remove_ip.sh - delegate_to: "{{ ptf_host }}" - -- name: "Set unique MACs to PTF interfaces" - script: roles/test/files/helpers/change_mac.sh - delegate_to: "{{ ptf_host }}" - -- include_tasks: add_container_to_inventory.yml +- name: run test + include_tasks: roles/test/tasks/pytest_runner.yml vars: - container_name: lldp - -- name: "Gather information from LLDP" - lldp: - delegate_to: "{{ ansible_host }}_lldp" - -- name: "Copy tests to PTF" - copy: src=roles/test/files/ptftests dest=/root - delegate_to: "{{ ptf_host }}" - -- block: - - fail: msg="minigraph_interfaces is not defined or zero length" - when: minigraph_interfaces is not defined or (minigraph_interfaces | length == 0) - - - name: "Set destination PORT name" - set_fact: - dst_port: "{{ minigraph_interfaces[0].attachto }}" - - - name: "Set source PORT name" - set_fact: - src_port: "{{ minigraph_interfaces[2].attachto }}" - - - name: 
"Start PTF runner: '{{ testbed_type }}' designated" - include_tasks: ptf_runner.yml - vars: - ptf_test_name: DipSip test - ptf_test_dir: ptftests - ptf_test_path: dip_sip.DipSipTest - ptf_platform: remote - ptf_platform_dir: ptftests - ptf_test_params: - - testbed_type='{{ testbed_type }}' - - dst_host_mac='{{ lldp[dst_port]['chassis'].mac }}' - - src_host_mac='{{ lldp[src_port]['chassis'].mac }}' - - dst_router_mac='{{ ansible_interface_facts[dst_port].macaddress }}' - - src_router_mac='{{ ansible_interface_facts[src_port].macaddress }}' - - dst_router_ipv4='{{ ansible_interface_facts[dst_port]['ipv4']['address'] }}' - - src_router_ipv4='{{ ansible_interface_facts[src_port]['ipv4']['address'] }}' - - dst_router_ipv6='{{ ansible_interface_facts[dst_port]['ipv6'] | selectattr("scope", "match", "^global$") | map(attribute='address') | list | first }}' - - src_router_ipv6='{{ ansible_interface_facts[src_port]['ipv6'] | selectattr("scope", "match", "^global$") | map(attribute='address') | list | first }}' - - dst_port_ids=[{{ minigraph_port_indices[dst_port] }}] - - src_port_ids=[{{ minigraph_port_indices[src_port] }}] - ptf_extra_options: "--relax --debug info --log-file /tmp/dip_sip.DipSipTest.{{ lookup('pipe','date +%Y-%m-%d-%H:%M:%S') }}.log" - - vars: - dst_port: "default('')" - src_port: "default('')" - when: testbed_type in ['t1'] - -- block: - - fail: msg="minigraph_portchannel_interfaces is not defined or zero length" - when: minigraph_portchannel_interfaces is not defined or (minigraph_portchannel_interfaces | length == 0) - - - name: "Set destination LAG name" - set_fact: - dst_lag: "{{ minigraph_portchannel_interfaces[0].attachto }}" - - - name: "Set source LAG name" - set_fact: - src_lag: "{{ minigraph_portchannel_interfaces[2].attachto }}" - - - name: "Gather destination port indices" - set_fact: - dst_port_ids: "{{ minigraph_port_indices[item] }}" - with_items: "{{ minigraph_portchannels[dst_lag].members }}" - register: dst_port_ids_result - - - name: "Make a list from destination port indices" - set_fact: - dst_port_ids: "{{ dst_port_ids_result.results | map(attribute='ansible_facts.dst_port_ids') | list }}" - - - name: "Gather source port indices" - set_fact: - src_port_ids: "{{ minigraph_port_indices[item] }}" - with_items: "{{ minigraph_portchannels[src_lag].members }}" - register: src_port_ids_result - - - name: "Make a list from source port indices" - set_fact: - src_port_ids: "{{ src_port_ids_result.results | map(attribute='ansible_facts.src_port_ids') | list }}" - - - name: "Start PTF runner: '{{ testbed_type }}' designated" - include_tasks: ptf_runner.yml - vars: - ptf_test_name: DipSip test - ptf_test_dir: ptftests - ptf_test_path: dip_sip.DipSipTest - ptf_platform: remote - ptf_platform_dir: ptftests - ptf_test_params: - - testbed_type='{{ testbed_type }}' - - dst_host_mac='{{ lldp[minigraph_portchannels[dst_lag].members[0]]['chassis'].mac }}' - - src_host_mac='{{ lldp[minigraph_portchannels[src_lag].members[0]]['chassis'].mac }}' - - dst_router_mac='{{ ansible_interface_facts[dst_lag].macaddress }}' - - src_router_mac='{{ ansible_interface_facts[src_lag].macaddress }}' - - dst_router_ipv4='{{ ansible_interface_facts[dst_lag]['ipv4']['address'] }}' - - src_router_ipv4='{{ ansible_interface_facts[src_lag]['ipv4']['address'] }}' - - dst_router_ipv6='{{ ansible_interface_facts[dst_lag]['ipv6'] | selectattr("scope", "match", "^global$") | map(attribute='address') | list | first }}' - - src_router_ipv6='{{ ansible_interface_facts[src_lag]['ipv6'] | selectattr("scope", 
"match", "^global$") | map(attribute='address') | list | first }}' - - dst_port_ids={{ dst_port_ids }} - - src_port_ids={{ src_port_ids }} - ptf_extra_options: "--relax --debug info --log-file /tmp/dip_sip.DipSipTest.{{ lookup('pipe','date +%Y-%m-%d-%H:%M:%S') }}.log" - - vars: - dst_lag: "default('')" - src_lag: "default('')" - when: testbed_type in ['t0', 't0-16', 't0-56', 't0-64', 't0-64-32', 't0-116', 't1-lag', 't1-64-lag', 't1-64-lag-clet'] + test_node: ipfwd/test_dip_sip.py diff --git a/ansible/roles/test/tasks/everflow_testbed.yml b/ansible/roles/test/tasks/everflow_testbed.yml index c5efbe2ca6..256cb3bc32 100644 --- a/ansible/roles/test/tasks/everflow_testbed.yml +++ b/ansible/roles/test/tasks/everflow_testbed.yml @@ -1,4 +1,4 @@ - name: run test include_tasks: roles/test/tasks/pytest_runner.yml vars: - test_node: test_everflow_testbed.py + test_node: everflow/test_everflow_testbed.py diff --git a/ansible/roles/test/tasks/lag_2.yml b/ansible/roles/test/tasks/lag_2.yml index 7b1eca5c45..a9d5a76f18 100644 --- a/ansible/roles/test/tasks/lag_2.yml +++ b/ansible/roles/test/tasks/lag_2.yml @@ -5,83 +5,7 @@ ### Also, most of the traffic load balancing tests of LAG interface are covered in new FIB tests. so we are ignoring traffic test ### for lag member flaps for now, will consider add traffic back if required -- fail: msg="Please define ptf_host" - when: ptf_host is not defined - -- fail: msg="Please define testbed_type" - when: testbed_type is not defined - -- name: gathering lag facts from device - lag_facts: host={{ inventory_hostname }} - -- fail: msg="No lag configuration found in {{ inventory_hostname }}" - when: lag_facts.names == [] - -- set_fact: test_minlink=true - when: test_minlink is not defined - -- set_fact: test_rate=true - when: test_rate is not defined - -- include_tasks: add_container_to_inventory.yml +- name: run test + include_tasks: roles/test/tasks/pytest_runner.yml vars: - container_name: lldp - -- name: Gathering peer VM information from lldp - lldp: - delegate_to: "{{ ansible_host }}_lldp" - -- name: gathering minigraph of the device configuration - minigraph_facts: host={{ inventory_hostname }} - -- name: Gathering lab graph facts about the device - conn_graph_facts: host={{ inventory_hostname }} - delegate_to: localhost - -- set_fact: - fanout_neighbors: "{{device_conn}}" - -- set_fact: - vm_neighbors: "{{ minigraph_neighbors }}" - -- name: Copy PTF test into PTF-docker for test LACP DU. - copy: src=roles/test/files/acstests/{{ item }} dest=/tmp/{{ item }} - with_items: - - lag_test.py - - acs_base_test.py - - router_utils.py - delegate_to: "{{ ptf_host }}" - -- name: Copy tests to the PTF container - copy: src=roles/test/files/ptftests dest=/root - delegate_to: "{{ ptf_host }}" - -- name: Include testbed topology configuration (to get LAG IP and PTF docker interfaces, that are behind LAG VMs). - include_vars: vars/topo_t1-lag.yml - when: testbed_type == 't1-lag' - -- name: Include testbed topology configuration (to get LAG IP and PTF docker interfaces, that are behind LAG VMs). - include_vars: vars/topo_t0.yml - when: testbed_type == 't0' - -- name: Include testbed topology configuration (to get LAG IP and PTF docker interfaces, that are behind LAG VMs). 
- include_vars: vars/topo_t0-116.yml - when: testbed_type == 't0-116' - -- set_fact: - dut_mac: "{{ ansible_Ethernet0['macaddress'] }}" - -- name: test each lag interface minimum links and rate - include_tasks: single_lag_test.yml - with_items: "{{ lag_facts.names }}" - when: lag_facts.lags[item]['po_config']['runner']['min_ports'] is defined and test_minlink|bool == true - -- name: test each lag interface LACP DU rate - include_tasks: single_lag_lacp_rate_test.yml - with_items: "{{ lag_facts.names }}" - when: lag_facts.lags[item]['po_config']['runner']['min_ports'] is defined and test_rate|bool == true - -- name: test each lag interface with fallback config - include_tasks: lag_fallback.yml - with_items: "{{ lag_facts.names }}" - when: lag_facts.lags[item]['po_config']['runner']['fallback'] is defined + test_node: pc/test_lag_2.py diff --git a/ansible/roles/test/tasks/lag_fallback.yml b/ansible/roles/test/tasks/lag_fallback.yml deleted file mode 100644 index 15a107d191..0000000000 --- a/ansible/roles/test/tasks/lag_fallback.yml +++ /dev/null @@ -1,95 +0,0 @@ -### This playbook is part of lag_2.yml test -### It is to test LACP fallback functionality when neighbor is not sending LACPDU. -### The lag_fallback test cases flap the link from VM side while keeping the -### physical link up, this is to simulate the situation where the remote end -### stops sending LACP DU. If fallback is enabled, the port should still be selected, -### and LAG should be kept up. Otherwise, the LAG should be brought down. -### Then bring up the remote interface to make sure Port channel interface -### should be kept up if fallback is enabled - -- set_fact: - po: "{{ item }}" - po_interfaces: "{{ lag_facts.lags[item]['po_config']['ports'] }}" - po_intf_num: "{{ lag_facts.lags[item]['po_config']['ports']|length }}" - -- set_fact: - flap_intf: "{{ lag_facts.lags[item]['po_config']['ports'].keys()[0] }}" - po_fallback: "{{ lag_facts.lags[item]['po_config']['runner']['fallback'] }}" - -### Now figure out remote VM and interface info for the falpping lag member and run fallback test -- set_fact: - peer_device: "{{vm_neighbors[flap_intf]['name']}}" - neighbor_interface: "{{vm_neighbors[flap_intf]['port']}}" - peer_hwsku: 'Arista-VM' - -- set_fact: - peer_host: "{{ lldp[flap_intf]['chassis']['mgmt-ip'] }}" - wait_down_time: 120 - -- block: - - name: Shut down neighbor interface {{ neighbor_interface }} on {{ peer_device }} - action: apswitch template=neighbor_interface_shut_single.j2 - args: - host: "{{peer_host}}" - login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - connection: switch - - - pause: - seconds: "{{ wait_down_time }}" - - - lag_facts: host={{ inventory_hostname }} - - - name: Display teamshow result - shell: teamshow - become: true - register: teamshow_result - - - debug: var=teamshow_result.stdout_lines - - - name: Verify all other lag member interfaces are marked selected - assert: { that: "'{{ lag_facts.lags[po]['po_stats']['ports'][item]['runner']['selected'] }}' == 'True'" } - with_items: "{{ po_interfaces.keys() }}" - when: item != "{{ flap_intf }}" - - - name: Verify {{ flap_intf}} lag member interfaces are marked as deselected for the shutdown port without fallback - assert: { that: "'{{ lag_facts.lags[po]['po_stats']['ports'][item]['runner']['selected'] }}' == 'False'" } - with_items: "{{ po_interfaces.keys() }}" - when: - - po_fallback != True - - item == "{{ flap_intf }}" - - - name: Verify {{ flap_intf}} lag member interfaces are marked as selected for the shutdown port with fallback - assert: { 
that: "'{{ lag_facts.lags[po]['po_stats']['ports'][item]['runner']['selected'] }}' == 'True'" } - with_items: "{{ po_interfaces.keys() }}" - when: - - po_fallback == True - - item == "{{ flap_intf }}" - - - name: verify port-channel {{ po }} interface are marked down correctly if portchannel should down - assert: { that: "'{{ lag_facts.lags[po]['po_intf_stat'] }}' == 'Down' "} - when: po_fallback != True - - - name: verify port-channel {{ po }} interface are marked Up correctly if portchannel should keepup - assert: { that: "'{{ lag_facts.lags[po]['po_intf_stat'] }}' == 'Up' "} - when: po_fallback == True - - ### always bring back port in case test error and left testbed in unknow stage - always: - - name: Bring up neighbor interface {{ neighbor_interface }} on {{ peer_host }} - action: apswitch template=neighbor_interface_no_shut_single.j2 - args: - host: "{{peer_host}}" - login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - connection: switch - - - pause: - seconds: 30 - - - lag_facts: host={{ inventory_hostname }} - - - name: Verify all interfaces in port_channel are marked up - assert: { that: "'{{ lag_facts.lags[po]['po_stats']['ports'][item]['link']['up'] }}' == 'True'" } - with_items: "{{ po_interfaces.keys() }}" - - - name: verify port-channel {{ po }} interface are marked up correctly - assert: { that: "'{{ lag_facts.lags[po]['po_intf_stat'] }}' == 'Up' "} diff --git a/ansible/roles/test/tasks/lldp.yml b/ansible/roles/test/tasks/lldp.yml index eb7ee04f6c..c1b6bb388e 100644 --- a/ansible/roles/test/tasks/lldp.yml +++ b/ansible/roles/test/tasks/lldp.yml @@ -1,4 +1,4 @@ - name: run test include_tasks: roles/test/tasks/pytest_runner.yml vars: - test_node: test_lldp.py + test_node: lldp/test_lldp.py diff --git a/ansible/roles/test/tasks/mtu.yml b/ansible/roles/test/tasks/mtu.yml index a3340a3464..6492725b7f 100644 --- a/ansible/roles/test/tasks/mtu.yml +++ b/ansible/roles/test/tasks/mtu.yml @@ -4,4 +4,4 @@ - name: Run MTU relay test (pytest-ansible) include_tasks: roles/test/tasks/pytest_runner.yml vars: - test_node: test_mtu.py + test_node: ipfwd/test_mtu.py diff --git a/ansible/roles/test/tasks/ptf_runner_reboot.yml b/ansible/roles/test/tasks/ptf_runner_reboot.yml index 3111e8cbed..f614eb1dff 100644 --- a/ansible/roles/test/tasks/ptf_runner_reboot.yml +++ b/ansible/roles/test/tasks/ptf_runner_reboot.yml @@ -51,7 +51,8 @@ ptf_qlen: 1000 ptf_test_params: - verbose=False - - dut_username=\"{{ ansible_ssh_user }}\" + - dut_username=\"{{ sonicadmin_user }}\" + - dut_password=\"{{ sonicadmin_password }}\" - dut_hostname=\"{{ ansible_host }}\" - reboot_limit_in_seconds={{ reboot_limit }} - reboot_type=\"{{ reboot_type }}\" @@ -60,7 +61,7 @@ - ports_file=\"/tmp/ports.json\" - dut_mac='{{ dut_mac }}' - dut_vlan_ip='192.168.0.1' - - default_ip_range='192.168.0.0/16' + - default_ip_range='192.168.100.0/18' - vlan_ip_range='{{ vlan_ip_range }}' - lo_v6_prefix='{{ lo_v6_prefix }}' - arista_vms=\"['{{ vm_hosts | list | join("','") }}']\" diff --git a/ansible/roles/test/tasks/qos_sai.yml b/ansible/roles/test/tasks/qos_sai.yml index 7b6d6ef550..ee601eefda 100644 --- a/ansible/roles/test/tasks/qos_sai.yml +++ b/ansible/roles/test/tasks/qos_sai.yml @@ -37,14 +37,18 @@ - lldpd - lldp-syncd - - name: Disable bgpd + - name: Ensure BGP Daemon stopped become: yes - lineinfile: dest=/etc/quagga/daemons - regexp=^bgpd=.*$ - line='bgpd=no' - notify: - - Restart Quagga Daemon + supervisorctl: state=stopped name=bgpd delegate_to: "{{ ansible_host }}_bgp" + + - name: Add iptables rule to drop BGP SYN Packet from 
peer so that we do not ACK back + shell: "iptables -A INPUT -j DROP -p tcp --destination-port bgp" + become: true + + - name: Add ip6tables rule to drop BGP SYN Packet from peer so that we do not ACK back + shell: "ip6tables -A INPUT -j DROP -p tcp --destination-port bgp" + become: true - meta: flush_handlers @@ -113,6 +117,21 @@ - src_port_ip='{{src_port_ip}}' when: testbed_type in ['t0', 't0-64', 't0-116'] or arp_entries.stdout.find('incomplete') == -1 + - include_tasks: qos_sai_ptf.yml + vars: + test_name: populate arp on all ports + test_path: sai_qos_tests.ARPpopulatePTF + test_params: + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - dst_port_2_id='{{dst_port_2_id}}' + - dst_port_2_ip='{{dst_port_2_ip}}' + - dst_port_3_id='{{dst_port_3_id}}' + - dst_port_3_ip='{{dst_port_3_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + when: testbed_type in ['ptf32', 'ptf64'] + # XOFF limit - include_tasks: qos_sai_ptf.yml vars: @@ -467,14 +486,20 @@ - lldpd - lldp-syncd + - name: Remove iptables rule to drop BGP SYN Packet from Peer + shell: "iptables -D INPUT -j DROP -p tcp --destination-port bgp" + become: true + + - name: Remove ip6tables rule to drop BGP SYN Packet from Peer + shell: "ip6tables -D INPUT -j DROP -p tcp --destination-port bgp" + become: true + - name: Enable bgpd become: yes - lineinfile: dest=/etc/quagga/daemons - regexp=^bgpd=.*$ - line='bgpd=yes' + supervisorctl: state=started name=bgpd + delegate_to: "{{ ansible_host }}_bgp" notify: - Restart Quagga Daemon - delegate_to: "{{ ansible_host }}_bgp" - name: Restore original watermark polling status shell: counterpoll watermark {{watermark_status.stdout}} diff --git a/ansible/roles/test/tasks/single_lag_lacp_rate_test.yml b/ansible/roles/test/tasks/single_lag_lacp_rate_test.yml deleted file mode 100644 index 1f5ae29a27..0000000000 --- a/ansible/roles/test/tasks/single_lag_lacp_rate_test.yml +++ /dev/null @@ -1,99 +0,0 @@ -### Part of lag test palybook lag_2.yml (--tag lag_2) -### This playbook test one single port channel member interfaces sending ACP DU rate - -# Gather information of port channel ports, minimum links and total interface member numbers -- set_fact: - po: "{{ item }}" - po_interfaces: "{{ lag_facts.lags[item]['po_config']['ports'] }}" - po_intf_num: "{{ lag_facts.lags[item]['po_config']['ports']|length }}" - po_min_links: "{{lag_facts.lags[item]['po_config']['runner']['min_ports']}}" - -# pick flap interface name and calculate when it flaps, should portchannel interface flap or not -# Current it is using a static capacity < 75%, Portchannel will flap which match Sonic configuration -# if need to be random, then will make it a var -- set_fact: - po_flap: "{{ (po_intf_num|float - 1)/(po_min_links|float)*100 < 75 }}" - flap_intf: "{{ lag_facts.lags[item]['po_config']['ports'].keys()[0] }}" - -### figure out fanout switches info for the flapping lag member -- set_fact: - peer_device: "{{ fanout_neighbors[flap_intf]['peerdevice'] }}" - neighbor_interface: "{{ fanout_neighbors[flap_intf]['peerport'] }}" - -- conn_graph_facts: host={{ peer_device }} - delegate_to: localhost - -### Now figure out remote VM and interface info for the falpping lag member and run minlink test -- set_fact: - peer_device: "{{vm_neighbors[flap_intf]['name']}}" - neighbor_interface: "{{vm_neighbors[flap_intf]['port']}}" - peer_hwsku: 'Arista-VM' - -- set_fact: - peer_host: "{{ minigraph_devices[peer_device]['mgmt_addr'] }}" - -### Now prepare for the remote VM interfaces that using PTF docker to 
check teh LACP DU packet rate is correct - -- set_fact: - iface_behind_lag_member: [] -- set_fact: - iface_behind_lag_member: "{{iface_behind_lag_member}}+ ['{{minigraph_port_indices[item.key]}}']" - with_dict: "{{ minigraph_neighbors }}" - when: peer_device == "{{item.value.name}}" -- set_fact: - neighbor_lag_intfs: [] - -- set_fact: - neighbor_lag_intfs: "{{ neighbor_lag_intfs }} + [ '{{ vm_neighbors[item]['port'] }}' ]" - with_items: "{{ po_interfaces }}" - -- block: - # make sure portchannel peer rate is set to fast - - name: make sure all lag members on VM are set to fast - action: apswitch template=neighbor_lag_rate_fast.j2 - args: - host: "{{peer_host}}" - login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - connection: switch - - - set_fact: - lag_rate_current_setting: "fast" - - - pause: - seconds: 5 - - - name: test lacp packet sending rate is 1 seconds - include_tasks: lag_lacp_timing_test.yml - vars: - vm_name: "{{ peer_device }}" - lacp_timer: 1 - - # make sure portchannel peer rate is set to slow - - name: make sure all lag members on VM are set to slow - action: apswitch template=neighbor_lag_rate_slow.j2 - args: - host: "{{peer_host}}" - login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - connection: switch - - - set_fact: - lag_rate_current_setting: "slow" - - - pause: - seconds: 5 - - - name: test lacp packet sending rate is 30 seconds - include_tasks: lag_lacp_timing_test.yml - vars: - vm_name: "{{ peer_device }}" - lacp_timer: 30 - - always: - - name: Restore lag rate setting on VM in case of failure - action: apswitch template=neighbor_lag_rate_slow.j2 - args: - host: "{{peer_host}}" - login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - timeout: 300 - connection: switch - when: "lag_rate_current_setting is defined and lag_rate_current_setting == 'fast'" diff --git a/ansible/roles/test/tasks/single_lag_test.yml b/ansible/roles/test/tasks/single_lag_test.yml deleted file mode 100644 index 3f3956696e..0000000000 --- a/ansible/roles/test/tasks/single_lag_test.yml +++ /dev/null @@ -1,56 +0,0 @@ -### Part of lag test palybook lag_2.yml (--tag lag_2) -### This playbook test one single port channel minimum link feature of one member interface shutdown -### and portchannel member interfaces sending ACP DU rate - -# Set maximum value of "smart" timeout to be the same as before, -# user can now set own value outside the test, for example by passing '-e wait_timeout=5' -- set_fact: - wait_timeout: 30 - when: "wait_timeout is not defined" - -# Gather information of port channel ports, minimum links and total interface member numbers -- set_fact: - po: "{{ item }}" - po_interfaces: "{{ lag_facts.lags[item]['po_config']['ports'] }}" - po_intf_num: "{{ lag_facts.lags[item]['po_config']['ports']|length }}" - po_min_links: "{{lag_facts.lags[item]['po_config']['runner']['min_ports']}}" - -# pick flap interface name and calculate when it flaps, should portchannel interface flap or not -# Current it is using a static capacity < 75%, Portchannel will flap which match Sonic configuration -# if need to be random, then will make it a var -- set_fact: - po_flap: "{{ (po_intf_num|float - 1)/(po_min_links|float)*100 < 75 }}" - flap_intf: "{{ lag_facts.lags[item]['po_config']['ports'].keys()[0] }}" - -### figure out fanout switches info for the flapping lag member and run minlink test -- set_fact: - peer_device: "{{ fanout_neighbors[flap_intf]['peerdevice'] }}" - neighbor_interface: "{{ fanout_neighbors[flap_intf]['peerport'] }}" - -- conn_graph_facts: host={{ peer_device }} - delegate_to: localhost - 
-- set_fact: - peer_host: "{{ device_info['mgmtip'] }}" - peer_hwsku: "{{ device_info['HwSku'] }}" - -- name: test fanout interface (physical) flap and lacp keep correct po status follow minimum links requirement - include_tasks: lag_minlink.yml - vars: - deselect_time: 5 - wait_down_time: "{{ wait_timeout | int }}" - -### Now figure out remote VM and interface info for the flapping lag member and run minlink test -- set_fact: - peer_device: "{{vm_neighbors[flap_intf]['name']}}" - neighbor_interface: "{{vm_neighbors[flap_intf]['port']}}" - peer_hwsku: 'Arista-VM' - -- set_fact: - peer_host: "{{ minigraph_devices[peer_device]['mgmt_addr'] }}" - -- name: test vm interface flap (no physical port down, more like remote port lock) that lag interface can change to correct po status follow minimum links requirement - include_tasks: lag_minlink.yml - vars: - deselect_time: 95 - wait_down_time: "{{ wait_timeout | int }}" diff --git a/ansible/roles/test/tasks/sonic.yml b/ansible/roles/test/tasks/sonic.yml index 1eacb11290..64eddff540 100644 --- a/ansible/roles/test/tasks/sonic.yml +++ b/ansible/roles/test/tasks/sonic.yml @@ -63,8 +63,13 @@ test_facts: testbed_name="{{ testbed_name }}" testbed_file="{{ testbed_file }}" delegate_to: localhost + - name: Set default dut index + set_fact: + dut_index: 0 + when: dut_index is not defined + - fail: msg="The DUT you are trying to run test does not belongs to this testbed" - when: testbed_facts['dut'] != inventory_hostname + when: testbed_facts['duts'][dut_index] != inventory_hostname - name: set testbed_type set_fact: diff --git a/ansible/roles/test/tasks/vxlan-decap.yml b/ansible/roles/test/tasks/vxlan-decap.yml index 937182aa51..31aa917e52 100644 --- a/ansible/roles/test/tasks/vxlan-decap.yml +++ b/ansible/roles/test/tasks/vxlan-decap.yml @@ -1,115 +1,4 @@ -# example - -- block: - - fail: msg="Please set ptf_host variable" - when: ptf_host is not defined - - - name: Remove existing ip from ptf host - script: roles/test/files/helpers/remove_ip.sh - delegate_to: "{{ ptf_host }}" - - - name: Make all mac addresses in ptf unique - should be done in vm_set - script: roles/test/files/helpers/change_mac.sh - delegate_to: "{{ ptf_host }}" - - - name: Copy tests to the PTF container - copy: src=roles/test/files/ptftests dest=/root - delegate_to: "{{ ptf_host }}" - - - name: Copy arp responder to the PTF container - copy: src=roles/test/files/helpers/arp_responder.py dest=/opt - delegate_to: "{{ ptf_host }}" - - - name: Copy arp responder supervisor configuration to the PTF container - template: src=arp_responder.conf.j2 dest=/etc/supervisor/conf.d/arp_responder.conf - vars: - - arp_responder_args: '--conf /tmp/vxlan_arpresponder.conf' - delegate_to: "{{ ptf_host }}" - - - name: Update supervisor configuration - include_tasks: "roles/test/tasks/common_tasks/update_supervisor.yml" - vars: - supervisor_host: "{{ ptf_host }}" - - - name: Restart DUT. Wait 240 seconds after SONiC started ssh - include_tasks: reboot.yml - vars: - ready_timeout: 240 - - - name: Render DUT parameters to json file for the test - template: src=vxlan_decap.json.j2 dest=/tmp/vxlan_decap.json - delegate_to: "{{ ptf_host }}" - - - name: Render DUT vxlan configuration. Tunnel - template: src=vxlan_db.tunnel.json.j2 dest=/tmp/vxlan_db.tunnel.json - - - name: Render DUT vxlan configuration. 
Tunnel Maps - template: src=vxlan_db.maps.json.j2 dest=/tmp/vxlan_db.maps.{{ item }}.json - with_items: "{{ minigraph_vlans }}" - - - set_fact: - send_packet_count: 10 - when: send_packet_count is not defined - - - include_tasks: ptf_runner.yml - vars: - ptf_test_name: Vxlan decap test - No vxlan configuration - ptf_test_dir: ptftests - ptf_test_path: vxlan-decap.Vxlan - ptf_platform: remote - ptf_platform_dir: ptftests - ptf_qlen: 1000 - ptf_test_params: - - vxlan_enabled=False - - config_file='/tmp/vxlan_decap.json' - - count={{ send_packet_count }} - - - name: Configure vxlan decap tunnel - shell: sonic-cfggen -j /tmp/vxlan_db.tunnel.json --write-to-db - - - name: Configure vxlan decap tunnel maps - shell: sonic-cfggen -j /tmp/vxlan_db.maps.{{ item }}.json --write-to-db - with_items: "{{ minigraph_vlans }}" - - - include_tasks: ptf_runner.yml - vars: - ptf_test_name: Vxlan decap test - vxlan configuration applied - ptf_test_dir: ptftests - ptf_test_path: vxlan-decap.Vxlan - ptf_platform: remote - ptf_platform_dir: ptftests - ptf_qlen: 1000 - ptf_test_params: - - vxlan_enabled=True - - config_file='/tmp/vxlan_decap.json' - - count={{ send_packet_count }} - - - name: Remove vxlan tunnel maps configuration - shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL_MAP|tunnelVxlan|map{{ item }}" - with_items: "{{ minigraph_vlans }}" - - - name: Remove vxlan tunnel configuration - shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL|tunnelVxlan" - - - include_tasks: ptf_runner.yml - vars: - ptf_test_name: Vxlan decap test - vxlan configuration removed - ptf_test_dir: ptftests - ptf_test_path: vxlan-decap.Vxlan - ptf_platform: remote - ptf_platform_dir: ptftests - ptf_qlen: 1000 - ptf_test_params: - - vxlan_enabled=False - - config_file='/tmp/vxlan_decap.json' - - count={{ send_packet_count }} - -- block: - - name: Remove vxlan tunnel maps configuration - shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL_MAP|tunnelVxlan|map{{ item }}" - with_items: "{{ minigraph_vlans }}" - - - name: Remove vxlan tunnel configuration - shell: docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL|tunnelVxlan" - tags: - - always +- name: run test + include_tasks: roles/test/tasks/pytest_runner.yml + vars: + test_node: vxlan/test_vxlan_decap.py diff --git a/ansible/roles/test/templates/ferret.conf.j2 b/ansible/roles/test/templates/ferret.conf.j2 deleted file mode 100644 index 485153c819..0000000000 --- a/ansible/roles/test/templates/ferret.conf.j2 +++ /dev/null @@ -1,10 +0,0 @@ -[program:ferret] -command=/usr/bin/python /opt/ferret.py {{ ferret_args }} -process_name=ferret -stdout_logfile=/tmp/ferret.out.log -stderr_logfile=/tmp/ferret.err.log -redirect_stderr=false -autostart=false -autorestart=true -startsecs=1 -numprocs=1 diff --git a/ansible/roles/test/templates/ferret.conf.j2 b/ansible/roles/test/templates/ferret.conf.j2 new file mode 120000 index 0000000000..329cecf469 --- /dev/null +++ b/ansible/roles/test/templates/ferret.conf.j2 @@ -0,0 +1 @@ +../../../../tests/arp/files/ferret.conf.j2 \ No newline at end of file diff --git a/ansible/roles/test/templates/pfc_storm_icos.j2 b/ansible/roles/test/templates/pfc_storm_icos.j2 new file mode 100644 index 0000000000..b1382b3be7 --- /dev/null +++ b/ansible/roles/test/templates/pfc_storm_icos.j2 @@ -0,0 +1,5 @@ +bash +cd /mnt/flash +sudo python {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -n {{pfc_frames_number}} -i {{"fpti1_"~pfc_fanout_interface|replace("/", "_")}} -r {{ansible_eth0_ipv4_addr}} & 
+exit +exit diff --git a/ansible/roles/test/templates/pfc_storm_stop_icos.j2 b/ansible/roles/test/templates/pfc_storm_stop_icos.j2 new file mode 100644 index 0000000000..a483cf255d --- /dev/null +++ b/ansible/roles/test/templates/pfc_storm_stop_icos.j2 @@ -0,0 +1,5 @@ +bash +cd /mnt/flash +sudo pkill -f {{pfc_gen_file}} +exit +exit diff --git a/ansible/roles/test/templates/vxlan_db.maps.json.j2 b/ansible/roles/test/templates/vxlan_db.maps.json.j2 deleted file mode 100644 index 1be0cf7c6e..0000000000 --- a/ansible/roles/test/templates/vxlan_db.maps.json.j2 +++ /dev/null @@ -1,9 +0,0 @@ -{ - "VXLAN_TUNNEL_MAP": { - "tunnelVxlan|map{{ item }}": { - "vni": "{{ item | replace("Vlan", "") | int + 336 }}", - "vlan": "{{ item }}" - } - } -} - diff --git a/ansible/roles/test/templates/vxlan_db.tunnel.json.j2 b/ansible/roles/test/templates/vxlan_db.tunnel.json.j2 deleted file mode 100644 index f4671fe6e2..0000000000 --- a/ansible/roles/test/templates/vxlan_db.tunnel.json.j2 +++ /dev/null @@ -1,8 +0,0 @@ -{ - "VXLAN_TUNNEL": { - "tunnelVxlan": { - "src_ip": "{{ minigraph_lo_interfaces[0]['addr'] }}", - "dst_ip": "8.8.8.8" - } - } -} diff --git a/ansible/roles/test/templates/vxlan_decap.json.j2 b/ansible/roles/test/templates/vxlan_decap.json.j2 deleted file mode 100644 index ab68c860c3..0000000000 --- a/ansible/roles/test/templates/vxlan_decap.json.j2 +++ /dev/null @@ -1,9 +0,0 @@ -{ - "minigraph_port_indices": {{ minigraph_port_indices | to_nice_json }}, - "minigraph_portchannel_interfaces": {{ minigraph_portchannel_interfaces | to_nice_json }}, - "minigraph_portchannels": {{ minigraph_portchannels | to_nice_json }}, - "minigraph_lo_interfaces": {{ minigraph_lo_interfaces | to_nice_json }}, - "minigraph_vlans": {{ minigraph_vlans | to_nice_json }}, - "minigraph_vlan_interfaces": {{ minigraph_vlan_interfaces | to_nice_json }}, - "dut_mac": {{ ansible_Ethernet0['macaddress'] | to_nice_json }} -} diff --git a/ansible/testbed-cli.sh b/ansible/testbed-cli.sh index b76bbf3b8c..6d0ebd79ba 100755 --- a/ansible/testbed-cli.sh +++ b/ansible/testbed-cli.sh @@ -57,37 +57,38 @@ function usage function read_file { - echo reading + echo reading - # Filter testbed names in the first column in the testbed definition file - line=$(cat $tbfile | grep "^$1,") + # Filter testbed names in the first column in the testbed definition file + line=$(cat $tbfile | grep "^$1,") - if [ $? -ne 0 ] - then + if [ $? 
-ne 0 ] + then echo "Couldn't find topology name '$1'" exit - fi + fi - NL=' + NL=' ' - case $line in - *"$NL"*) echo "Find more than one topology names in $tbfile" - exit - ;; - *) echo Found topology $1 - ;; - esac - - IFS=, read -r -a line_arr <<< $line - - testbed_name=${line_arr[1]} - topo=${line_arr[2]} - ptf_imagename=${line_arr[3]} - ptf=${line_arr[4]} - ptf_ip=${line_arr[5]} - server=${line_arr[6]} - vm_base=${line_arr[7]} - dut=${line_arr[8]} + case $line in + *"$NL"*) echo "Find more than one topology names in $tbfile" + exit + ;; + *) echo Found topology $1 + ;; + esac + + IFS=, read -r -a line_arr <<< $line + + testbed_name=${line_arr[1]} + topo=${line_arr[2]} + ptf_imagename=${line_arr[3]} + ptf=${line_arr[4]} + ptf_ip=${line_arr[5]} + server=${line_arr[6]} + vm_base=${line_arr[7]} + dut=${line_arr[8]//;/,} + duts=${dut//[\[\] ]/} } function start_vms @@ -123,9 +124,9 @@ function add_topo read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_add_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" -e vm_type="$vm_type" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_add_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" -e vm_type="$vm_type" $@ - ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$dut" $@ + ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$duts" $@ # Delete the obsoleted arp entry for the PTF IP ip neighbor flush $ptf_ip @@ -143,7 +144,7 @@ function remove_topo read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_remove_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" -e vm_type="$vm_type" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_remove_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" -e vm_type="$vm_type" $@ echo Done } @@ -158,9 +159,9 @@ function renumber_topo read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ - ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$dut" $@ + ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$duts" $@ echo Done } @@ -171,11 +172,11 @@ 
function refresh_dut passwd=$2 shift shift - echo "Refresh $dut in '${topology}'" + echo "Refresh $duts in '${topology}'" read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_refresh_dut.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_refresh_dut.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ echo Done } @@ -186,7 +187,7 @@ function connect_vms read_file $1 - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_connect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_connect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" echo Done } @@ -197,7 +198,7 @@ function disconnect_vms read_file $1 - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_disconnect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_disconnect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" echo Done } @@ -216,7 +217,7 @@ function generate_minigraph read_file $topology - ansible-playbook -i "$inventory" config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$dut" -e testbed_name="$topology" -e testbed_file=$tbfile -e vm_file=$vmfile -e local_minigraph=true $@ + ansible-playbook -i "$inventory" config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$duts" -e testbed_name="$topology" -e testbed_file=$tbfile -e vm_file=$vmfile -e local_minigraph=true $@ echo Done } @@ -234,7 +235,7 @@ function deploy_minigraph read_file $topology - ansible-playbook -i "$inventory" config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$dut" -e testbed_name="$topology" -e testbed_file=$tbfile -e vm_file=$vmfile -e deploy=true -e save=true $@ + ansible-playbook -i "$inventory" config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$duts" -e testbed_name="$topology" -e testbed_file=$tbfile -e vm_file=$vmfile -e deploy=true -e save=true $@ echo Done } @@ -252,7 +253,7 @@ function test_minigraph read_file $topology - ansible-playbook -i "$inventory" --diff --connection=local --check config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$dut" -e testbed_name="$topology" -e testbed_file=$tbfile -e vm_file=$vmfile -e local_minigraph=true $@ + ansible-playbook -i "$inventory" --diff --connection=local --check config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$duts" -e testbed_name="$topology" -e testbed_file=$tbfile -e vm_file=$vmfile -e local_minigraph=true $@ echo Done } @@ -274,7 +275,7 @@ function connect_topo read_file $1 - ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="$2" -e "dut=$dut" + 
ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="$2" -e "dut=$duts" } vmfile=veos diff --git a/ansible/vars/qos.yml b/ansible/vars/qos.yml index c3d767c264..d3327783fb 100644 --- a/ansible/vars/qos.yml +++ b/ansible/vars/qos.yml @@ -502,28 +502,28 @@ qos_params: ecn: 1 pg: 3 pkts_num_leak_out: 19 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 1979 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7063 xoff_2: dscp: 4 ecn: 1 pg: 4 pkts_num_leak_out: 19 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 1979 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7063 xon_1: dscp: 3 ecn: 1 pg: 3 pkts_num_leak_out: 19 - pkts_num_trig_pfc: 1458 + pkts_num_trig_pfc: 6542 pkts_num_dismiss_pfc: 11 xon_2: dscp: 4 ecn: 1 pg: 4 pkts_num_leak_out: 19 - pkts_num_trig_pfc: 1458 + pkts_num_trig_pfc: 6542 pkts_num_dismiss_pfc: 11 ecn_1: dscp: 8 @@ -591,7 +591,7 @@ qos_params: dst_port_id: 24 pgs_num: 10 pkts_num_leak_out: 19 - pkts_num_trig_pfc: 732 + pkts_num_trig_pfc: 1194 pkts_num_hdrm_full: 520 pkts_num_hdrm_partial: 361 wm_pg_shared_lossless: @@ -600,7 +600,7 @@ qos_params: pg: 3 pkts_num_leak_out: 19 pkts_num_fill_min: 6 - pkts_num_trig_pfc: 1458 + pkts_num_trig_pfc: 6542 cell_size: 208 wm_pg_shared_lossy: dscp: 8 @@ -615,8 +615,8 @@ qos_params: ecn: 1 pg: 3 pkts_num_leak_out: 19 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 1979 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7063 cell_size: 208 wm_q_shared_lossless: dscp: 3 @@ -624,7 +624,7 @@ qos_params: queue: 3 pkts_num_leak_out: 19 pkts_num_fill_min: 8 - pkts_num_trig_ingr_drp: 1979 + pkts_num_trig_ingr_drp: 7063 cell_size: 208 wm_q_shared_lossy: dscp: 8 @@ -641,8 +641,8 @@ qos_params: queue: 3 pkts_num_leak_out: 19 pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 1979 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7063 pkts_num_fill_egr_min: 8 cell_size: 208 wm_buf_pool_lossy: @@ -661,28 +661,28 @@ qos_params: ecn: 1 pg: 3 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 xoff_2: dscp: 4 ecn: 1 pg: 4 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 xon_1: dscp: 3 ecn: 1 pg: 3 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 + pkts_num_trig_pfc: 6542 pkts_num_dismiss_pfc: 11 xon_2: dscp: 4 ecn: 1 pg: 4 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 + pkts_num_trig_pfc: 6542 pkts_num_dismiss_pfc: 11 ecn_1: dscp: 8 @@ -750,7 +750,7 @@ qos_params: dst_port_id: 16 pgs_num: 4 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1095 + pkts_num_trig_pfc: 2620 pkts_num_hdrm_full: 1292 pkts_num_hdrm_partial: 1165 wm_pg_shared_lossless: @@ -759,7 +759,7 @@ qos_params: pg: 3 pkts_num_leak_out: 36 pkts_num_fill_min: 6 - pkts_num_trig_pfc: 1458 + pkts_num_trig_pfc: 6542 cell_size: 208 wm_pg_shared_lossy: dscp: 8 @@ -774,8 +774,8 @@ qos_params: ecn: 1 pg: 3 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 cell_size: 208 wm_q_shared_lossless: dscp: 3 @@ -783,7 +783,7 @@ qos_params: queue: 3 pkts_num_leak_out: 36 pkts_num_fill_min: 8 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_ingr_drp: 7835 cell_size: 208 wm_q_shared_lossy: dscp: 8 @@ -800,8 +800,8 @@ qos_params: queue: 3 pkts_num_leak_out: 36 pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_pfc: 6542 + 
pkts_num_trig_ingr_drp: 7835 pkts_num_fill_egr_min: 8 cell_size: 208 wm_buf_pool_lossy: @@ -820,28 +820,28 @@ qos_params: ecn: 1 pg: 3 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 xoff_2: dscp: 4 ecn: 1 pg: 4 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 xon_1: dscp: 3 ecn: 1 pg: 3 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 + pkts_num_trig_pfc: 6542 pkts_num_dismiss_pfc: 11 xon_2: dscp: 4 ecn: 1 pg: 4 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 + pkts_num_trig_pfc: 6542 pkts_num_dismiss_pfc: 11 ecn_1: dscp: 8 @@ -909,7 +909,7 @@ qos_params: dst_port_id: 16 pgs_num: 4 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1095 + pkts_num_trig_pfc: 2620 pkts_num_hdrm_full: 1292 pkts_num_hdrm_partial: 1165 wm_pg_shared_lossless: @@ -918,7 +918,7 @@ qos_params: pg: 3 pkts_num_leak_out: 36 pkts_num_fill_min: 6 - pkts_num_trig_pfc: 1458 + pkts_num_trig_pfc: 6542 cell_size: 208 wm_pg_shared_lossy: dscp: 8 @@ -933,8 +933,8 @@ qos_params: ecn: 1 pg: 3 pkts_num_leak_out: 36 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 cell_size: 208 wm_q_shared_lossless: dscp: 3 @@ -942,7 +942,7 @@ qos_params: queue: 3 pkts_num_leak_out: 36 pkts_num_fill_min: 8 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_ingr_drp: 7835 cell_size: 208 wm_q_shared_lossy: dscp: 8 @@ -959,8 +959,8 @@ qos_params: queue: 3 pkts_num_leak_out: 36 pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 1458 - pkts_num_trig_ingr_drp: 2751 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 pkts_num_fill_egr_min: 8 cell_size: 208 wm_buf_pool_lossy: diff --git a/ansible/vtestbed.csv b/ansible/vtestbed.csv index 52b678a934..9b9e96b212 100644 --- a/ansible/vtestbed.csv +++ b/ansible/vtestbed.csv @@ -1,4 +1,4 @@ # conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,server,vm_base,dut,comment -vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-unknown,10.250.0.102/24,server_1,VM0100,vlab-01,Tests virtual switch vm -vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-unknown,10.250.0.102/24,server_1,VM0100,vlab-02,Tests virtual switch vm -vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-unknown,10.250.0.106/24,server_1,VM0104,vlab-03,Tests virtual switch vm +vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-unknown,10.250.0.102/24,server_1,VM0100,[vlab-01],Tests virtual switch vm +vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-unknown,10.250.0.102/24,server_1,VM0100,[vlab-02],Tests virtual switch vm +vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-unknown,10.250.0.106/24,server_1,VM0104,[vlab-03],Tests virtual switch vm diff --git a/docs/ACL-test-plan.md b/docs/ACL-test-plan.md new file mode 100644 index 0000000000..68329a2871 --- /dev/null +++ b/docs/ACL-test-plan.md @@ -0,0 +1,426 @@ +## [DRAFT, UNDER DEVELOPMENT] + + +- [Overview](#overview) + - [Scope](#scope) + - [Related **DUT** CLI commands](#related-dut-cli-commands) +- [Setup configuration](#setup-configuration) + - [Scripts for generating ACL configuration on SONIC](#Scripts-for-generating-ACL-configuration-on-SONIC) + - [Ansible scripts to setup and run ACL test](#Ansible scripts to setup and run ACL test) + - [acl_testbed.yml](#acl-testbed-yml) + - [Setup of DUT switch](#Setup-of-DUT-switch) + - [J2 templates](#j2-templates) +- [PTF Test](#ptf-test) + - [Input files for PTF test](#input-files-for-ptf-test) + - [Traffic validation in PTF](#traffic-validation-in-ptf) +- [Test cases](#test-cases) +- 
[TODO](#todo) +- [Open Questions](#open-questions) + +## Overview +The purpose is to test the functionality of ACL rules on the SONIC switch DUT, with and without LAGs configured, closely resembling the production environment. +The test assumes all necessary configuration, including ACL and LAG configuration and BGP routes, is already pre-configured on the SONIC switch before the test runs. + +### Scope +The test is targeting a running SONIC system with fully functioning configuration. +The purpose of the test is not to test a specific SAI API, but to functionally test ACL on a SONIC system, making sure that traffic flows correctly, according to the BGP routes advertised by the BGP peers of the SONIC switch, and the LAG configuration. + +NOTE: The ACL+LAG test will be able to run **only** in the testbed specifically created for LAG. + +### Related **DUT** CLI commands +Manual ACL configuration can be done using the swssconfig utility in the swss container. + + swssconfig + +## Test structure +### Setup configuration +ACL configuration should be created on the DUT before running the test. The configuration can be deployed using the ansible sonic test playbook with the tag acltb_configure. + +#### Scripts for generating ACL configuration on SONIC + +There will be two j2 template files for the ACL test configuration: acltb_test_table.j2 and acltb_test_rules.j2. They will be used by the Ansible playbook to generate json files and apply them on the switch. + +#### Ansible scripts to setup and run ACL test + +##### acl_testbed.yml + +acl_testbed.yml, when run with ***different tags***, will do the following: + +Tag ***acltb_configure*** will generate acl json files for the ACL test out of the corresponding j2 files and apply them on the switch. +Tag ***acltb_test*** will run the ACL (or ACL+LAG) test. +Tag ***acltb_cleanup*** will clear the test ACL configuration from the switch. + +The ACL test consists of a number of subtests, and each of them will include the following steps: + +1. Run loganalyzer 'init' phase +2. Run ACL Sub Test +3. Run loganalyzer 'analyze' phase + +ACL subtests will be implemented in the PTF (acl_testbed_test.py). Every subtest will be implemented in a separate class. + +#### Setup of DUT switch +Setup of the SONIC DUT will be done by an Ansible script. During setup, Ansible will copy a json file containing the ACL configuration to the swss container on the DUT. The swssconfig utility will be used to push the configuration to the SONiC DB. The data will be consumed by orchagent. + +JSON Sample: + +table.json + + [ + { + "ACL_TABLE:Drop_IP": { + "policy_desc" : "Drop_IP_Traffic", + "type" : "L3", + "ports" : "Ethernet0" + }, + "OP": "SET" + } + ] + +rule.json + + [ + { + "ACL_RULE_TABLE:Drop_IP:TheDrop": { + "priority" : "55", + "ETHER_TYPE" : "0x0800", + "PACKET_ACTION" : "DROP" + }, + "OP": "SET" + } + ] + +**NOTE** +Table and rule configurations will reside in two different jsons, and the table configuration will be applied before the rules to ensure the correct object creation order in SAI. + +##### J2 templates +acltb_test_table.j2 will configure a single table bound to all switch ports.
+ + [ + { + "ACL_TABLE:ACL_Testbed_Test_Table": { + "policy_desc" : "Thistable_contains_rules_needed_for_the_testbed_regression_tests", + "type" : "L3", + "ports" : "{{ list_of_ingress_ports }}" + }, + "OP": "SET" + } + ] + +acltb_test_rules.j2 will contain the ACL rules needed for the test. + +ACL Rules: + +**RulesN..N+1000-:** Any rules, action: forward (placeholder) +**Rule#1:** match src ip 10.0.0.2, action: drop +**Rule#2:** match dst ip TBD, action: drop +**Rule#3:** match l4_src_port 0x1235, action: drop +**Rule#4:** match l4_dst_port 0x1235, action: drop +**Rule#5:** match ether type 0x1234, action: forward +**Rule#6:** match ip protocol 0x7E, action: drop +**Rule#7:** match tcp flags 0xFF/RST, action: drop +**Rule#8:** match ip type TBD, action: drop +**Rules#9.1-9.8:** match source ports range [[0x1240..0x1249], [0x1250..0x1259]...], action: drop (8 rules with different port ranges) +**Rules#10.1-10.8:** match destination ports range [[0x1240..0x1249], [0x1250..0x1259]...], action: drop (8 rules with different port ranges) +**Rules#11-12:** check priority: match some src ip 10.0.0.3, action: drop + match src ip 10.0.0.3 (higher prio), action: forward + +/if needed additionally match src ip/ + +## PTF Test + +### Input files for PTF test + +The PTF test will generate traffic between ports and make sure it passes according to the configured ACL rules. Depending on the testbed topology and the existing configuration (e.g. ECMP, LAGs, etc.) packets may arrive at different ports. Therefore, port connection information will be generated from the minigraph and supplied to the PTF script. + +### Traffic validation in PTF +Depending on the test case, the PTF test will verify that the packet either arrived or was dropped. + +## Test cases + +Each test case will be additionally validated by the loganalyzer and the counter-reading utility. + +### Generic packet +A packet with the values below should not trigger any "drop" rule. +
+###[ Ethernet ]###
+  dst = [auto]
+  src = [auto]
+  type = 0x800
+###[ IP ]###
+    version = 4  
+    ttl =   
+    proto = tcp  
+    chksum = None  
+    src = 10.0.0.1  
+    dst = [get_from_route_info]
+###[ TCP ]###  
+    sport = 4660 (0x1234)  
+    dport = http (80)  
+    flags = S  
+
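+
+For illustration, such a packet could be built in the PTF script roughly as below (a minimal sketch using `ptf.testutils`; `router_mac` and `route_dst` are hypothetical placeholders for values taken from the test input files, not names defined by this plan):
+
+```python
+# Sketch: build the generic test packet with PTF's testutils helpers.
+import ptf.testutils as testutils
+
+router_mac = "00:11:22:33:44:55"  # placeholder: DUT MAC from the input files
+route_dst = "192.168.0.1"         # placeholder: [get_from_route_info]
+
+pkt = testutils.simple_tcp_packet(
+    eth_dst=router_mac,
+    ip_src="10.0.0.1",
+    ip_dst=route_dst,
+    ip_ttl=64,
+    tcp_sport=0x1234,  # 4660
+    tcp_dport=80,      # http
+    tcp_flags="S",
+)
+```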
+ +### Test case \#0 - Resource consumption test + +#### Test objective + +Verify whether ACL engine resources are being freed on rule/range/counter/table delete. + +#### Test steps + +- Clear ACL configuration. +- Reapply ACL configuration. +- Verify there are no errors in the log. + +### Test case \#1 - Verify source IP match + +#### Test objective + +Verify match source IP address works. + +#### Packet to trigger the rule #1 +
+...
+###[ IP ]###
+    version = 4  
+    ttl =   
+    proto = tcp  
+    chksum = None  
+    src = 10.0.0.2
+    dst = [get_from_route_info]
+...
+
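+
+The drop verification shared by these test cases could look roughly like this in PTF (a sketch, assuming `src_port` and `dst_ports` come from the generated port connection information):
+
+```python
+# Sketch: send the trigger packet and verify the DUT dropped it.
+import ptf.testutils as testutils
+
+def check_dropped(test, pkt, src_port, dst_ports):
+    testutils.send(test, src_port, pkt)
+    # PTF docker must not receive the packet on any candidate egress port.
+    testutils.verify_no_packet_any(test, pkt, dst_ports)
+```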
+ +#### Test steps + +- PTF host will send packet specifying particular source IP address in the packet. +- When packet reaches SONIC DUT, it should be dropped by the rule #1. PTF docker should not receive this packet. +- Counter for the rule #1 should increment + + +### Test case \#2 - Verify destination IP match + +#### Test objective + +Verify match destination IP address works. + +#### Packet to trigger the rule #2 +
+...
+###[ IP ]###
+    version = 4  
+    ttl =   
+    proto = tcp  
+    chksum = None  
+    src = 10.0.0.1
+    dst = [get_from_route_info]
+...
+
+ +#### Test steps + +- PTF host will send packet specifying particular destination IP address in the packet. +- When packet reaches SONIC DUT, it should be dropped by the rule #2. PTF docker should not receive this packet. +- Counter for the rule #2 should increment + + +### Test case \#3 - Verify L4 source port match + +#### Test objective + +Verify match L4 source port works. + +#### Packet to trigger the rule #3 +
+...
+###[ TCP ]###  
+    sport = 4661 (0x1235)
+    dport = 80
+    flags = S
+...
+
+ +#### Test steps + +- PTF host will send packet with the specific L4 source port in the packet. +- When packet reaches SONIC DUT, it should be dropped by the rule #3. PTF docker should not receive this packet. +- Counter for the rule #3 should increment + +### Test case \#4 - Verify L4 destination port match + +#### Test objective + +Verify match L4 destination port works. + +#### Packet to trigger the rule #4 +
+...
+###[ TCP ]###  
+    sport = 4660 (0x1234)
+    dport = 4661 (0x1235)
+    flags = S
+...
+
+ +#### Test steps + +- PTF host will send packet with the specific L4 destination port in the packet. +- When packet reaches SONIC DUT, it should be dropped by the rule #4. PTF docker should not receive this packet. +- Counter for the rule #4 should increment + +### Test case \#5 - Verify ether type match + +#### Test objective + +Verify match packet ether type works. + +#### Packet to trigger the rule #5 +
+###[ Ethernet ]###
+  dst = [auto]
+  src = [auto]
+  type = 0x1234
+...
+
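+
+Since rule #5 is a "forward" rule, the check is the inverse of the drop cases (a sketch, same assumptions as above):
+
+```python
+# Sketch: verify the forwarded packet is received on one of the egress ports.
+import ptf.testutils as testutils
+
+def check_forwarded(test, pkt, src_port, dst_ports):
+    testutils.send(test, src_port, pkt)
+    # Mask or adjust L2/TTL fields as needed before matching.
+    testutils.verify_packet_any_port(test, pkt, dst_ports)
+```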
+ +#### Test steps + +- PTF host will send packet with the specific ether type in the packet. +- When packet reaches SONIC DUT, it would normally be dropped because of the non-IP ethertype, but it will be forwarded by the rule #5. PTF docker should receive this packet. +- Counter for the rule #5 should increment + +***NOTE*** The ether type used in this test should be "exotic" enough to exclude possible interference with the other tests' traffic. + +### Test case \#6 - Verify ip protocol match + +#### Test objective + +Verify match ip protocol works. + +#### Packet to trigger the rule #6 +
+...
+###[ IP ]###
+    version = 4  
+    ttl =   
+    proto = 0x7E
+    chksum = None  
+    src = 10.0.0.1  
+    dst = [get_from_route_info]
+...
+
+#### Test steps + +- PTF host will send packet with the specific ip protocol field in the packet. +- When packet reaches SONIC DUT, it should be dropped by the rule #6. PTF docker should not receive this packet. +- Counter for the rule #6 should increment + +***NOTE*** The IP protocol used in this test should be "exotic" enough to exclude possible interference with the other tests' traffic, for example 0x7E (Combat Radio Transport Protocol). + +### Test case \#7 - Verify TCP flags match + +#### Test objective + +Verify match TCP flags works. + +#### Packet to trigger the rule #7 +
+...
+###[ TCP ]###  
+    sport = 4660 (0x1234)
+    dport = 80
+    flags = RS
+...
+
+#### Test steps + +- PTF host will send TCP packet with the specific flags in the packet. +- When packet reaches SONIC DUT, it should be dropped by the rule #7. PTF docker should not receive this packet. +- Counter for the rule #7 should increment. + +### Test case \#8 - Verify ip type match + +#### Test objective + +Verify match ip type works. + +#### Test steps + +- PTF host will send packet with the specific ip type field in the packet. +- When packet reaches SONIC DUT, it should be dropped by the rule #8. PTF docker should not receive this packet. +- Counter for the rule #8 should increment + +***TODO*** Think about IP protocol to use for the test. Maybe add another match criterion (source ip?) + +### Test case \#9 - Verify source port range match + +#### Test objective + +Verify match source port range works. + +#### Packet to trigger the rule #9 +
+...
+###[ TCP ]###  
+    sport = 0x1236..0x1240
+    dport = 80
+    flags = S
+...
+
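+
+The range notation above means one packet per source port in the inclusive range, e.g. (a sketch reusing the generic packet builder):
+
+```python
+# Sketch: iterate the source port range 0x1236..0x1240 inclusively.
+import ptf.testutils as testutils
+
+for sport in range(0x1236, 0x1240 + 1):
+    pkt = testutils.simple_tcp_packet(
+        ip_src="10.0.0.1",
+        tcp_sport=sport,
+        tcp_dport=80,
+        tcp_flags="S",
+    )
+    # send and verify the drop as in the earlier sketch
+```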
+#### Test steps + +- PTF host will send TCP packet with the specific source port in the packet. +- When packet reaches SONIC DUT, it should be dropped by the rule #9. PTF docker should not receive this packet. +- Counter for the rule #9 should increment + +### Test case \#10 - Verify destination port range match + +#### Test objective + +Verify match destination port range works. + +#### Packet to trigger the rule #10 +
+...
+###[ TCP ]###  
+    sport = 0x1234
+    dport = 0x1236..0x1240
+    flags = S
+...
+
+#### Test steps + +- PTF host will send TCP packet with the specific destination port in the packet. +- When packet reaches SONIC DUT, it should be dropped by the rule #10. PTF docker should not receive this packet. +- Counter for the rule #10 should increment + +### Test case \#11 - Verify rules priority + +#### Test objective + +Verify rules priority works. + +#### Test steps + +- PTF host will send TCP packet with the specific source ip in the packet. +- When packet reaches SONIC DUT, it will not be dropped by the rule #11 because rule #12 with the same matching criteria allows packet to pass. +- PTF docker verifies the packet arrived. +- Counter for the rule #12 should increment + +### Test case \#12 - False rule triggering check + +#### Test objective + +Verify rules are not triggered by mistake. +This test should be executed last. + +#### Test steps + +- Send several "Generic packets" +- Verify that each rule's counter value equals the number of packets used for that rule across all tests. + +### Other possible tests +- match combinations + +## TODO +- ACL+LAG test configuration and testcases (separate ansible tag) + +## Open Questions diff --git a/docs/BGP-GR-helper-mode-test-plan.md b/docs/BGP-GR-helper-mode-test-plan.md new file mode 100644 index 0000000000..3945f13e1f --- /dev/null +++ b/docs/BGP-GR-helper-mode-test-plan.md @@ -0,0 +1,90 @@ +- [Overview](#overview) + - [Scope](#scope) + - [Testbed](#testbed) +- [Setup configuration](#setup-configuration) + - [Arista VM configuration](#arista-vm-configuration) + - [Ansible scripts to setup and run test](#ansible-scripts-to-setup-and-run-test) + - [bgp_gr_helper.yml](#bgp-gr-helper-yml) +- [PTF Test](#ptf-test) + - [Input files for PTF test](#input-files-for-ptf-test) + - [Traffic validation in PTF](#traffic-validation-in-ptf) +- [Test cases](#test-cases) +- [TODO](#todo) +- [Open Questions](#open-questions) + +## Overview +The purpose is to test the functionality of BGP GR helper mode on the SONIC switch DUT, closely resembling the production environment. +The test assumes all necessary configuration is already pre-configured on the SONIC switch before the test runs. + +### Scope +The test is targeting a running SONIC system with fully functioning configuration. +The purpose of the test is not to test a specific API, but to functionally test BGP GR helper mode on a SONIC system, making sure that traffic flows correctly, according to the BGP routes advertised by the BGP peers of the SONIC switch. + +### Testbed +The test will run on the following testbeds: +- t1 +- t1-lag + +## Setup configuration + +#### Arista VM configuration + +The test assumes that BGP GR is enabled and preconfigured on the Arista VMs. The BGP GR timer value should be greater than the time required for a VM reboot. + +#### Ansible scripts to setup and run test + +##### bgp_gr_helper.yml + +bgp_gr_helper.yml, when run with the tag "bgp_gr_helper", will do the following: + +1. Randomly choose VM. +2. Run test. + +The BGP GR helper test consists of a number of subtests, and each of them will include the following steps: + +1. Run loganalyzer 'init' phase +2. Run BGP GR helper Sub Test +3. Run loganalyzer 'analyze' phase + +## PTF Test + +To run traffic, the FIB PTF test will be reused. + +## Test cases + +Each test case will be additionally validated by the loganalyzer utility. + +### Test case \#1 - BGP GR helper mode. + +#### Test objective + +Verify that routes are preserved during neighbor graceful restart. + +#### Test steps + +- Randomly choose VM for the test. +- Reboot VM.
+- Verify BGP timeout (routes should stay in the fib for at least 115 seconds). +- Verify all routes are preserved (no reinstallation after BGP open message from the neighbor). +- Verify that the BGP session with the VM is established. + +### Test case \#2 - BGP GR helper mode routes change. + +#### Test objective + +Verify that traffic runs without changes during neighbor graceful restart. + +#### Test steps + +- Randomly choose VM for the test. +- Change VM startup config (advertised routes should be different). +- Reboot VM. +- Verify that preserved routes are removed when the VM is back. +- Verify that new routes are installed when the VM is back. +- Restore VM startup config. + +## TODO + +## Open Questions +- Should tests run for neighbors behind physical interfaces only or behind LAGs as well? +- On which topologies should the test run? diff --git a/docs/BGP-MP-test-plan.md b/docs/BGP-MP-test-plan.md new file mode 100644 index 0000000000..4003ff0db6 --- /dev/null +++ b/docs/BGP-MP-test-plan.md @@ -0,0 +1,55 @@ +# BGP-MP test plan + +* [Overview](#Overview) + * [Scope](#Scope) + * [Testbed](#Testbed) +* [Setup configuration](#Setup%20configuration) + * [Ansible scripts to setup and run test](#Ansible%20scripts%20to%20setup%20and%20run%20test) + * [bgp_mp.yml](#bgp_mp.yml) + * [Setup of DUT switch](#Setup%20of%20DUT%20switch) +* [Test](#Test) +* [Test cases](#Test%20cases) +* [TODO](#TODO) +* [Open questions](#Open%20questions) + +## Overview +The purpose is to test the functionality of BGP-MP on the SONIC switch DUT, closely resembling the production environment. The test assumes all necessary configurations are already pre-configured on the SONIC switch before the test runs. + +### Scope +The test is targeting a running SONIC system with fully functioning configuration. The purpose of the test is not to test a specific API, but to functionally test BGP-MP on a SONIC system. + +### Testbed +The test will run on the following testbeds: +* t0 + +## Setup configuration +An IPv4 BGP neighborship will be configured between the DUT and exabgp, and each neighbor will redistribute IPv6 routes to the other. +### Ansible scripts to setup and run test +#### bgp_mp.yml +bgp_mp.yml, when run with the tag "bgp_mp", will do the following: +1. Generate and apply exabgp configuration. +2. Run test. +3. Clean up dynamic and temporary exabgp configuration. + +## Test +On the PTF host, the exabgp tool will be used to configure a bgp peer and redistribute IPv6 routes via the IPv4 BGP session. + +## Test cases +### Test case # 1 – BGP-MP IPv6 routes over IPv4 session +#### Test objective +Verify that IPv6 routes are correctly redistributed over an IPv4 BGP session. +#### Test steps +* Generate IPv4 BGP peer configuration for the exabgp instance. +* Generate IPv6 routes, to be announced via the IPv4 session, for the exabgp instance. +* Run the exabgp instance. +* Verify that the IPv4 BGP neighborship is established. +* Redistribute IPv6 routes using exabgp. +* Verify that the IPv6 routes are correctly redistributed to the DUT. +* Redistribute IPv6 routes from the DUT to exabgp. +* Verify that the IPv6 routes are correctly redistributed to exabgp. +* Set default configuration. + +## TODO + +## Open questions +* Should some traffic test cases be performed as part of this test?
\ No newline at end of file diff --git a/docs/CRM-test-plan.md b/docs/CRM-test-plan.md new file mode 100644 index 0000000000..2fc9d9080f --- /dev/null +++ b/docs/CRM-test-plan.md @@ -0,0 +1,221 @@ +# CRM test plan + +* [Overview](#Overview) + * [Scope](#Scope) + * [Testbed](#Testbed) +* [Setup configuration](#Setup%20configuration) + * [Ansible scripts to setup and run test](#Ansible%20scripts%20to%20setup%20and%20run%20test) + * [crm.yml](#crm.yml) +* [Test](#Test) +* [Test cases](#Test%20cases) +* [TODO](#TODO) +* [Open questions](#Open%20questions) + +## Overview +The purpose is to test the functionality of CRM on the SONIC switch DUT, closely resembling the production environment. + +### Scope
The test is targeting a running SONIC system with fully functioning configuration. The purpose of the test is not to test a specific API, but to functionally test CRM on a SONIC system. + +### Testbed +The test will run on all testbeds. + +## Setup configuration +No setup pre-configuration is required; the test will configure and clean up all the configuration. +### Ansible scripts to setup and run test +#### crm.yml +crm.yml, when run with the tag "crm", will do the following for each CRM resource: +1. Apply required configuration. +2. Verify "used" and "free" counters. +3. Verify "EXCEEDED" and "CLEAR" messages using all types of thresholds. +4. Restore configuration. + +## Test + +## Test cases + +### Test case # 1 – IPv4 route +#### Test objective +Verify "IPv4 route" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 route and observe that counters were updated as expected. +* Remove 1 route and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 2 – IPv6 route +#### Test objective +Verify "IPv6 route" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 route and observe that counters were updated as expected. +* Remove 1 route and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 3 – IPv4 nexthop +#### Test objective +Verify "IPv4 nexthop" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Add 1 nexthop and observe that counters were updated as expected. +* Remove 1 nexthop and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 4 – IPv6 nexthop +#### Test objective +Verify "IPv6 nexthop" CRM resource. +#### Test steps +* Set polling interval to 1 minute.
+* Add 1 nexthop and observe that counters were updated as expected. +* Remove 1 nexthop and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 5 – IPv4 neighbor +#### Test objective +Verify "IPv4 neighbor" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 neighbor and observe that counters were updated as expected. +* Remove 1 neighbor and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 6 – IPv6 neighbor +#### Test objective +Verify "IPv6 neighbor" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 neighbor and observe that counters were updated as expected. +* Remove 1 neighbor and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 7 – Nexthop group object +#### Test objective +Verify "nexthop group object" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 ECMP route and observe that counters were updated as expected. +* Remove 1 ECMP route and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 8 – Nexthop group member +#### Test objective +Verify "nexthop group member" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 ECMP route and observe that counters were updated as expected. +* Remove 1 ECMP route and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 9 – FDB entry +#### Test objective +Verify "FDB entry" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 FDB entry and observe that counters were updated as expected. +* Remove 1 FDB entry and observe that counters were updated as expected. 
+* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 10 – ACL group +#### Test objective +Verify "ACL group" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 ACL and observe that counters were updated as expected. +* Remove 1 ACL and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 11 – ACL table +#### Test objective +Verify "ACL table" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 ACL and observe that counters were updated as expected. +* Remove 1 ACL and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 12 – ACL entry +#### Test objective +Verify "ACL entry" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 ACL rule and observe that counters were updated as expected. +* Remove 1 ACL rule and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration. + +### Test case # 13 – ACL counter +#### Test objective +Verify "ACL counter" CRM resource. +#### Test steps +* Set polling interval to 1 minute. +* Configure 1 ACL rule and observe that counters were updated as expected. +* Remove 1 ACL rule and observe that counters were updated as expected. +* Perform the following steps for all threshold types ("percentage", "used", "free"): + * Set low and high thresholds according to current usage and type. + * Verify that "EXCEEDED" message is logged (using log analyzer). + * Set low and high thresholds to default values. + * Verify that "CLEAR" message is logged (using log analyzer). +* Restore default configuration.
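+
+For reference, the per-resource steps above map onto the SONiC `crm` CLI; a sketch of how a sonic-mgmt pytest case might drive it (assuming the standard `duthost` fixture; the threshold values and resource name are illustrative):
+
+```python
+# Sketch: exercise CRM threshold types for one resource via the crm CLI.
+def exercise_crm_thresholds(duthost, resource="ipv4 route"):
+    duthost.command("crm config polling interval 60")    # 1 minute
+    for th_type, low, high in [("used", 0, 1),           # expect "EXCEEDED"
+                               ("percentage", 70, 85)]:  # defaults, expect "CLEAR"
+        duthost.command("crm config thresholds {} type {}".format(resource, th_type))
+        duthost.command("crm config thresholds {} low {} high {}".format(resource, low, high))
+    return duthost.command("crm show resources all")["stdout"]
+```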
+ +## TODO + +## Open questions diff --git a/docs/Everflow-test-plan.md b/docs/Everflow-test-plan.md new file mode 100644 index 0000000000..16183ffeee --- /dev/null +++ b/docs/Everflow-test-plan.md @@ -0,0 +1,351 @@ +- [Overview](#overview) + - [Scope](#scope) + - [Related **DUT** CLI commands](#related-dut-cli-commands) +- [Setup configuration](#setup-configuration) + - [Scripts for generating configuration on SONIC](#scripts-for-generating-configuration-on-SONIC) + - [Ansible scripts to setup and run test](#ansible-scripts-to-setup-and-run-test) + - [everflow_testbed.yml](#everflow-testbed-yml) + - [Setup of DUT switch](#Setup-of-DUT-switch) + - [J2 templates](#j2-templates) +- [PTF Test](#ptf-test) + - [Input files for PTF test](#input-files-for-ptf-test) + - [Traffic validation in PTF](#traffic-validation-in-ptf) +- [Test cases](#test-cases) +- [TODO](#todo) +- [Open Questions](#open-questions) + +## Overview +The purpose is to test the functionality of Everflow on the SONIC switch DUT, with and without LAGs configured, closely resembling the production environment. +The test assumes all necessary configuration, including the Everflow session and ACL rules, LAG configuration and BGP routes, is already pre-configured on the SONIC switch before the test runs. + +### Scope +The test is targeting a running SONIC system with fully functioning configuration. +The purpose of the test is not to test a specific SAI API, but to functionally test Everflow on a SONIC system, making sure that traffic flows correctly, according to the BGP routes advertised by the BGP peers of the SONIC switch, and the LAG configuration. + +NOTE: The Everflow+LAG test will be able to run **only** in the testbed specifically created for LAG. + +### Related **DUT** CLI commands +Manual Everflow configuration can be done using the swssconfig utility in the swss container. + + swssconfig + +## Test structure +### Setup configuration +Everflow configuration should be created on the DUT before running the test. The configuration can be deployed using the ansible sonic test playbook with the tag **everflow_tb**. + +#### Scripts for generating configuration on SONIC + +There will be three j2 template files for the Everflow test configuration: everflow_tb_test_session.j2, everflow_tb_test_acl_table.j2 and everflow_tb_test_acl_rule.j2. They will be used by the Ansible playbook to generate json files and apply them on the switch. + +#### Ansible scripts to setup and run test + +##### everflow_testbed.yml + +everflow_testbed.yml, when run with the tag "everflow_tb", will do the following: + +1. Generate JSON files and apply them on the switch. +2. Run test. +3. Clean up dynamic configuration and temporary configuration on the DUT. + +The Everflow test consists of a number of subtests, and each of them will include the following steps: + +1. Run loganalyzer 'init' phase +2. Run Everflow Sub Test +3. Run loganalyzer 'analyze' phase + +Everflow subtests will be implemented in the PTF (everflow_testbed_test.py). Every subtest will be implemented in a separate class. + +#### Setup of DUT switch +Setup of the SONIC DUT will be done by an Ansible script. During setup, Ansible will copy a JSON file containing the Everflow configuration to the swss container on the DUT. The swssconfig utility will be used to push the configuration to the SONiC DB. The data will be consumed by orchagent.
+ +JSON Sample: + +everflow_session.json +``` +[ + { + "MIRROR_SESSION_TABLE:session_1": { + "src_ip": "1.1.1.1", + "dst_ip": "2.2.2.2", + "gre_type": "0x6558", + "dscp": "8", + "ttl": "64", + "queue": "0" + }, + "OP": "SET" + } +] +``` + +everflow_acl_table.json +``` +[ + { + "ACL_TABLE:acl_table_mirror": { + "policy_desc" : "Everflow_ACL_table", + "type" : "MIRROR", + "ports" : "Ethernet0, Ethernet4, Ethernet8, Ethernet12, Ethernet16, Ethernet20, Ethernet24, Ethernet28, Ethernet32, Ethernet36, Ethernet40, Ethernet44, Ethernet48, Ethernet52, Ethernet56, Ethernet60, Ethernet64, Ethernet68, Ethernet72, Ethernet76, Ethernet80, Ethernet84, Ethernet88, Ethernet92, Ethernet96, Ethernet100, Ethernet104, Ethernet108, Ethernet112, Ethernet116, Ethernet120, Ethernet124, Ethernet128" + }, + "OP": "SET" + } +] +``` +everflow_acl_rule_persistent.json +``` +[ + { + "ACL_RULE_TABLE:acl_table_mirror:Rule01": { + "policy_desc" : "Mirror_packet_with_tcp_flag_fin", + "priority" : "50", + "tcp_flags" : "0x01/0xff", + "mirror_action" : "session_1" + }, + "OP": "SET" + }, + { + "ACL_RULE_TABLE:acl_table_mirror:Rule02": { + "policy_desc" : "Mirror_packet_with_tcp_flag_syn_and_dscp", + "priority" : "50", + "tcp_flags" : "0x02/0xff", + "dscp" : "1", + "mirror_action" : "session_1" + }, + "OP": "SET" + }, + { + "ACL_RULE_TABLE:acl_table_mirror:Rule03": { + "policy_desc" : "Mirror_packet_with_tcp_flag_rst", + "priority" : "50", + "tcp_flags" : "0x04/0xff", + "mirror_action" : "session_1" + }, + "OP": "SET" + }, + { + "ACL_RULE_TABLE:acl_table_mirror:Rule04": { + "policy_desc" : "Mirror_packet_with_specific_tcp_port", + "priority" : "50", + "ip_protocol" : "0x06", + "l4_src_port" : "1101", + "mirror_action" : "session_1" + }, + "OP": "SET" + } +] +``` +everflow_acl_rule_dynamic.json +``` +[ + { + "ACL_RULE_TABLE:acl_table_mirror:RuleDynamic01": { + "policy_desc" : "Mirror_packet_with_specific_src_ip", + "priority" : "50", + "src_ip" : "10.0.0.0/32", + "mirror_action" : "session_1" + }, + "OP": "SET" + }, + { + "ACL_RULE_TABLE:acl_table_mirror:RuleDynamic02": { + "policy_desc" : "Mirror_packet_with_specific_dst_ip", + "priority" : "50", + "dst_ip" : "10.0.0.5/32", + "mirror_action" : "session_1" + }, + "OP": "SET" + }, + { + "ACL_RULE_TABLE:acl_table_mirror:RuleDynamic03": { + "policy_desc" : "Mirror_packet_with_specific_src_and_dst_ip", + "priority" : "50", + "src_ip" : "10.0.0.0/32", + "dst_ip" : "10.0.0.5/32", + "mirror_action" : "session_1" + }, + "OP": "SET" + } +] +``` +## PTF Test + +### Input files for PTF test + +The PTF test will generate traffic between ports and make sure it is mirrored according to the configured Everflow session and ACL rules. Depending on the testbed topology and the existing configuration (e.g. ECMP, LAGs, etc.) packets may arrive at different ports. Therefore, port connection information will be generated from the minigraph and supplied to the PTF script. + +### Traffic validation in PTF +Depending on the test case, the PTF test will verify that the packet either arrived or was dropped. + +## Test cases + +Each test case will be additionally validated by the loganalyzer utility. + +Each test case will add dynamic Everflow ACL rules at the beginning and remove them at the end. + +Each test case will run traffic for persistent and dynamic Everflow ACL rules. + +Each test case will analyze the Everflow packet header and payload (whether the mirrored packet is equal to the original). + +### Test case \#1 - Resolved route + +#### Test objective + +Verify that session with resolved route has active state.
+ +#### Test steps + +- Create route that matches session destination IP with unresolved next hop. +- Resolve route next hop. +- Verify that session state in APP DB changed to active. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored to the appropriate port. +- Analyze mirrored packet header. +- Verify that mirrored packet payload is equal to sent packet. +- Verify that the counter value of each Everflow ACL rule is correct. + +### Test case \#2 - Longer prefix route with resolved next hop + +#### Test objective + +Verify that session destination port and MAC address are changed after best match route insertion. + +#### Test steps + +- Create route that matches session destination IP with unresolved next hop. +- Resolve route next hop. +- Verify that session state in APP DB changed to active. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored to the appropriate port. +- Analyze mirrored packet header. +- Verify that mirrored packet payload is equal to sent packet. +- Create best match route that matches session destination IP with unresolved next hop. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored to the same port. +- Resolve best match route next hop (neighbor should be on different port). +- Verify that session state in APP DB is active. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored and destination port changed accordingly. + +### Test case \#3 - Remove longer prefix route. + +#### Test objective + +Verify that session destination port and MAC address are changed after best match route removal. + +#### Test steps + +- Create route that matches session destination IP with unresolved next hop. +- Resolve route next hop. +- Verify that session state in APP DB changed to active. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored to the appropriate port. +- Analyze mirrored packet header. +- Verify that mirrored packet payload is equal to sent packet. +- Create best match route that matches session destination IP with unresolved next hop. +- Resolve best match route next hop (neighbor should be on different port). +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored and destination port changed accordingly. +- Remove best match route. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored and destination port changed accordingly. + +### Test case \#4 - Change neighbor MAC address. + +#### Test objective + +Verify that session destination MAC address is changed after neighbor MAC address update. + +#### Test steps + +- Create route that matches session destination IP with unresolved next hop. +- Resolve route next hop. +- Verify that session state in APP DB changed to active. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored to the appropriate port. +- Analyze mirrored packet header. +- Verify that mirrored packet payload is equal to sent packet. +- Change neighbor MAC address. +- Send packets that match each Everflow ACL rule. +- Verify that DST MAC address in mirrored packet header is changed accordingly. + +### Test case \#5 - Resolved ECMP route. + +#### Test objective + +Verify that session with resolved ECMP route has active state. + +#### Test steps + +- Create ECMP route that matches session destination IP with two unresolved next hops. +- Resolve route next hops.
+- Verify that session state in APP DB is changed to active. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored to the appropriate port. +- Analyze mirrored packet headers. +- Verify that mirrored packet payload is equal to sent packet. + +### Test case \#6 - ECMP route change (add next hop). + +#### Test objective + +Verify that insertion of an additional next hop to the ECMP group doesn't affect session DST MAC and port. + +#### Test steps + +- Create ECMP route that matches session destination IP with two unresolved next hops. +- Resolve route next hops. +- Verify that session state in APP DB is changed to active. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored to the appropriate port. +- Analyze mirrored packet header. +- Verify that mirrored packet payload is equal to sent packet. +- Add resolved next hop to ECMP route. +- Send packets that match each Everflow rule. +- Verify that packets are mirrored to the same port. +- Verify that mirrored packets have the same DST MAC. + +### Test case \#7 - ECMP route change (remove next hop not used by session). + +#### Test objective + +Verify that removal of a next hop that is not used by the session doesn't cause DST port and MAC change. + +#### Test steps + +- Create ECMP route that matches session destination IP with two unresolved next hops. +- Resolve route next hops. +- Verify that session state in APP DB is changed to active. +- Send packets that match each Everflow rule. +- Verify that packets are mirrored to the appropriate port. +- Analyze mirrored packet header. +- Verify that mirrored packets payload is equal to sent packets. +- Remove next hop that is not used by session. +- Send packets that match each Everflow rule. +- Verify that packets are mirrored to the same port. +- Verify that mirrored packets have the same DST MAC. + +### Test case \#8 - ECMP route change (remove next hop used by session). + +#### Test objective + +Verify that after removal of the next hop that was used by the session from the ECMP route, the session state is active. + +#### Test steps + +- Create ECMP route that matches session destination IP with two unresolved next hops. +- Resolve route next hops. +- Verify that session state in APP DB is changed to active. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored to the appropriate port. +- Analyze mirrored packet header. +- Verify that mirrored packets payload is equal to sent packets. +- Remove next hop that is used by session. +- Send packets that match each Everflow ACL rule. +- Verify that packets are mirrored and destination port changed accordingly.
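+
+For the "analyze mirrored packet header" steps, the expected encapsulation can be derived from the session_1 definition in everflow_session.json above (a sketch using scapy; the outer Ethernet addresses depend on the resolved next hop and are left at defaults here):
+
+```python
+# Sketch: expected Everflow mirror encapsulation for session_1.
+from scapy.all import Ether, IP, GRE
+
+def expected_mirror_packet(inner_pkt):
+    outer = Ether() / IP(src="1.1.1.1", dst="2.2.2.2",
+                         tos=8 << 2,  # dscp=8
+                         ttl=64)
+    return outer / GRE(proto=0x6558) / inner_pkt
+```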
+
+### Other possible tests
+
+## TODO
+- Everflow+LAG test configuration and testcases (separate ansible tag)
+
+## Open Questions
\ No newline at end of file
diff --git a/docs/IPv4-Decapsulation-test.md b/docs/IPv4-Decapsulation-test.md
new file mode 100644
index 0000000000..fa1fc22448
--- /dev/null
+++ b/docs/IPv4-Decapsulation-test.md
@@ -0,0 +1,187 @@
+**Related documents**
+
+* [Tests readme](https://github.com/Azure/sonic-mgmt/blob/master/ansible/README.test.md)
+* [Test system readme](https://github.com/Azure/sonic-mgmt/blob/master/ansible/README.testbed.md)
+* [swss-schema](https://github.com/Azure/sonic-swss/blob/master/doc/swss-schema.md)
+
+## Overview
+***
+
+This test case is aimed at testing the DUT's ability to decapsulate IP-encapsulated packets, verifying that each decapsulated packet has the right value in every header field, and that it is forwarded to the correct route according to the underlay destination IP.
+The test assumes all routes and decapsulation rules are set up prior to the test, so no configuration is required by the test itself; the test will use only the IPs that are configured for it.
+
+The routes and the decapsulation are validated by sending packets with the corresponding IPs, both in the overlay and in the underlay. If the test fails, it is the test's responsibility to provide all the debug information that can be retrieved, in order to shed as much light as possible on the failure for offline debugging.
+
+### Scope
+---------
+The scope of this test plan is only the Ansible test, including the PTF test and the necessary configuration.
+
+### Scale / Performance
+-------------------
+N/A
+
+
+### Related **DUT** CLI commands
+----------------------------
+No CLI commands are needed for this test.
+
+### Related DUT configuration files
+-----------------------------------
+The decap_conf.j2 template will create a JSON file that configures the decap rule for the **Decap IP**.
+
+### Related SAI APIs
+----------------
+N/A
+
+### DUT Requirement
+--------------------
+The DUT must be connected to the PTF docker with 32 different ports,
+and the test must run after BGP configuration so that the default route is configured.
+
+## Test structure
+===============
+
+### Setup configuration
+-------------------
+The test setup assumes a single SONiC DUT connected to a fanout switch, which in turn is connected to a server running 32 Arista VMs.
+
+There will be 32 BGP peers connected to the switch. The peers will advertise the default route and update the switch.
+
+The PTF host needs to be connected to a port through which it sends packets to the switch, and needs connections via ports through which the switch forwards the received packets back to the host for validation.
+
+### Configuration scripts
+---------------------
+The peers and the SONiC DUT will be deployed by an Ansible script. As part of the deployment, the script will generate the routes and decapsulation commands.
+The decapsulation rule will be generated by a J2 template that outputs a JSON file with the **Decap IP**, which is applied through the SWSS config tool.
+#### J2 script
+
+[
+         {
+                 "TUNNEL_DECAP_TABLE:NETBOUNCER" : {
+                         "tunnel_type":"IPINIP",
+                         "dst_ip":"{{ansible_lo['ipv4']['address']}}",
+                         "dscp_mode":"pipe",
+                         "ecn_mode":"copy_from_outer",
+                         "ttl_mode":"pipe"
+                 },
+                 "OP": "SET"
+         }
+]
+ +#### Example for SWSS JSON file output +
+[
+ 	{
+ 		"TUNNEL_DECAP_TABLE:NETBOUNCER" : {
+ 			"tunnel_type":"IPINIP",
+ 			"dst_ip":"10.0.0.1",
+ 			"dscp_mode":"pipe",
+ 			"ecn_mode":"copy_from_outer",
+ 			"ttl_mode":"pipe"
+ 		},
+ 		"OP": "SET"
+ 	}
+]
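+
+The rendered JSON is applied with the SWSS config tool. Below is a minimal sketch of that
+step, assuming the rendered file has been copied to the DUT at a hypothetical path and that
+the standard `swssconfig` tool inside the `swss` container is used:
+
+```python
+import subprocess
+
+DECAP_JSON = "/home/admin/decap_conf.json"  # hypothetical rendered-file path on the DUT
+
+# copy the file into the swss container and apply it
+subprocess.check_call(["docker", "cp", DECAP_JSON, "swss:/decap_conf.json"])
+subprocess.check_call(["docker", "exec", "swss", "swssconfig", "/decap_conf.json"])
+```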
+
+
+Test cases
+----------
+
+The test assumes there is a mechanism to validate logs on the DUT, where we should be able to analyze /var/log/syslog for error and warning messages related to the current test.
+In case such messages are detected, the test is considered failed.
+
+The test should have a standardized summary section in the following format:
+
+*TEST SUMMARY:*
+
+*TEST*: OK/FAIL
+
+*LOGS*: OK/FAIL
+
+
+
+### Test case \#1
+
+#### Test objective
+
+The objective is to validate the decapsulation ability, and that each route has been added to the switch and functions properly with the decapsulated packet.
+
+#### Test configurations
+ - IP decap IPv4 address taken from the loopback IP: **Decap IP**
+ - default IPv4 routes configured through the BGP sessions as ECMP routes.
+ - unicast IPv4 routes configured through the BGP sessions as TOR routes.
+
+#### Test description
+1. The test will use a **host IP** that falls under the default route and under the TOR routes.
+2. The test will use different outer and inner TTL value combinations for the different TTL modes:
+
+TTL mode | ttl_outer | ttl_inner | Decap & forwarded? | Expected TTL
+---------|-----------|-----------|--------------------|--------------
+pipe     | 2 ~ 64    | 2 ~ 64    | Yes                | ttl_inner - 1
+pipe     | 64        | 2         | Yes                | 1
+pipe     | 1         | 64        | Yes                | 63
+uniform  | 2 ~ 64    | 2 ~ 64    | Yes                | ttl_outer - 1
+uniform  | 64        | 2         | Yes                | 63
+uniform  | 2         | 64        | Yes                | 1
+3. From the PTF docker, craft and send through all the ports double-encapsulated IP packets as follows:
+   * `outer IP header [S:2.2.2.0,D:`**Decap IP**`]`
+   * `inner IP header [S:1.1.1.1,D:`**host IP**`]`
+   * `ECN and DSCP will be set to values picked from the allowed range in a round-robin way, for the overlay and the underlay.`
+   * `ECN_outer != ECN_inner1`
+   * `DSCP_outer != DSCP_inner1`
+   * `TTL_outer != TTL_inner1`
+   * `all other fields will be the PTF defaults for the simple_tcp_packet function`
+4. Verify that SONiC does not see the encapsulated packet: the IP-in-IP packet should not go to the CPU and should not be seen on the DUT.
+
+5. Confirm that the packet comes back to the PTF docker decapsulated, from one of the expected ports, and that the L3 header fields look like this:
+   * `IP header [S:1.1.1.1,D:`**host IP**`]`
+   * `TTL = TTL_inner - 1 when ttl_mode=pipe`
+   * `TTL = TTL_outer - 1 when ttl_mode=uniform`
+   * `ECN = ECN_outer`
+   * `DSCP = DSCP_outer`
+
+6. Repeat steps 3-5 32 times, so that each port sends 2 packets: one for a unicast route and one for an ECMP route.
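+
+The packet crafting and the pipe/uniform TTL expectations above can be expressed compactly
+with scapy. Below is a minimal sketch, using the decap IP from the example SWSS JSON above
+and a hypothetical host IP:
+
+```python
+from scapy.all import IP, TCP
+
+DECAP_IP = "10.0.0.1"    # loopback/decap IP from the example SWSS JSON above
+HOST_IP = "100.1.1.30"   # hypothetical host IP falling under a configured route
+
+def build_double_encap(ttl_outer, ttl_inner):
+    """Outer IPinIP header towards the decap IP, inner header towards the host."""
+    inner = IP(src="1.1.1.1", dst=HOST_IP, ttl=ttl_inner) / TCP()
+    return IP(src="2.2.2.0", dst=DECAP_IP, ttl=ttl_outer, proto=4) / inner
+
+def expected_ttl(ttl_mode, ttl_outer, ttl_inner):
+    """Expected TTL of the decapsulated packet, per the table above."""
+    return (ttl_inner if ttl_mode == "pipe" else ttl_outer) - 1
+```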
+### Test case \#2
+
+#### Test objective
+
+The objective is to validate the decapsulation ability with a triple-encapsulated packet, and that each route has been added to the switch and functions properly with the decapsulated packet.
+
+#### Test configurations
+ - IP decap IPv4 address taken from the loopback IP: **Decap IP**
+ - ECMP IPv4 routes configured through the BGP sessions as ECMP routes.
+ - unicast IPv4 routes configured through the BGP sessions as TOR routes.
+
+#### Test description
+1. The test will use a **host IP** that is one of the routes preconfigured via the BGP sessions (ECMP and unicast).
+2. The test will use different outer and inner TTL value combinations for the different TTL modes:
+
+TTL mode | ttl_outer | ttl_inner | Decap & forwarded? | Expected TTL
+---------|-----------|-----------|--------------------|--------------
+pipe     | 2 ~ 64    | 2 ~ 64    | Yes                | ttl_inner - 1
+pipe     | 64        | 2         | Yes                | 1
+pipe     | 1         | 64        | Yes                | 63
+uniform  | 2 ~ 64    | 2 ~ 64    | Yes                | ttl_outer - 1
+uniform  | 64        | 2         | Yes                | 63
+uniform  | 2         | 64        | Yes                | 1
+3. From the PTF docker, craft and send through all the ports triple-encapsulated IP packets as follows:
+
+   * `outer IP header [S:2.2.2.0,D:`**Decap IP**`]`
+   * `inner1 IP header [S:1.1.1.1,D:`**host IP**`]`
+   * `inner2 IP header [S:4.4.4.4,D:4.4.4.3]`
+   * `ECN and DSCP will be set to values picked from the allowed range in a round-robin way, for the overlay and the underlay.`
+   * `ECN_outer != ECN_inner1`
+   * `DSCP_outer != DSCP_inner1`
+   * `TTL_outer != TTL_inner1`
+   * `all other fields will be the PTF defaults for the simple_tcp_packet function`
+
+4. Verify that SONiC does not see the encapsulated packet: the IP-in-IP packet should not go to the CPU and should not be seen on the DUT.
+
+5. Confirm that the packet comes back to the PTF docker decapsulated, from one of the expected ports, and that the L3 header fields look like this:
+   * outer IP headers
+     - `IPs [S:1.1.1.1,D:`**host IP**`]`
+     - `TTL = TTL_inner - 1 when ttl_mode=pipe`
+     - `TTL = TTL_outer - 1 when ttl_mode=uniform`
+     - `ECN = ECN_outer`
+     - `DSCP = DSCP_outer`
+   * inner IP headers: PTF defaults for the simple_tcp_packet function
+
+6. Repeat steps 3-5 32 times, so that each port sends 2 packets: one for a unicast route and one for an ECMP route.
diff --git a/docs/QoS-configuration-in-Config-DB.-ECN-WRED-configuration-utility-test-plan.md b/docs/QoS-configuration-in-Config-DB.-ECN-WRED-configuration-utility-test-plan.md
new file mode 100644
index 0000000000..483736cec1
--- /dev/null
+++ b/docs/QoS-configuration-in-Config-DB.-ECN-WRED-configuration-utility-test-plan.md
@@ -0,0 +1,137 @@
+## Overview
+The purpose is to test the functionality of ECN configuration on the SONiC switch.
+The test does not require any links on the switch.
+
+Test cases cover the initial ECN WRED configuration and configuration updates made with the `ecnconfig` utility.
+
+## Related tools and CLI utilities
+Utility to be used for QoS configuration (including ECN WRED) validation: `sonic-cfggen`.
+
+    sonic-cfggen -j qos.json --print-config
+Utility to test ECN WRED configuration updates: `ecnconfig`.
+
+    ecnconfig -p AZURE_LOSSY -gmax 516096
+Utility to explore the DB: `redis-cli`.
+
+    redis-cli -n 4 keys *
+
+## Manual tests
+To be performed after changes touching QoS ECN components:
+
+- Orchagent: qosorch.cpp, qosorch.h, orchdaemon.cpp
+- sonic-cfggen utility
+- Common table classes: ProducerTable, ConsumerTable
+- sonic-py-swsssdk library
+- first-time init script rc.load
+- manipulations with hwsku
+- adding configuration for new platforms/hwskus
+- ecnconfig utility
+
+## Automated tests
+To be performed on a testbed or stand-alone switch in the scope of regression tests.
+
+### Test cases
+Test cases are marked with (M) - manual - if recommended for manual testing, with (A) if it makes sense to automate the test and run it in regression, and with (MA) if suitable for both.
+
+#### Test case #1(M): Check configuration file(s) available on Switch
+Build SONiC, install it on the switch, boot, and check that the `qos.json` files exist in the folders
+
+    /usr/share/sonic/device/<platform>/<hwsku>/
+Check for related or updated platforms.
+
+#### Test case #2(M): Check QoS configuration reaches DB
+
+SSH to the switch and check whether the QoS configuration was added to the Config DB.
+
+    redis-cli -n 4 keys \* | grep QUEUE
+
+Expected records like:
+
+    43) "QUEUE|Ethernet0,Ethernet4....
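+
+The Config DB check above and the ASIC DB check in the next test case lend themselves to
+automation. Below is a minimal sketch, assuming it runs on the DUT where redis listens
+locally (CONFIG_DB is index 4, ASIC_DB is index 1):
+
+```python
+import redis
+
+CONFIG_DB, ASIC_DB = 4, 1
+
+def count_keys(db_index, pattern):
+    db = redis.StrictRedis(host="localhost", port=6379, db=db_index)
+    return len(db.keys(pattern))
+
+# QoS configuration is present in CONFIG_DB ...
+assert count_keys(CONFIG_DB, "QUEUE|*") > 0
+# ... and was applied down to the ASIC DB
+assert count_keys(ASIC_DB, "ASIC_STATE:SAI_OBJECT_TYPE_QUEUE:*") > 0
+```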
+
+#### Test case #3(MA): Check configuration applied
+
+SSH to the switch and check whether the QoS configuration was applied.
+
+    redis-cli -n 1 keys \* | grep QUEUE
+
+Expected records like:
+
+    1034) "ASIC_STATE:SAI_OBJECT_TYPE_QUEUE:oid:0x150000000003bf"
+
+#### Test case #4(M): Ecnconfig utility test. List mode
+SSH to the switch and execute:
+
+    ecnconfig -l
+
+Expected output:
+
+    Profile: AZURE_LOSSLESS
+    -------------------- -------
+    red_max_threshold    516096
+    wred_green_enable    true
+    ecn                  ecn_all
+    green_min_threshold  184320
+    red_min_threshold    516096
+    wred_yellow_enable   true
+    yellow_min_threshold 516096
+    green_max_threshold  184320
+    yellow_max_threshold 516096
+    -------------------- -------
+    ...
+
+Compare to the values in init_cfg.json:
+
+    sonic-cfggen -j /etc/sonic/init_cfg.json --print-data | grep -A20 \"AZURE_LOSSLESS\"
+
+#### Test case #5(M): Ecnconfig utility test. Set mode
+SSH to the switch and execute:
+
+    ecnconfig -p AZURE_LOSSY -gmax 184320 -rmax 516096
+
+No error messages are expected.
+
+#### Test case #6(MA): Ecnconfig utility test. Set function
+SSH to the switch and execute:
+
+    ecnconfig -p AZURE_LOSSY -rmin 491520
+
+Check the ASIC DB records.
+
+    redis-cli -n 1 keys \* | grep "ASIC_STATE:SAI_OBJECT_TYPE_WRED"
+
+Expected output example:
+
+    root@arc-switch1028:/home/admin# redis-cli -n 1 hgetall "ASIC_STATE:SAI_OBJECT_TYPE_WRED:oid:0x130000000005cc"
+    ...
+    13) "SAI_WRED_ATTR_RED_MIN_THRESHOLD"
+    14) "491520"
+    ...
+
+#### Test case #7(M): Ecnconfig utility test. Integration with `show` and `config`
+SSH to the switch and run the following commands:
+
+    show ecn
+
+Expected output: as in test case #4
+
+    config ecn --help
+
+Expected output: the utility usage help screen
+
+    config ecn -profile AZURE_LOSSY -rmin 430080
+
+Check that the value is applied, as in test case #6
+
+#### Test case #8(M): Ecnconfig utility test. Negative tests
+SSH to the switch and run the following commands:
+
+    ecnconfig -l -p
+    ecnconfig -l -p LOSS
+    ecnconfig -p LOSS
+    ecnconfig -p LOSS -gmax
+    config ecn -gmax 45
+    show ecn -p
+
+All should fail with the appropriate error messages.
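+
+Test case #6 is a good automation candidate. Below is a minimal sketch of the automated
+check, assuming it runs on the DUT; since the mapping from WRED OID to profile is not
+tracked here, the sketch accepts the new threshold on any WRED entry:
+
+```python
+import subprocess
+import redis
+
+ASIC_DB = 1
+
+def test_ecnconfig_red_min_threshold():
+    """Set red_min_threshold via ecnconfig and confirm it lands in the ASIC DB."""
+    subprocess.check_call(["ecnconfig", "-p", "AZURE_LOSSY", "-rmin", "491520"])
+    db = redis.StrictRedis(host="localhost", port=6379, db=ASIC_DB)
+    for key in db.keys("ASIC_STATE:SAI_OBJECT_TYPE_WRED:*"):
+        attrs = db.hgetall(key)
+        if attrs.get(b"SAI_WRED_ATTR_RED_MIN_THRESHOLD") == b"491520":
+            return  # found a WRED profile carrying the new threshold
+    raise AssertionError("threshold not found in any WRED profile")
+```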
diff --git a/docs/VLAN-trunk-test-plan.md b/docs/VLAN-trunk-test-plan.md
new file mode 100644
index 0000000000..0cfe39ff31
--- /dev/null
+++ b/docs/VLAN-trunk-test-plan.md
@@ -0,0 +1,260 @@
+## Overview
+The purpose is to test VLAN functions on the SONiC switch.
+
+### Scope
+The tests will include:
+
+1. Functionalities of VLAN ports.
+2. VLAN interface routing.
+3. IP2me traffic on VLAN interfaces.
+
+The test will try to cover all functionalities of VLAN ports, including Ethernet ports and LAGs, and will make sure that IP traffic and IP2me traffic work well.
+
+### Functionalities of VLAN ports
+
+A VLAN port has three attributes:
+
+* PVID: ingress untagged packets will be tagged with the PVID, and the PVID will always be in the Permit VLAN IDs.
+
+* Permit VLAN IDs: the VLAN IDs allowed on the port for ingress and egress packets.
+
+* Tagged VLAN IDs: determine with which VLAN IDs egress packets will be tagged.
+
+  For the VLAN trunk feature, the tagged VLAN IDs are the Permit VLAN IDs excluding the PVID; e.g., if the PVID is 100 and the Permit VLAN IDs are 100,200,300, then the tagged VLAN IDs are 200,300, in other words, the untagged VLAN ID is 100.
+
+The detailed actions of VLAN ports:
+
+
+| Packet Tagged or Untagged | Direction | Action                                   |
+| :------------------------ | :-------- | :--------------------------------------- |
+| Untagged                  | Ingress   | Tag the packet with the PVID.            |
+|                           | Egress    | If the VLAN ID of the packet equals the untagged VLAN ID, untag the packet and send it out. Otherwise, if the VLAN ID is in the Permit VLAN IDs, send the packet out with its tag. |
+| Tagged                    | Ingress   | If the VLAN ID of the packet is not in the Permit VLAN IDs, drop the packet. |
+|                           | Egress    | If the VLAN ID of the packet equals the untagged VLAN ID, untag the packet and send it out. Otherwise, if the VLAN ID is in the Permit VLAN IDs, send the packet out with its tag. |
+
+## TEST structure
+
+1. The tests assume the fanout switch supports QinQ (stacked VLAN), so that stacked VLAN packets can pass through the fanout switch and the DUT can be tested with the inner VLAN.
+
+   ```
+   | testbed server | <------------------> | Fanout switch | <------------------> | DUT |
+                    stacked vlan pkt                        single vlan pkt
+                    outer vlan: 1681                        vlan: 100
+                    inner vlan: 100
+   ```
+
+2. Tests will be based on the *t0* testbed type. The IP address of every LAG on the DUT will be flushed to make all LAGs act as L2 ports. New test IP addresses will be configured on the VLAN interfaces.
+
+3. VMs are only used to do LACP negotiation for the LAGs; PTF is used to send packets and verify the VLAN functionalities.
+
+4. The test contains three files:
+
+   vlan_info.j2: defines the VLAN port info and VLAN interface info for vlan.yml and vlan_test.py
+
+   vlan.yml: configures the DUT for the test according to vlan_info.j2
+
+   vlan_test.py: does the PTF test according to vlan_info.j2
+
+5. vlan_info.j2 will choose several Ethernet ports and LAG ports from the minigraph of the current topology and generate the VLAN port info and VLAN interface info for the PTF python script.
+
+   ```jinja2
+   {% set vlan_id_list = [ 100, 200 ] %}
+   vlan_ports_list:
+   {% for lag_number in range(2) %}
+   - dev: '{{ minigraph_portchannels.keys()[lag_number|int] }}'
+     port_index: '{{ minigraph_port_indices[minigraph_portchannels[minigraph_portchannels.keys()[lag_number|int]].members[0]] }}'
+     pvid: '{{ vlan_id_list[(lag_number|int)%2] }}'
+     permit_vlanid:
+   {% for vlan in vlan_id_list %}
+       '{{ vlan }}':
+         peer_ip: '192.168.{{ vlan }}.{{ minigraph_port_indices[minigraph_portchannels[minigraph_portchannels.keys()[lag_number|int]].members[0]] }}'
+         remote_ip: '{{vlan}}.1.1.{{ minigraph_port_indices[minigraph_portchannels[minigraph_portchannels.keys()[lag_number|int]].members[0]] }}'
+   {% endfor %}
+   {% endfor %}
+   {% for port_number in range(2) %}
+   - dev: '{{ minigraph_ports.keys()[port_number|int]}}'
+     port_index: '{{ minigraph_port_indices[minigraph_ports.keys()[port_number|int]]}}'
+     pvid: '{{ ((port_number|int)%2+1)*100}}'
+     permit_vlanid:
+   {% for vlan in vlan_id_list %}
+       '{{ vlan }}':
+         peer_ip: '192.168.{{ vlan }}.{{ minigraph_port_indices[minigraph_ports.keys()[port_number|int]] }}'
+         remote_ip: '{{vlan}}.1.1.{{ minigraph_port_indices[minigraph_ports.keys()[port_number|int]] }}'
+   {% endfor %}
+   {% endfor %}
+
+   vlan_intf_list:
+   {% for vlan in vlan_id_list %}
+   - vlan_id: '{{ (vlan|int) }}'
+     ip: '192.168.{{ vlan }}.1/24'
+   {% endfor %}
+   ```
+
+   and generate vlan.yml. Below is an example:
+
+   ```yaml
+   vlan_ports_list:
+   - dev: 'PortChannel03'
+     port_index: '30'
+     pvid: '100'
+     permit_vlanid:
+       '100':
+         peer_ip: '192.168.100.30'
+         remote_ip: '100.1.1.30'
+       '200':
+         peer_ip: '192.168.200.30'
+         remote_ip: '200.1.1.30'
+   - dev: 'PortChannel02'
+     port_index: '29'
+     pvid: '200'
+     permit_vlanid:
+       '100':
+         peer_ip: '192.168.100.29'
+         remote_ip: '100.1.1.29'
+       '200':
+         peer_ip: '192.168.200.29'
+         remote_ip: '200.1.1.29'
+   - dev: 'Ethernet8'
+     port_index: '8'
+     pvid: '100'
+     permit_vlanid:
+       '100':
+         peer_ip: '192.168.100.8'
+         remote_ip: '100.1.1.8'
+       '200':
+         peer_ip: '192.168.200.8'
+         remote_ip: '200.1.1.8'
+   - dev: 'Ethernet9'
+     port_index: '9'
+     pvid: '200'
+     permit_vlanid:
+       '100':
+         peer_ip: '192.168.100.9'
+         remote_ip: '100.1.1.9'
+       '200':
+         peer_ip: '192.168.200.9'
+         remote_ip: '200.1.1.9'
+
+   vlan_intf_list:
+   - vlan_id: '100'
+     ip: '192.168.100.1/24'
+   - vlan_id: '200'
+     ip: '192.168.200.1/24'
+   ```
+
+
+## TEST case
+
+All the test cases will try to send packets from all the VLAN ports defined in vlan.yml, and try to verify the packets on all the expected VLAN ports.
+
+### Test case #1
+
+#### Test objective
+
+To verify that untagged packets are received, and are sent out tagged or untagged as determined by the egress port configuration.
+
+#### Test description
+
+```
+Test example:
+                                        |(untag:100/permit:100,200)->pkt(untagged)
+pkt(untagged)->(pvid:100/permit:100,200)|DUT|
+                                        |(untag:200/permit:100,200)->pkt(tagged:100)
+```
+
+1. PTF sends untagged packets (destination MAC unknown).
+2. Verify that the packets are received on the other ports that permit the ingress port PVID. Packets egress untagged if the ingress port PVID is the untagged VLAN ID of the egress port; otherwise they egress tagged with the ingress port PVID.
+
+### Test case #2
+
+#### Test objective
+
+To verify that tagged packets whose VLAN ID is in the Permit VLAN IDs are received, and are sent out tagged or untagged as determined by the egress port configuration.
+
+#### Test description
+
+```
+Test example:
+                                          |(untag:100/permit:100,200)->pkt(untagged)
+pkt(tagged:100)->(pvid:100/permit:100,200)|DUT|
+                                          |(untag:200/permit:100,200)->pkt(tagged:100)
+
+                                          |(untag:100/permit:100,200)->pkt(tagged:200)
+pkt(tagged:200)->(pvid:100/permit:100,200)|DUT|
+                                          |(untag:200/permit:100,200)->pkt(untagged)
+```
+
+1. PTF sends tagged packets (destination MAC unknown) whose VLAN ID is in the Permit VLAN IDs of the ingress port.
+2. Verify that the packets are received on the other ports that permit the packet's VLAN ID. Packets egress untagged if the packet's VLAN ID is the untagged VLAN ID of the egress port; otherwise they egress tagged with the packet's VLAN ID.
+
+### Test case #3
+
+#### Test objective
+
+To verify that tagged packets whose VLAN ID is not in the Permit VLAN IDs are dropped.
+
+#### Test description
+
+```
+Test example:
+                                           |(untag:100/permit:100,200)->no pkt egress
+pkt(tagged:4095)->(pvid:100/permit:100,200)|DUT|
+                                           |(untag:200/permit:100,200)->no pkt egress
+```
+
+1. PTF sends tagged packets (destination MAC unknown) whose VLAN ID is not in the Permit VLAN IDs of the ingress port.
+2. Verify that no packets are received on the other ports.
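+
+The tag/untag expectations in the cases above map directly onto PTF. Below is a minimal
+sketch of test case #1 using `ptf.testutils`, with the port indices and VLAN roles taken
+from the vlan.yml example above (port 8 = Ethernet8 with PVID 100, port 9 = Ethernet9
+whose untagged VLAN is 200):
+
+```python
+from ptf.base_tests import BaseTest
+import ptf.testutils as testutils
+
+class VlanUntaggedTest(BaseTest):
+    def runTest(self):
+        # untagged packet in on the PVID-100 port, unknown destination MAC
+        pkt = testutils.simple_udp_packet(eth_dst="00:11:22:33:44:55", pktlen=100)
+        # on the untag:200 port it must egress tagged with VLAN 100 (4 bytes longer)
+        exp = testutils.simple_udp_packet(eth_dst="00:11:22:33:44:55", pktlen=104,
+                                          dl_vlan_enable=True, vlan_vid=100)
+        testutils.send_packet(self, 8, pkt)
+        testutils.verify_packet(self, exp, 9)
+```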
+
+### Test case #4
+
+#### Test objective
+
+To verify that VLAN interface routing is working.
+
+#### Test description
+
+```
+Test example:
+Vlan100: 192.168.100.1/24
+Vlan200: 192.168.200.1/24
+
+192.168.100.30->192.168.200.30 (for directly-connected routing)
+pkt(tagged:100)->(pvid:200/permit:100,200)|DUT|(untag:100/permit:100,200)->pkt(tagged:200)
+pkt(untagged)->(pvid:100/permit:100,200)|DUT|(untag:100/permit:100,200)->pkt(tagged:200)
+pkt(untagged)->(pvid:100/permit:100,200)|DUT|(untag:200/permit:100,200)->pkt(untagged)
+
+1.1.1.30->2.1.1.30 (for indirectly-connected routing)
+pkt(tagged:100)->(pvid:200/permit:100,200)|DUT|(untag:100/permit:100,200)->pkt(tagged:200)
+pkt(untagged)->(pvid:100/permit:100,200)|DUT|(untag:100/permit:100,200)->pkt(tagged:200)
+pkt(untagged)->(pvid:100/permit:100,200)|DUT|(untag:200/permit:100,200)->pkt(untagged)
+```
+
+1. PTF sends IP packets over the VLAN interfaces.
+2. Verify that the packets are received on the egress port.
+
+### Test case #5
+
+#### Test objective
+
+To verify that IP traffic to the VLAN interface itself is working.
+
+#### Test description
+
+```
+Test example:
+Vlan100: 192.168.100.1/24
+Vlan200: 192.168.200.1/24
+
+192.168.100.30->192.168.100.1
+pkt(untagged)->
+               (pvid:100/untag:100/permit:100,200)|DUT|
+pkt(untagged)<-
+
+192.168.100.30->192.168.100.1
+pkt(tagged:100)->
+                 (pvid:200/untag:200/permit:100,200)|DUT|
+pkt(tagged:100)<-
+```
+
+1. PTF sends ICMP request packets to the VLAN interfaces.
+2. Verify that ICMP reply packets are received on the ingress port.
+
+
diff --git a/lgtm.yml b/lgtm.yml
new file mode 100644
index 0000000000..1300b6840c
--- /dev/null
+++ b/lgtm.yml
@@ -0,0 +1,7 @@
+path_classifiers:
+  test:
+    exclude: "/" # We want to analyze all our test code.
+extraction:
+  python:
+    python_setup:
+      version: "2"
diff --git a/spytest/Doc/arch.jpg b/spytest/Doc/arch.jpg
new file mode 100755
index 0000000000000000000000000000000000000000..2eaf89767cb7c2b44b6ffbf8901b193264584eed
GIT binary patch
literal 38295
[... base85-encoded binary image data for spytest/Doc/arch.jpg omitted ...]

[... the beginning of spytest/Doc/install.md was lost in the binary data above; the readable fragment resumes below ...]

+        2.7.14
+
+        /projects/scid/tools/ActivTcl/
+        /projects/scid/tools/ActivTcl/8.5.19
+        /projects/scid/tools/ActivTcl/8.5.19/lib
+        /projects/scid/tools/ActivTcl/8.5.19/8.5.19
+        /projects/scid/tools/ActivTcl/8.5.19/licenses
+        /projects/scid/tools/ActivTcl/8.5.19/MANIFEST_at8.5.txt
+        /projects/scid/tools/ActivTcl/8.5.19/demos
+        /projects/scid/tools/ActivTcl/8.5.19/include
+        /projects/scid/tools/ActivTcl/8.5.19/doc
+        /projects/scid/tools/ActivTcl/8.5.19/bin
+        /projects/scid/tools/ActivTcl/8.5.19/man
+        /projects/scid/tools/ActivTcl/8.5.19/share
+        /projects/scid/tools/ActivTcl/8.5.19/README-8.5-thread.txt
+        /projects/scid/tools/ActivTcl/8.5.19/license-at8.5-thread.terms
+        /projects/scid/tools/ActivTcl/current -> 8.5.19
+  * SPyTest may also work fine with the native Linux packages for python2, pip2, tcl and tclx,
+    but this has not been exercised well. Similarly, it may also work in a virtual Python environment.
+
+* TGen Installation
+  * SPyTest needs traffic generator client libraries
+  * Please contact Ixia and STC to get the required APIs for these libraries
+  * The exact files used for validating SPyTest are:
+    * [IxNetwork 8.42](http://downloads.ixiacom.com/support/downloads_and_updates/public/ixnetwork/IxNetworkAPI8.42.1250.2Linux64.bin.tgz)
+    * [STC 4.91](https://support.spirent.com/SpirentCSC/SpirentDownloadsAppPage?rid=10492)
+  * Once installed, create symbolic links so that the folder structure looks the same as given in
+    [SPYTEST-ROOT]/bin/tgen_folders.txt
+  * The IxOS TCL libraries need to be installed before installing the IxNetwork libraries
+  * SPyTest needs IxNetwork to be installed on an intermediate server/VM
+    * The exact file used for validating SPyTest is: [IxNetwork 8.42](http://downloads.ixiacom.com/support/downloads_and_updates/public/ixnetwork/IxNetwork8.42EA.exe)
+    * The IxNetwork Server IP address needs to be given in the testbed file as "ix_server"
+    * The IxNetwork API Server needs to be launched before launching SPyTest
+  * For the Scapy traffic generator, refer to [README.testbed.Setup.md](https://github.com/Azure/sonic-mgmt/blob/master/ansible/doc/README.testbed.Setup.md)
+
diff --git a/spytest/Doc/intro.md b/spytest/Doc/intro.md
new file mode 100755
index 0000000000..3ade0054d0
--- /dev/null
+++ b/spytest/Doc/intro.md
@@ -0,0 +1,525 @@
+Overview
+========
+
+SPyTest is a test automation framework for validating SONiC. It is based on **PyTest** and is developed leveraging the open source Python packages available for device access/interaction and CLI output parsing.
+
+SPyTest consists of the following components.
+
+ * Framework
+ * TGen API
+ * Feature API
+ * Utility API
+ * TextFSM Templates
+ * Test Scripts
+
+Please read [ROOT] as [git-repo-clone]/spytest in this document.
+
+##### Framework
+Please refer to [ROOT]/spytest/infra.py for the list of functions.
+
+These functions are expected to be called from the feature API; they abstract the device interactions and other common operations like the below.
+
+* Error Pattern Detection and Result Classification
+* Crash Detection and Recovery
+* Power cycle operations using Remote Power Supply (RPS)
+
+##### TGen API
+
+SPyTest uses **HLTAPI** to interface with commercial traffic generators (TGen) like Ixia and Spirent.
+The same APIs are implemented using Scapy to generate traffic in the PTF environment. More details are in the **Traffic Generation** section of this document.
+
+##### Feature API
+Please refer to [ROOT]/apis/***/*.py for the list of functions.
+
+These functions are expected to be called from test scripts; they abstract the UI and version differences.
+
+##### Utility API
+Please refer to [ROOT]/utilities/*.py for the list of functions.
+
+These functions provide commonly used utilities to avoid code duplication.
+As a guideline, we should avoid adding DUT-specific functions in utilities.
+
+##### TextFSM Templates
+
+SPyTest uses the SONiC CLI to interface with the DUTs (Devices Under Test).
+It uses the **Netmiko** library to execute the commands over telnet/ssh connections.
+The CLI output is parsed using **TextFSM** templates to convert it into Python dictionaries for further processing.
+
+The framework API applies TextFSM templates to show command output and returns the parsed output to the caller. Templates need to be added at [ROOT]/templates, and the index file in the same directory needs to be updated. Sample TextFSM templates are available for a large number of commands in the github [ntc-templates](https://github.com/networktocode/ntc-templates) repository.
+
+Please refer to the [TEXTFSM](https://github.com/google/textfsm/wiki/TextFSM) document on github.
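+
+Below is a minimal sketch of how such a template parses show output, using the `textfsm`
+Python package and a tiny made-up two-column template (the real templates live under
+[ROOT]/templates):
+
+```python
+import io
+import textfsm
+
+# hypothetical template: one row per "interface  oper-state" line
+TEMPLATE = io.StringIO(u"""Value INTERFACE (\\S+)
+Value OPER (up|down)
+
+Start
+  ^${INTERFACE}\\s+${OPER} -> Record
+""")
+
+fsm = textfsm.TextFSM(TEMPLATE)
+rows = fsm.ParseText(u"Ethernet0  up\nEthernet4  down\n")
+print(dict(zip(fsm.header, rows[0])))  # {'INTERFACE': 'Ethernet0', 'OPER': 'up'}
+```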
+
+##### Test Scripts
+
+A test script (also referred to as a module) is a logical collection of discrete test functions, grouped together based on functionality. It contains one or more test functions, each of which verifies one or more test cases.
+
+![Image](arch.jpg "icon")
+
+Testbed
+=======
+The testbed file specifies the topology information as described below, and it is a mandatory input to a SPyTest run.
+
+#### Sample topology
+
+![Image](topo.png "icon")
+
+The testbed file content for this topology is given below.
+
+    version: 2.0
+    services: {default: !include sonic_services.yaml}
+
+    builds: !include sonic_builds.yaml
+    speeds: !include sonic_speeds.yaml
+    errors: !include sonic_errors.yaml
+
+    configs:
+      default: !include sonic_configs.yaml
+      empty: {current: [], restore: []}
+
+    devices:
+      DUT-01:
+        device_type: sonic
+        access: {protocol: telnet, ip: 1.2.3.4, port: 2001}
+        credentials: {username: admin, password: password, altpassword: YourPaSsWoRd}
+        properties: {config: default, build: default, services: default, speed: default}
+        breakout: {Ethernet0: 4x10, Ethernet8: 4x10}
+        rps: {model: Raritan, ip: 1.2.3.5, outlet: 10, username: admin, password: admin}
+      DUT-02:
+        device_type: sonic
+        access: {protocol: telnet, ip: 1.2.3.4, port: 2001}
+        credentials: {username: admin, password: password, altpassword: YourPaSsWoRd}
+        properties: {config: default, build: default, services: default, speed: default}
+        breakout: {}
+        rps: {model: Raritan, ip: 1.2.3.5, outlet: 11, username: admin, password: admin}
+      ixia-01:
+        device_type: TGEN
+        properties: {type: ixia, version: "8.42", ip: 1.2.3.6, ix_server: 1.2.3.7}
+      stc-01:
+        device_type: TGEN
+        properties: {type: stc, version: 4.91, ip: 1.2.3.8}
+      scapy-01:
+        device_type: TGEN
+        properties: {type: scapy, version: 1.0, ip: 1.2.3.8}
+
+    topology:
+      DUT-01:
+        interfaces:
+          Ethernet64: {EndDevice: DUT-02, EndPort: Ethernet64}
+          Ethernet68: {EndDevice: DUT-02, EndPort: Ethernet68}
+          Ethernet72: {EndDevice: DUT-02, EndPort: Ethernet72}
+          Ethernet76: {EndDevice: DUT-02, EndPort: Ethernet76}
+          Ethernet48: {EndDevice: ixia-01, EndPort: 1/1}
+          Ethernet52: {EndDevice: ixia-01, EndPort: 1/2}
+          Ethernet56: {EndDevice: ixia-01, EndPort: 1/3}
+          Ethernet60: {EndDevice: ixia-01, EndPort: 1/4}
+      DUT-02:
+        interfaces:
+          Ethernet48: {EndDevice: ixia-01, EndPort: 2/1}
+          Ethernet52: {EndDevice: ixia-01, EndPort: 2/2}
+          Ethernet56: {EndDevice: ixia-01, EndPort: 2/3}
+          Ethernet60: {EndDevice: ixia-01, EndPort: 2/4}
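+
+The testbed file is plain YAML, so scripts outside the framework can also consume it. Below
+is a minimal sketch of reading the sample above with PyYAML; the custom `!include` tag is
+resolved by SPyTest's own loader, so here it is simply mapped to the included file name:
+
+```python
+import yaml
+
+# map the custom !include tag to the included file name for this sketch
+yaml.SafeLoader.add_constructor(u"!include", lambda loader, node: node.value)
+
+with open("testbed_file.yaml") as fd:
+    tb = yaml.safe_load(fd)
+
+dut = tb["devices"]["DUT-01"]
+print(dut["access"]["protocol"], dut["access"]["ip"], dut["access"]["port"])
+
+for port, link in tb["topology"]["DUT-01"]["interfaces"].items():
+    print(port, "->", link["EndDevice"], link["EndPort"])
+```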
+
+The **services** section contains details of external services like radius/tacacs. The data in each service is decided by the test scripts. This helps abstract the service details from the test scripts. Refer to testbeds/sonic_services.yaml for more details
+
+The **builds** section contains details of build locations.
+Refer to testbeds/sonic_builds.yaml for more details
+
+The **speeds** section contains details of speed profiles.
+Refer to testbeds/sonic_speeds.yaml for more details
+
+The **errors** section contains details of error patterns. This is used to classify the test result when a specific pattern of errors is seen in the CLI output.
+Refer to testbeds/sonic_errors.yaml for more details
+
+The **configs** section contains details of the configuration to be applied before executing test scripts. Refer to testbeds/sonic_configs.yaml for more details.
+
+**Note:** The above sections can be filled inline or included from other files in the testbeds folder
+
+The **devices** section contains the list of devices, which are referred to in the **topology** section.
+Each child of this node represents a single device, which can be a DUT or TGen as denoted by the **device_type** attribute.
+
+* **device_type**
+  * Type of the device
+  * currently supported devices [sonic, TGEN]
+
+  The details of the DUT attributes are as given below:
+
+* **access**
+  * DUT access details
+  * **protocol**
+    * DUT access protocol
+    * currently supported access protocols [telnet, ssh]
+  * **ip**
+    * IP address for the telnet/ssh connection to the DUT
+    * Only IPv4 addresses are currently supported
+  * **port**
+    * TCP port for the telnet/ssh connection to the DUT
+
+* **credentials**
+  * DUT access credentials
+  * **username**
+    * DUT access username
+  * **password**
+    * DUT access password
+  * **altpassword**
+    * DUT access alternative password
+    * This is useful if we need to change the default password on first boot
+
+* **properties**
+  * DUT properties
+  * **config**
+    * Configuration profile name to be applied before executing test scripts
+    * The profile details are read from the yaml section matching this name under **configs**
+    * Refer to testbeds/sonic_configs.yaml for more details
+  * **build**
+    * Build profile name to be applied before executing test scripts
+    * The profile details are read from the yaml section matching this name under **builds**
+    * Refer to testbeds/sonic_builds.yaml for more details
+  * **services**
+    * Services profile name to be used for external services like radius/tacacs
+    * The profile details are read from the yaml section matching this name under **services**
+    * Refer to testbeds/sonic_services.yaml for more details
+    * TODO: add more details
+  * **speed**
+    * Speed profile name to be applied before executing test scripts
+    * The profile details are read from the yaml section matching this name under **speeds**
+    * Refer to testbeds/sonic_speeds.yaml for a sample
+
+* **breakout**
+  * Static port breakout configuration
+  * This is essentially a list of interface-name, breakout-mode pairs.
+
+* **rps**
+  * Remote Power Supply (PDU) details
+  * **model**
+    * RPS model
+    * currently supported models [Raritan, ServerTech, Avocent]
+    * The telnet protocol is used to interface with the RPS
+  * **ip**
+    * IP address of the RPS
+    * Only IPv4 addresses are currently supported
+  * **outlet**
+    * RPS outlet identification
+  * **username**
+    * RPS telnet username
+  * **password**
+    * RPS telnet password
+
+  The details of the TGen attributes are as given below:
+
+* **properties**
+  * TGen properties
+  * **type**
+    * Traffic Generator type
+    * currently supported TGen types [ixia, stc, scapy]
+  * **version**
+    * Traffic Generator version
+    * Supported versions:
+      * ixia 8.42
+      * stc 4.91
+      * scapy 1.0 [the scapy TGEN version is just a placeholder and not used]
+  * **ip**
+    * Traffic Generator chassis IP address
+    * Only IPv4 addresses are currently supported
+  * **ix_server**
+    * This is only applicable to Ixia
+    * This is the IxNetwork Server IP address
+    * Only IPv4 addresses are currently supported
+    * TODO: Add section on IxNetwork
+
+The **topology** section gives the interconnect details between the DUTs, as well as between each device and the TGen. Each child of this node represents a topology element and should be a DUT name from the **devices** section. The interconnections are specified in the **interfaces** child of each topology element. Each connected interface has **EndDevice** and **EndPort** attributes representing the partner device and its link.
+
+
+Traffic Generation
+==================
+
+![Image](tgen.jpg "icon")
+
+SPyTest supports the Ixia and Spirent third-party traffic generators, which provide client libraries to talk to the hardware.
+
+##### Ixia
+
+* Ixia is supported in the IxNetwork Server mode
+* Users should use an intermediate server to host the IxNetwork Server
+* Start the IxNetwork API server on the server where the IxNetwork Server is installed
+* The IxNetwork Server IP address needs to be given in the testbed file as "ix_server"
+* Currently SPyTest is verified with the below IxNetwork versions
+  * 8.42
+* There are differences in the way these versions are installed and launched.
+  Please consult the Ixia documentation for more details.
+
+##### Spirent
+
+* Spirent is supported in the Spirent TestCenter client mode
+* Currently SPyTest is verified with the below Spirent versions
+  * 4.91
+
+##### API
+
+All the HLTAPIs are exposed as wrapper functions in the format "tg_[HLTAPI]". There are a few differences between Ixia and Spirent, and these are handled inside the wrapper functions. As and when new differences are identified, they can easily be added to these wrapper functions.
+
+Users can refer to either the Ixia or Spirent HLTAPI reference guides and invoke the corresponding tg_[HLTAPI], as sketched below.
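+
+Below is a minimal sketch of a stream created and started through these wrappers; `tg` and
+the port handle `tg_ph_1` are assumed to come from the framework's TGen fixtures, and the
+parameter names follow the HLTAPI reference guides:
+
+```python
+# create a continuous 1000-pps IPv4 stream on one TGen port (sketch)
+res = tg.tg_traffic_config(mode="create", port_handle=tg_ph_1,
+                           transmit_mode="continuous", rate_pps=1000,
+                           mac_src="00:0a:01:00:00:01",
+                           mac_dst="00:0a:01:00:00:02",
+                           l3_protocol="ipv4",
+                           ip_src_addr="11.1.1.2", ip_dst_addr="21.1.1.2")
+
+tg.tg_traffic_control(action="run", handle=res["stream_id"])
+# ... send/verify phase of the test ...
+tg.tg_traffic_control(action="stop", handle=res["stream_id"])
+```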
+
+##### Scapy
+
+![Image](ptf.jpg "icon")
+
+* Scapy is supported using the Scapy service in the PTF docker
+* The Scapy service implements the tg_[HLTAPI] functions, which are remotely called from SPyTest
+* Currently PTF does not support connections between devices through a fan-out switch.
+  Once this is implemented, the direct connections between devices can be removed.
+* The PTF docker can also be used for legacy PTF test script execution, as the Scapy service does not take control of the PTF ports until SPyTest connects to it.
+* Refer to **Execution Modes**/**PTF Mode** for instructions on setting up the PTF environment
+
+![Image](scapy.gif "icon")
+
+* Stateless Traffic Support
+  * Only the packet types that are exercised in SPyTest are implemented
+    * For example: Ether/Dot1Q/ARP/IPv4/IPv6/UDP/TCP/ICMP/IGMP/Custom Payload
+    * New packet types will be added as and when required
+  * Various frame sizes
+  * Start/Stop/Enable/Disable/Modify streams
+  * Capture Clear/Start/Stop/Fetch
+  * Stats Reset/Fetch
+  * Increment/Decrement
+    * SRC/DST MAC/IPv4/TCP Ports/UDP Ports/VLAN
+* Host Emulation Support
+  * Create/Delete Interfaces
+  * Assign IPv4/IPv6 Addresses
+  * Ping Support
+  * ARP support
+* Protocol Emulation Support
+  * Currently not fully supported
+  * Only basic BGP neighborship is unit tested
+
+Execution Modes
+===============
+
+SPyTest supports executing tests in a standalone environment and in the PTF environment.
+
+#### PTF Mode
+
+Refer to [README.testbed.md](https://github.com/Azure/sonic-mgmt/blob/master/ansible/README.testbed.md) for setting up a PTF-32 or PTF-64 topology.
+
+#### Standalone Mode
+
+In standalone mode, the DUTs can be connected to each other and to the TGen.
+
+Environment - PTF Mode
+==============================
+
+Refer to [README.testbed.Overview.md](https://github.com/Azure/sonic-mgmt/blob/master/ansible/doc/README.testbed.Overview.md) for the PTF environment setup details.
+
+Environment - Standalone Mode
+==============================
+
+SPyTest currently supports Python2 and pip. The needed packages can be installed using
+
+    [ROOT]/bin/upgrade_requirements.sh
+
+The below environment file needs to be modified to suit your needs
+
+    [ROOT]/bin/env
+
+The traffic generator libraries are expected to be present at the below location. The path can also be customized using the environment variable SCID_TGEN_PATH.
+
+    /projects/scid/tgen
+
+The traffic generator libraries installation should look similar to the content of the below file
+
+    [ROOT]/bin/tgen_folders.txt
+
+*Please refer to install.md for more details on installation*
+
+Executing Tests
+===============
+
+The first step is to create the testbed file with the physical connection details.
+
+#### Running test script(s)
+
+    [ROOT]/bin/SPyTest --testbed testbed_file.yaml \
+          [ROOT]/tests/sanity/test_sanity_l2.py \
+          [ROOT]/tests/sanity/test_sanity_l3.py \
+          --logs-path [logs folder]
+
+#### Running tests using PyTest marker
+
+    [ROOT]/bin/SPyTest --testbed testbed_file.yaml \
+          -m community_pass
+
+#### Execution Results and Logs
+
+The results are stored in a CSV file with the date (YYYY-MM-DD) and time (HH-MM-SS) included in the file name,
+e.g. results_2020_04_04_15_27_result.csv
+
+The log messages are stored in a log file with the date (YYYY-MM-DD) and time (HH-MM-SS) included in the file name,
+e.g. results_2020_04_04_15_27_logs.log
+
+#### Command line arguments
+
+The following custom command line options are added to SPyTest in addition to the existing PyTest [options](https://docs.pytest.org/en/latest/); an example combining several of them follows the list.
+
+* --testbed-file=[file path]
+  * testbed file path -- default: ./testbed.yaml
+* --tclist-file=[file path]
+  * file containing the test function names to run
+* --tclist-csv=[csv]
+  * comma-separated list of test functions
+* --logs-path=[logs folder path]
+  * logs folder -- default: [current directory]
+* --email=EMAIL
+  * email address(es) to send the report to
+* --port-defaults={breakout,speed,both,none}
+  * set port defaults -- default: none
+* --load-image={installer,onie,none}
+  * load the image before and after execution using the specified method -- default: onie
+* --memory-check={test,module,none}
+  * read memory usage -- default: none
+  * The setting determines when the memory usage is read
+    * none - never
+    * test - at the end of every test function
+    * module - at the end of every module
+* --syslog-check={emerg,alert,crit,err,warning,notice,info,debug,none}
+  * read syslog messages of the given severity at the end of every module -- default: err
+* --save-sairedis={none,test,module}
+  * fetch the sairedis logs from the DUT to the logs location
+  * The setting determines when the logs need to be collected
+    * none - never
+    * test - at the end of every test function
+    * module - at the end of every module
+* --port-init-wait=PORT_INIT_WAIT
+  * wait time in seconds for ports to come up after clearing the configuration -- default: 300
+* --fetch-core-files={always,onfail,none,onerror,session,onfail-epilog,module-always,module-onfail,module-onerror}
+  * fetch the core files from the DUT to the logs location -- default: session
+  * The setting determines when the core files need to be collected
+    * always - at the end of every test function
+    * onfail - at the end of every failed test function
+    * onfail-epilog - at the end of every failed test function, before cleanup
+    * none - never
+    * onerror - at the end of every test function that failed with errors as given in sonic_errors.yaml
+    * session - at the end of the entire run
+    * module-always - at the end of every module
+    * module-onfail - at the end of every module in which at least one test function failed
+    * module-onerror - at the end of every module in which at least one test function failed with errors as given in sonic_errors.yaml
+* --get-tech-support={always,onfail,none,onerror,session,onfail-epilog,module-always,module-onfail,module-onerror}
+  * get the tech-support information from the DUT to the logs location -- default: onfail-epilog
+  * The setting determines when the tech support needs to be collected
+    * always - at the end of every test function
+    * onfail - at the end of every failed test function
+    * onfail-epilog - at the end of every failed test function, before cleanup
+    * none - never
+    * onerror - at the end of every test function that failed with errors as given in sonic_errors.yaml
+    * session - at the end of the entire run
+    * module-always - at the end of every module
+    * module-onfail - at the end of every module in which at least one test function failed
+    * module-onerror - at the end of every module in which at least one test function failed with errors as given in sonic_errors.yaml
+* --tc-max-timeout=TC_MAX_TIMEOUT
+  * maximum time, in seconds, that a test case can take to execute -- default: 600
+* --module-init-max-timeout=MODULE_INIT_MAX_TIMEOUT
+  * maximum time, in seconds, that a module initialization can take to execute -- default: 1200
+* --random-order={0,1}
+  * enable executing tests in random order -- default: 1
+* --community-build={0,1}
+  * community build support -- default: 0
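+
+For example, a run that collects core files and tech support only on failures, checks the syslog at the "err" level, and stores the logs in a dedicated folder can be invoked as below (an illustrative combination of the options above):
+
+    [ROOT]/bin/SPyTest --testbed testbed_file.yaml \
+           [ROOT]/tests/sanity/test_sanity_l2.py \
+           --fetch-core-files=onfail --get-tech-support=onfail-epilog \
+           --syslog-check=err --logs-path=logs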
+
+Log Files
+=========
+
+The log files generated are listed below, where [PREFIX] = "results_%Y_%m_%d_%H_%M_%S"
+
+* [PREFIX]_[DUTID]-[DUTNAME].log
+  * This contains the per-DUT log, where DUTID is D1, D2, etc. and DUTNAME is as given in the testbed file
+  * One file will be generated for each DUT in the testbed file
+* [PREFIX]_logs.log
+  * This is the consolidated log for the entire run
+* [PREFIX]_stdout.log
+  * This is the same as [PREFIX]_logs.log except that any stdout/stderr messages from SPyTest and its dependent libraries also get logged
+* [PREFIX]_report.txt
+  * This contains the final summary of the run: how many tests executed, time taken, pass rate, etc.
+* [PREFIX]_result.csv
+  * This contains the result of each test function executed in the run
+  * It also contains the description, time taken, etc.
+* [PREFIX]_result.html
+  * This is the same as [PREFIX]_result.csv as an HTML table for readily viewing in a browser
+* [PREFIX]_tcresult.csv
+  * This contains the result of each test case executed in the run
+  * As mentioned in the beginning, each test function may have one or more test cases
+  * It also contains the description, time taken, etc.
+* [PREFIX]_tcresult.html
+  * This is the same as [PREFIX]_tcresult.csv as an HTML table for readily viewing in a browser
+* [PREFIX]_result_modules.csv
+  * This contains the result counts (number of test functions) in various categories per test module and the time taken for each module
+* [PREFIX]_result_modules.html
+  * This is the same as [PREFIX]_result_modules.csv as an HTML table for readily viewing in a browser
+* [PREFIX]_tcresult_components.csv
+  * This contains the result counts (number of test cases) in various categories per test component and the time taken for each
+  * Please refer to [ROOT]/reporting/tcmap.csv for the association of test cases to components
+  * Example component names: Regression, NAT
+* [PREFIX]_tcresult_components.html
+  * This is the same as [PREFIX]_tcresult_components.csv as an HTML table for readily viewing in a browser
+* [PREFIX]_stats.txt
+  * This contains statistics on the time spent in each CLI command and TGen operation for each module
+* [PREFIX]_stats.csv
+  * This contains statistics on the total time spent in CLI and TGen for each module
+* [PREFIX]_syslog.csv
+  * This contains the syslog messages collected on all DUTs in each test module
+  * See the --syslog-check command line option to configure the severity and collection frequency of the messages
+* [PREFIX]_syslog.html
+  * This is the same as [PREFIX]_syslog.csv as an HTML table for readily viewing in a browser
+* [PREFIX]_tests_[module].log
+  * This is the same as [PREFIX]_logs.log but per module
+* [PREFIX]_tgen
+  * This contains TGen-specific debug logs
+
+Internals
+===============
+
+### Init Sequence
+
+SPyTest performs the below operations before executing the test modules.
+
+* Validate the specified testbed file.
+* Connect to all the devices and TGen specified in the testbed file.
+* Upgrade the software on the DUTs as specified in the command line arguments
+  or as given in the testbed build profile.
+* Remove all entries except "DEVICE_METADATA" and "PORT" from config_db.json
+  (see the sketch after this list).
+* Perform static port breakout as specified in the testbed file.
+* Configure the port speeds as given in the testbed speed profile.
+* Apply the configuration as given in the testbed config profile.
+* Save the resultant configuration as the base configuration.
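+
+The config_db.json cleanup step amounts to keeping only the two tables named above. The below sketch shows the idea; it is not the framework's actual implementation, and the file path is an assumption that may differ on your setup.
+
+    import json
+
+    # Minimal sketch: reduce config_db.json to DEVICE_METADATA and PORT.
+    # The path and the in-place rewrite are for illustration only.
+    path = "/etc/sonic/config_db.json"
+    with open(path) as fd:
+        cfg = json.load(fd)
+    cfg = {k: v for k, v in cfg.items() if k in ("DEVICE_METADATA", "PORT")}
+    with open(path, "w") as fd:
+        json.dump(cfg, fd, indent=4)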
+
+### Base Configuration
+
+As described in the **Init Sequence** section, the base configuration is created during SPyTest initialization. The framework ensures that the system is brought to the base configuration before starting any test module.
+
+### Module Configuration
+
+The test modules are expected to configure the devices and TGen in the module prologue and clean it up in the module epilogue. The below example shows the way modules register these hooks.
+
+    import pytest
+
+    @pytest.fixture(scope="module", autouse=True)
+    def sanity_l2_module_hooks(request):
+        ########### module prologue #################
+        # configure the DUTs and TGen as needed by this module
+        yield
+        ########### module epilogue #################
+        # clean up the module configuration
+
+The framework ensures that the prologue is called before any test function in the module is executed. Similarly, the epilogue is executed after all the test functions have executed.
+
+### Customize Error Patterns
+    TODO
+
+### Batch Processing
+    TODO
+
+### Scheduling
+    TODO
+
+### Static Analysis
+    The [ROOT]/bin/lint.sh script can be used to perform static analysis.
+    It invokes PyLint with flags to disable some of the unsupported options.
+
+### Best Practices
+    TODO
+
+### Known Issues
+    TODO
+
+### Known Limitations
+    TODO
+
+### Roadmap
+    TODO
diff --git a/spytest/Doc/ptf.jpg b/spytest/Doc/ptf.jpg
new file mode 100755
index 0000000000000000000000000000000000000000..c4d9859aa3432262b214a5bf919cbce7b66184cf
GIT binary patch
literal 53676
[binary JPEG data for spytest/Doc/ptf.jpg omitted]
diff --git a/spytest/Doc/scapy.gif b/spytest/Doc/scapy.gif
new file mode 100755
index 0000000000000000000000000000000000000000..0a5e1475e2e4255d8dd45c20efa2a6ee88bb4cfe
GIT binary patch
literal 60829
[binary GIF data for spytest/Doc/scapy.gif omitted]
z(h_b6t2&_SwCX9Ii&O;_s}7;7u4+n1)vE4GwX~{<@M^Hus;{zFuu96XA{4KN2}@XC zrhMm4XjvQn*w6qgtSYOpYKyEcVYuNcRu*fq0#VJ`DzN|FYPi;FwszL8ZY#33E3cC4 z9CE8kouIp+8*u2|ub~d+r5imS+#;50z)Ho_<)8iaKVoIqO3^WgMJosi65repuobc>z&!$Fj|f! zjRz?o=amV!DcsGb-ww^9r~nnh3T+Z z!7(k)@(tSftk%vVN)0W&-jI$_QNpqv?!jcF6m8`NZP4yi*wT&h$=;aIM6@!9PTC&C zb8HK!GmrqV6Sns?{MbF*dG61~2ha;|Hpx=b%e1c4N_rpuL>%Wbf zW-+VT9%Zu?)fLhl)Nz>Ojv>vZq=g0GMm{Cp;B4Fpjqky)KvM5##ss9b&R{`Bc&20} zZQ{fzikGSCyM##w?&P*PFtDCl=cHZ!q8L`96Jyn{5Sl9}kz`9=+}I{06;Z9eLa1l% z?8Yqxz?#|=Vqo6sLGC|_5IOC%A947y|hHGdY;}V_F5a9Zy3~g+hvFVd6jpfZ+ zpk99EWlb(hm<;={uRbyqaRiSCiW{jQ+MAJ9GbP;&ca!9fR{G>07+WPiVbBDlEumqt zFFqTts-~Xhk=np>W!34C>6QPeikTvN^&8i zrz}TuCrjrmH>ZaRCnx9UAm8REKQe;ma(@CdAy4WrBlC0CBvJLov<^+a=H$NKo^0h~ zNbNBnTjOpY3eAI0(3cZv`4S#JMXEW-lvilG&%orN;9dILZ`vLiZ$k)_2y)!qOB(e z+BIXdW=7S7tqq7c>pv!|w#I769Oktet5Q2}2P5^jGWEJjwY>Ify8bFuck8;IYf`5K z_-dq0m=OEL6Mg)A2}|R41|UPnYU)jo_%o*K0iD$ZGaxb9QHY_6I>4_D&OK z1tK)FrTU>?*2Uy!vvzB{_G|Z}?MRTx;$2PSvK_eW|7-sXfKYSl-+ zi@lleyMgT#Z*6jqZE<%KcV{;mE;oE9A>g#__>B!nf=YjrmLLAN>`h?_M{`(HF}{73 zMMSo5XLDQfA}W&K$W-p2xkcvcF6uV#_DakamtpamQ0OW&?QXFc_wJ{buJK%x?-s{k z!WW$7k2Owiu2JBGPhGbZ^v*cH1Q?V%anfLMQtFm+C3fD5*|vSL?1q+4J9s1ijkC9mwCCvLIC!ky5C>JrDFm|QX7aQLQcjx*vVD_YjQc=IcMDlIxRYqV}o0ATx? zF^xM{E81|{0^G9m)`fUG@s?o|J1p=D9W^3*?q>HYf*Nm;UtHGSXtS@bT)Tn<1*m!3 z`3VSv3uUcuCM7W#1gZ{;-EV|W=ys=6C(#~^omu4niDBw21$ZaoD$^bk?qd;J5adnl zVT+_Y!bDL}5=@pg7)sduIJ~>5q^kBzn52=W$9&m-_~VsH29948?w$|b?z1XkQ3Wu_ zzkHsN^__p2&fa9kchr|J2;Zfo2Cv$by-vUjJW;-|R3@JSS2QfWYyyA;DAVRlg=>3lU(cG&6qY>k9 zxjT~#i|I!R+>CQGE}G@$xv{aAx`(siD`NQh{C#@mdxG#C|N7kodK)y+des}=2(Rta zmx&Kj9yC7fP#^b|6>%k{p$7xhF~;A5H{R0!GCI%8n#GAx^4S~Bdf^XJpIMW-tV;=D z*P%j!n2imUwPIWT5)BAbW_2I!pPONyrx?&GeW*7QQCMw*s(0Ve@!st#3H{r(HBy6{ zej~b>=aQnkOroN#Vx7U?KG|80Yw6bkgaS7K2oe++z)iw}3lk1JxDaB1gA)fDbXZa1 zz={e9He|^0;6{uE1u8UoG2=&s8&{Il$T8o5iVs(6ESL|(%Yq^U#02mYXixw#FDj(z zawkWEL{oaKiPUJ(s0bk*RcbM1$C)FmV&oZdWXY5dr-sz(l`B$;46T+$+0txFr6{kG zoywHs*PAl29_=|mXkfvE2^Sa;a&1!oh#vD^b)#TF0m73hSGIf^b7sw(Id>+UFP~hU zP9thXi1RVn)TT{0RDkfc>ea1nc8tyXY{!mpdPjkMecWd58DbE`IK9=V+zlUjKMx^OtEE$Cj_%{&4%+c{W^M zd^YEXxKAFo6d2Pd)ugLr}lSHaro< z6jfZ2#lcW3t+a>y>M0+mXzGnVoyMw2iVzaI$AuV~04hkTup()R4Lqo5NT8O43Z)(i z;sAqr{By7?umXT1gD0cK?{#BhThZ@D}`Q!QZb~mG}MhAe{^B1m6`;=%1)$UZFp|_v z3`7_)t)|%H(#!bbOrfx7ZbOdpIBmzV0i=T1a#%xW*2Leh)R=xA&)umbW?(^$qT z-Hlm^OhlI2YOTF?LkpXuP`SpQL&!pvyb`fF25BQG$vby&fm}C+Trx>Y?ZZ$jUNvIV z1B8ZTw>^c@ZFD+r<=eJ7INZs%QFlQ#(hXME)Bw#!naCg)IcwShfFcp%0O9~RJu5la z4g@ze?NsV0T=P%{Z?bUz8#EH;le}`-!ExpK@Ffxdbz_HjSW`IPc#k93-i;SlxJa&2 z`?sbKL+m!boeV~Jr;grL&tZ;FEf*m!a{(X=tuWU3;Zr{*(4=|0b=fHaNj4cl4J*?i z16)>E!EU_o&O7hB{az(Bn_upZ-kKqUCkww7=X>wQSFi!w$R(dVGrWq*?MTc4#mKA# zbxI7Po!3neNG9c+iz6m9urh#mO>*5)bTTlh)Wj5! zLIW+_@v)ZrAJgf>2#(oJXw z4D>9WpdrJyJs~FlOsyZ(*ZXhkEVDz4RIuYKj5rJ>2T79r=oBV55HDiHL7o;g(7Wcj z;CU6974;&RqxMw%7pIl}IpaCCFR2UQReP;kM8~_MFupi*$33KhaowPs@ z!r3`o3*@^1HmJn72Kng(d5tn`vrubnD0IO?Q z258qcGB$A`??XtOBDNAXCW!(Y_zCT}7{(NAP>T)OK?v9s#(8w4kA56uzS6c3j)naqN#CTNm>FShm-^FJz7p(c1zf9J!>dJnI?MWWWcVCKeJu8bJ@eWEL_i zc7#>O)8g*`x|o7>fl+!~D&rXMRGOW^31T9N(>&lPM*wUw1GR)CL9#f<=JBzM`&daJ z#W=MKG1GyUoZZGCX08RAshg6-042F80&SM@lnj#Mb^`YtDIx@Xg8O9nEVF_h*6;%g zP(VKS+0T6P^8*1xW7A+Hu&@Bsp9$q>4+l`hBt8_P)Y@F^>H?BK70OW_Im$P&nSw0l zN|zoJAe{Q=h7C~4VX|W$`-TL)CEUYSDG^?h`lri;Y_n`@0su(QNYYN7bRczt6H2=@ zz>K7{bvq4e^{VDN*bS9a2@xtdt>im{?#6P)S_)srgOL+{rbcSHl4+Q=kz(nvJqs+! 
zAwSA!%bVWm9{mFzQkh1ThqN@2Fy+cj<+!Afs?E$S(gr4f4~@>LY#ge<(N9dRgvqe@+lEw!=9Z0$Nj zhei~)|GCXp$=W8eKJ|w?_mRy*4ml~CzNWa_T*3~5xh7M+>vt!W%`$oUpy`3lq?AI) zAr)z=kYaOA%th>(u9y%`glTHEIZl~2w!fT`iyaOW8vNv^Ix8)5yV%{$f51XB1Fe&0 zFmi+32;<*=sRu$50*g9hbI3Jfz@{Dj*K(VCrXgAIrt+KDz4o^|+OaZ=Spc4dajLcN zM(T9&8?Se})L`@$m21tphnw2FwOKJnc#V`U{DP!9G~GDF<>}LJPqRc+H3yVcLoz*4 z_#J%?ltAK&k2+cD&TZjGzzT@TZFSpaFI(oUz;ero;(1{Dz_O^UvR^}3^*)|yEWRRu z|D8zPlo&WwFT#eD6C5_+je<}Sun=1_LAcbrbroch4T-BA3;nE7g-N8HDiva1V$*@d z7m}t#=5|(XU+$d6Ee~z^%K;boV6>&SAs3RB z61sAg^Yh@cHB%@a;PZ#eBvluP;M#8`&PqlnyQB?=uO1lAyCz=aSDMiS%MFrL|M$m^ zS|0L>AWNf?un6gLMg-zPp`FV06(;;P>|ldykj#R+Va2f1IISVDi`wbQF+FX+2|=A1QmwM%QMa^34?ynr|@8sgZ+9k%G1u%6ZOBE4kc`FfVFBBVBV?aZ{S| z$@wgK?k=AfuWJXh*?mB?GHfeN!7}T}C^s|jWg%FEG3Kwxyn^rJj7T^*;qt&aAMh7x zGF3mjD0^s4b-N1?*Jp9_p{E3~JjWc}JR3Hds;6jEC8>9w7IBGPuV2c(RAVMSrbgs0 zY3p4YV)Tn1nkKTqHp*VIT;~8EXU{+5Per{8;S-J+5@x?J-bEN~4OxSI|C5~1o!_VT zQR=OWQ`RdWvRu*_Qp>7h+P6{MtHZrQ>=$T^`~F>kk18T-NOlS41kbL7h1*0x`3_4b zoKI}TNcyM`vCPf7(26y>r=Vzp)y@YnI%D1r!*|qY<5b3?vI2ArMRz2khc>VCUQo9Z z&n^%{pbQUYFmN%vMyXCrb+qooG>y{)P+zbMXShjBhEU|JW?|F_#Zn;cFz$@UA_A73$_ew8pyTEWeh7^~NXzYA?j5qwq|IlhDX*0-%)m!=08+FiPpl zswQDJX}|)fJjmtXbZ|~0%a_6W$Dv zE)K8q4-30wXwE`)x)2w%;!M;7x;7$<$S`TRsvCDw+?G$VzU z1$8hmKpO7oJjauw3^o7`29L4JHs=RzK11zQhMXHRZjZNG!1J1Dw<7^3Dl9?Edcl^Xv+64a+{|(U`=pRq2@1`V-n2*!e zP4wPkBJnXJ%B(M(Le(12$uj3rqHaEZqYPD|BGDo<13j1G8&32+Y_R~jFFzD zG{P)%+G2h15Z@T`F%{%0R7XshQ8!c)OK?cFvTr^VaI9QnvJ@~+((O<<$0Q1ZgO>3j zfkoYM6_u*1~H1W+YM0Y!OjM@j?qL{S$WrR6yCMCeAHk@{K|L z0~Y5(K@rr|HqI`LZ66=y1ef9&b42@2Ek@h`d{V^m)Pq3ZttpA@I_*tHG|xS+6p0E> z3n4GYitJ^4D`y1EK;Q=b%5*k)2Ax{Xq|%gOPKGqOX2=Z1Jy>Z!5@xmx?qqah+o0@1 zGQ`MYW;Fb3@W5}?c*_s@6ikthVbn(QrtAjg|FldaRct;C>cnn#GO5*U!$|w&GXE=N z5G+o=)JVBvPakA7^K=hql)*Z}4pFHl2-P#fQw>E$%HZ@Wd+{+^5KDKJawhUcuSYsU zNJZn+&TJF~5foS|5ldl0{V|M}tLNF0Xv5(s;JQC`GhhH)7kqLV-pqS??A5qLp6r!cGrl1YxmX z%5^8cm0iUO56@~tO-+Qd0w-3aNp)yOe#0P41|1k8Tj6B3O#)J$Hag_~s0yS*NS>L8x7UMI8 zBEXo)Q^6uLmF6;uab%+IToO_kqb6Wz)M|y1We3&`HPh;j4n4bLW=&PLm?#)yOu@vJ zXqRYgNrpvIXaE8V>`XQ4I#podlssa{V?h>hRb--~g)Cl8Cp6G$D`k7qLR&8uLKy{P z?=$jXWY#`tN5FMiNv$bK_C7}@LVaUrF}EV)sYey^hhpM8Ktpv~NL`_kLe~v1F85y( z)I?LuTJJ4MZ&0)z^>HONQP8bAFceRy^+(CIT7h>+SL<@&b@X; zG_h5OC?*)VMd((YYF0l- zIE5d>w!&A4SA;&L(q18A``ArB)pmC^by_nDc%eepMq_rFg^PPR7ij6%%Cw?vb@Xamcs%KsjfcyW+J?&L)bMhYm6nz>Qbv?Zm=-me7F!l8 zNq9iZFMpHPJaI5)rj}(_xq~rvgjs2AC(lux&Pij7hkN-?A(d6D_o2cQE4P#KfKg^v zqmsWDliOL1Yb0RVIJFc7o+Gwjzl1q&cc?AOSa1tPDN7JQiK2vJXGX@LHDIl&W!Sf6Sh%Y@fuozN&0B$i zD6?^!h7TCHfBU(c8@+u9xwYH2v75dT{JnPwx@a~`wC zAB25voXCNk$Eo*EfgfQ($HhwLye4|ZI*x0Z)JaYp>rKSch))B@Ye(M4p$b7PwB`91oI%ZaN1PZ znVDOCy-r~?bn|y$FdapJS#1G^WaYGcx&1?snRiV+XcxNI$=#j4l-bFBle6r|nSIXV zH)fOqybgMGJH?e&dhzaw^n7TFY=lbmYA%)jMONPCB+nT4STMwE&3W zF@!D(H|Do^ULXEQ?2KOjo6%pXRdGN1a&_J)Ir?5X-YvwOw7%q+w@Tn{x?kP4IJ~#e z1-`W%*p=5e+ubxNM)P?48CM^3G?ATK_t$2VPS7)YQEL=KPm`eZIFp=4?N7FX^ZbNO z@X9q~e5K7!+{S4N_-d*VnK-*Cpx^C5Mf7x6ct+BsE{H%9wED%AGrR#Y_cLJ?hm6VgMDofCCF2Ot`S&!-x~N%r}7J$pCo4 zLOzR7V&sVe@q$cy^zlc}9zA!wjFB|jq8CY%6^(lG>d9;qUVd%y>gbb?*+$--o2_rz zoA-u3eU-8Q;>C^i0uK(nbw}5)X)CpSwxQ$91vQt>eLH&g>fCD!MUC4wW!QV0qj$_& zy6Mf{V^cP5TKHzu#*gcVyF#UQ>;M9&zyN;jXW)SdCaB!-B!7He+ZWM5$LYG=)TKtz3a{tZ@Tl=OE13m0{rg4{1z-QzXI!vFu@5kR&Kcq z>sxQT1FM@ZxfQqjZ^aTryf4RR9;Xt;|I)khxhGp}u*CFITrtKd!^^M7{n9Kk!4Y$e z^1c2d>{iM;Z@jV10DmlV%q*wtvd$Lc%yh`&3Z^YyJ$*JH2ysTO_10X=)j(f;5%@EK zWXH=cxfqX(aoB0IEic&lfQ|OsamTGR+IF8Uw%O>ujrQ2*y1jPUgtKk9!)3eOH`(S2 zUU=Gg^R0H+a{v7B+=M4CcHe+IY`khqxxhe*UC8u5tO>dy_4$iyZ(@rh96VJ@P0 zL0mBHidf8|7PrX7E_(5cU<{)e$4JI9n(>TiOrsjt$i_Ch`LT`RJ6;@f_QpEe(Twrv 
znjD+Q$3FV;kAMuMAO}gvLK^arh)kp+7m2*%-RxqI+#@3=Ny$oD@{*X$q$U~HNJyIN zlb#G^BtbdK@J+99%)1vSGv~-Tw(^y(j3q2*Da%^Y@|L#DB`(*cM-YT6DZs>#FoQ|V zVH&fT$ULSplgZ3wIj?xmAbU0Fg41M78;V5 z+BBv(y{S%f%F}8VNu&ma%7$tb8=xBXs3dZW9+Sf<*Z}IOO@*pcqe|7OT2-o8t!h@U zs@1M?^{ZPAt5?Me*0F-8tZ1cGTFtuFwvzR&Y=x^^<4V`L+EuQ2t!rM}Id?+0iOT zw4Eg_YD=5i(?(}goQY3-PU_j!+E%lz&1_{^%iG)T*0-U>ZEuAO+}aZNxW7eiY}>=! z^{af)U1VX?4S#7yA{idh`v7n4}UHkOida?Imr ziT6Wd8S#sPT;s6BSja#=a*@lTWECUX$P;=plcT(3DOKPN zbdeB!Xg@hR(1u>LY}1oyMoa%%Sb#3HqcdIUO%uA)nLd`YK;;_YbjU=G`gET#t7=qB z$I_f$b*f?A>R7wFH?+?6Y-`PHU9;NPsGY2^IXi4pbBR<#0hE1;P3dMc``N^fwy~w{ zY-&TB)TJUR*D8!Y5Ut2ZE$R%L!tL!bkvrTQs!6)3G$nO&C`#w{QoFmfZg9)n-SbX2 zh*oSPVfCbHTGH`PBTVpu8@!_X{{RhQcKr=zvYaEw0jl z0l)z;fdTVC5cl8$&><-xNE#e~f@e`If#L>mkO3ZW9!vlLIY=CMAq61-fix%(CI|o_ zuq!oaYWP(xLl^){umL@|U_dw!E0`-g$P(|6E3LPLXEB9tP=%FJgc^_q7$F5Mk%d$k zgbc=nTi7exF&@fBdCk%sSGR!FF@g9cEvH6xba+;AxOeZ-RLHVleb^d*Scrodh*pIx zouX}9V^T}e8YbpoWpN%9LVHj0d9kM_nuv+ja*4N>iRrP4qF8mFXo{t0V&`hPpAPR;08O8g|CPeYoU02Xclnb4jw=wJ6McD@`P%5iI#YS#pqZY zW{DA!X3|0t_i%(m=7bx7jk~yeZg7m+$bu;*jo_yc%1991$Q5qD4z&1U z0SGRA)`7i;d6hAUT*z{#7Ke)hTZ`y|^#xdXm?=H?ktzw2ZrGA2NhvVtS2Br&E*T>d zM}S1wj+r3^W2lQRP#qaKax;i2E;tl7=nbNAj7;|t+lYrycNsgF1-LeFYsYw_vJ(}; z1Tz0flmxMj1F-`dAYN5i5L0QDhr)Z>1eUrumZjp1AW?-+$btjmleHKCKADTNhn7b< zdqqig97!GoHXbGNhZ;C@eVJ+==p^rUUJj;{jQJamNpz4|nE?_Ranm&w)`}<+d{n_E zlIMS@S6TKqny`0%g2{WRd232oeyO>IuF0Cj5u2fwBD5J>kysfZwtMV&mjrQ)OCXjz zz!Dm;4|w^L1@VMgc!dB!ivtmZx#$LF*qmet5k=UXDF~N4!IWQU14W1dB5;GmnF7+; z63fT|E^rUcnE@>Uo+a248ZZwAF`Yjt5z}de2*I4RNMjdqj`nGu0U(~unF2fD0>l3a z05W)k%c!6h2ABeIpFt>~v{;}Aih^WTpaH-HICv92NtN1ogf7A+Msjadk$Ou4B`3Oj zi$Z?Shmd;sdLin1UxA`B8X_uMqXM^^H~N1zT4Xy)S{L~(eMX4hCwPhXktGR&CrN^A zs1aoHV5-5STIi&3IHf{oh*rvpN6KKLsH7%&mH5;lvq+u@VTM$xo;m0S8xWKXDiBi% zmtQD@v$zk^`IB~85YS15Y>IDyZ7|p=o(3u+gCbda0#)grhwAj&n&F zX6UK*G^P>(owdR$)d(o%@tD>JExd}DL=`H!axCemU^NLW)%R<=25j5stkT-7)are< zf)S4Ys5LWjW*+B{Pih|mIdvmC7WnsiO!kX=n2qRaA4#^ZBiF9|$F4@^uI7p(@tPiD zBChO)5WV>jH;4fQ!3?+JkPu3A8wjQAid$sFYvW zsOt!xYI=l+T9@tEoB3&|nL4rUxTXR@s3CitDWI@-X%!}7uq|tsgxZ!OFc9njo=my2 z8}JV6T2llOvXXkUVi}#YD4zpyg=&})@kkN1N^e0cA@%2Q`{tl8%4HyDi57QbClM5q zVUYVewqjeh4@b6VYqnZBR6P--*fOMk26lE>fhRSTbnBH^TDPz0heCa3hRS9r*Vz(u36u_^t4wK+mimqp!H(gHgR~Ww)iLlmjmSmxx1DcfE2>{g@vEnJQiMO$InxV8S5u*E)Kxw+T*t6RBuyRP5 zyz8m0s+7Q6x&pzueX;=_D-k;wltjUuh&XCjvc0168n8ks-y0ykC>&7LfyZhg=aplO z`*wxWDD5j*>ifPa=PU9%tSPp4vl4cWqIu%dBW9+4KmwXLW+e6sOkYx?u6DpYn!ug8 zBJ-rcRg#(z?7;9b!R=PT4Sd1l>y72ukQULeKc#|`%er64f_JKdE%A)#8J@)dXqD_R zo_6VsAV<0*9G~ZSpDv-4a|#h>`JMwB!=76aIGn;L%o50G9;I6lLu|z7Si%kgofg}l zV5zU_w!}Qx#4CspJ3JO<>Jag`6Be5!-}-Txc(1?MehL?_QNqEeaw@d-9#3I41UY`U z2Yiw!k-AyOcx=a=sK*rc~nu z$&dVfcauVu8$H}k3_+@$u>)PHupWvbMjMojDgsN80XE>feA-~bip-<`vdpnKR=Atwuz=sRRk|?8SmabLu(DF*98hpVF4biGv(WH0LM9IOZF>ZGT zB_s=3&U>%}+o}|h0l3f^t6R&`3lUZ5x^4N�VZ48p$otuLP zVTPYjp3nFcW@^V$BFF6ad;T_W5>3U^rDSX@QwKRDzl4u+9cg#}Y@5oWeBX+^fKAvN znHDn|9fDjZLmG3G<#bkgl{Ti%8OU|HQHLEEb}E_9X_wheId+n@&-R4bAc>f!-Pxu6 z*^`};I7w;P793-mlEyq4xx9`Ds+X=CsjS=9se6NTX@(V>0ZO^gq+8V`=+bIvf|aVK zRSlhyahCu3paWW-WQfcJ@xoN;+&%4~k;=G>`kVmJ+%JvI1e=SO`_hRW8!x=l4m%K3 zogH&Js^X}O3UH{bk;}YhlHoflbJ&ZAIJcxNc(?}HBGQCkWzIsCBAmR2$ug7;F3%4x zgit9|3VB}HI#4w6uI~pPahxS1@zC4rXrSqr4D4)?{C+0?KG!IYw^7o1f&1bs{=j)m zz(ciYAg$sa5z1*%mSnBenhTHcx!rk+mo2f# zOb}9ju;vM=Fj422E48>cuPcVpzXFi57G=#vBsD^Ahh}CTjJA0}<1r4l7<}n2j_Lg9 ztr=HtGByy6O($^c90nG)^c<4@T#ADGh9qK#_qC-ExRFiz>QJg*vR;5#`e$!QEsN}+ zVbQn#ZI(QFB_{G7Cq*^eRD;YxCU8jjU_`8_!1pv%-#ex`|KT8%&`_iLa;{ zv{kncI=z7Cba;5YE6Lw;+meJ-w)rM>qQ3cqkjm#%Hb$1J+gi`FG_{h7At@?_lF9QW#Voo{O^b;BoA zgMQ(t*x@>^xDy$0{pjccrz}|09~PznuYLL$5(=q!?pJ2-2k~D2;$W|%VGo)RZ*iVy 
z?Bk_J`Oxh69DZ46?3Y={_WaJ$g75#)Da(3?-~Oe}|NicA+sS>8_mKwAr;+#FZqI^U zfQF)4F-Mc#p^X4wz3ZWy;yY*tzVnXB$&m+r+y3AviHLR!j%cy&L5 zX=!&yjg7i~`d7d1Nmx{r%=bAK9R-CcBxov(m+E$#De*BViC_H9W&HNb8p^M||4mer zQF4#C;iXx&pclvZ>bJ-XCa9?LHhyd#xBVDARG#epCei&Cm#-3>*yDfD`>2RMQm?a# zf0#yP=-+S=?2Xii`yh9~01+U78wCR09298K&A^2U84gT%uwlZ13MNi`7;&M*hy^Qt zM99!1M~V_XmONN+W6Fml9V%3*QX|NUD_fr2|9EmHM1l+(_9U3ID>6zR*G zM_XRR`SW7Nk0^~E4e6Ar)~sBwqSU(6D_E9TV=~N_55QWsZQZ_w8&@vdd;=O<#JUi# zMVAo+;0-wuuR*GV1saApxbI$!Ar*IJ4EC>7jFERfuB^B5UCIlNS@X}5QK-P8>PW&8&$0?9oq+l-#toM`ZoP}Xs)@rGDTMJwAGv~vNZmlH@HY@& zgl;M5Sh_MwFUiVrxDkcqs>7BD1S~2Wb;Qo3?SM3kw%?*GO12B-J5xj~X;Kl$BhS*) z$fvH@Tut7^NrOU1*2lQ>c^MIodzWV$^Z6^FUa<0Ho z!F%Y`0`vN>L{3$GZ&p!RE3H(*x=U2Mj0|HhSR{io(63g5Q|wbxF9i11B9D|*z~{Qt zbx%`~q;t9nwN2B%?Uao%JY>5nGLA559h0?mDGuOf8LsXiprUB7PZQK*f7`Y|PscLlj) zHf0N$*gjD@KJ(X^8}IQ0aSV@6q{^G6G>maw~`xU@Ft45ub)q@^Xc^JgllA~#L0*Yg=*Xm!pJ z$S3vAJE^_>?pq`!k={yCwOgvxVa4OBD?Qw{Q;u4YS!E3JSBGjd!qUE_^Kz2I{cUrN zI}<(g&?!b&tWJeRI5Eo=hU#XchZS>frckSO*{}e6Gq|sEuDsUB$J+Kf0d;L|)CjF~ zuWfLNdyU+Gq3>5?^<)}JfQ^B;=cs;uZ0}sU|mKy!vL}{gdKd44RIL52qN%?G?X9! z{inkfJ}`wq%-{iCXu}iokb*>1U=m@7#1884h!#X46&)BsC@xTmP847l!x%&pc5#VT z{NfA8h(IuE(T#8P;s8@rnICd9~nwej`EV9Waa)u zxye(4a)?W$r5|(oM@qVqlZI5J3x9dZTL#jFveYFoi>S#~5^|A@bfgSd2~11=GLVZT zASjV}%xB)RlF3ZuC!gs{88T3it88UBFIYoiK2nm&OeZ=~Da?5;F_pt4CMOkn%WB$F znSU%NJ$2bkV?vXK>D1>r!+A+mdh(i?++`$3$-j6a@|}aUXf`eBOpIcbqoQ=DMyaXJ zac1I#Zg~)TTGZX-;*zQ=azJr#}U1P=z{Fq88PtM@4E< zmAX`>Hr1*BPlak!r8-rrR@JIk#cEcyx>c@r)vI3xYgolPRvpSHLri=D`5Q^Si%n0u!2QwVH0~;#Wt3)k6mnJ9a~Py zGPJUnwQOcFyIIY4mb0JrY-m9{+E-52w2(cmWKp|X)wVXZuVpQ4T^n24ing?N%?A~N z%28X=a*vJ71<&lPTTk;`1@PS?58Rql1ITixt(m%7;f z?smbe-R_RJozXmRdiRLl^{SV>?{#l{+dE(Q(pSFq#jkuJ2}oP+*T3ETZ+`(iU;!5x zqXa(xuz@?NVE!i9!3%zHgCRU&30FA45xy{mGi+f5Yq-N4X7GjuiOmL+xTF1@U{KS- zoA$_0XiytZjA1c*sWfv5=EoWF-^%$W2DF zlYQLc_Iw9REXMMcwQOZBr>M(S_HvlPJZ37FIm=%zGnCcLWHvv!%}aK3oZ&oYDc3p5 zcV4rc?VM*i^O%;bfOi#Xht{M(T|37q$NFRN>|#_m&SCaHN9z0 zciPjR26d=KJ!(=@I&nWm5R#;|>ejJ3O0J$wtUEPp;LsY@wr+K? 
zalLC|_Zry87Iv_UJ#1qq8`)p-p0T7&m}pab+R&zuYpRWHZC^Xv+y1t24I?jm0Nk@r!eO;~DQb$Um-ejFa5sB@emDMUL{3r`+W&hdFs{ z2DmEK98fn03C?YvbDrQ~=-*1`VtnrofxSp&L_Xufmj0p-*_ZCvN{Wgh;t%RY6VJKmqC_q*Gj z?tbU{-}4T5!Vli?doMiV58wC23tsUhFJ7VcHTgwRUh|bN z;C2y;%Kd)&&)@y-w}1Qz6es?Jj&SLRzl`xe0IWZ3(?2K~oCNH@G65a@3mP67h_~yh zxNDzuDV)uaj0?=b=)pkMA(sx+Ko0~#4&1;GEWr^pK@mK`6;#0%48a#9K^Nq}iwl-o zs~j6lkK@Tfgt)JeB(%a8t3uy_!XfNJD&#^iyh1TFLoY1D@Cd`}2@)Sno!~jbv7rj#|MMwBqZ-1Q z83o)!Jw!i00Va?O z55PpX(0~*elzRw8Ke-1Na03p|#H%?7Ohm<`xCa2p0)dq%iX z6Q9YwW`v43aRUkDskrNwYiUB(F_43i$9SYi^0CJjyT^B&$9#lGe%wcV1W0|X$AIid zg8WB;^v8ijNP|?!f?UXlR5_zi#T3wsCD;HA5CMeXL=RXAb^rj1|Ky1Q*~qMbgACwE zcECvND9H`bfGl{C9RL7SOb8B0N!OUjjj+g?oIl^N$PjRfWI?omu{5D%G@@KIqf|7c zOf;noN`hHRr))~uIW(r6%G8jS46Gil2p=#E06Xj{|FM{HL&33RK^ROyvm{FyJj)nF z%d%w4w_MA(Ov|=Z%ejQhnyEIRV24mt2q}<7gBVGJm`GRwiJZKMn^Z+zq=^8K0u0cC z4Dd;TSb`mJfepX~!CXa}+e?b{2)}emoY=aR*Z`kY8FJ$r6Dhl)^SYj4I=NxZpBovX zle)iY&Dd1UunQd644d10y4Os*-E2+VRFXQeL?=Oys7bwz|BFDi0IC&JmH@e#td!2^ zq|QT|PV21B$U$bmS;#{5tJ6qK86h>>&yzg&x7j87Mc#kL4VPFxGV{2b>YpDgjb z?8(s9$+*sgybS%&&C^g3%CI_J?3GbRAsSM9X(T3RgE(}SZ!5VMb%ejRa({6T;)|<_0_U*o}<(Z_H=`i zbb%<@(obXy`+OBQI7Nvd(EikbeMpVL%t^kW%=i4$R7^?bV9x+BR&a&Pji^stkW-NC zNo@s5-0?l!0Lox#$AkHgh&#^uP>y&#mn_8B5}nt5mAiYb*L>~QfaOC+7Qp34ZBr&wK#7*m4)Q#t)l{M=c#|KP}ZeU;F-j!%)w)@4Ipf!-&S-twScRkPky zB~RqIUhm{y?d6tO0h%q@ zU&`p;`_*5EZ4&(bU$+^R0S@2;<`M!H-~~=#23Fww<=+ODVE?7y0;bZ636p@ilEIW< z-F;f0=u((b9(Hg69>`ss-Bz1nNu^cOl+4yQ4FOf8)~p%flGt6B1YwTk02kN?$sElR ziA6X4mR2p)uO+xmRUe{k(yC#=yWNr}Zkp^F&MBt2Dwfef&D1C+6DX!)EMDT@k&3k= z8@0QTLxI4dp$|~iskk%GEo>f=pV*w#;hH%vkBPHm`}=J{IIZ)?+^w zm_a^dLhfTa#$!c>UV({NZ6gU{U5%lA+Mh7c`9TU!RD}$gfPq)ZpD6>R<5|xh_TbA#ei1C$brDm93F@)4Mrudn~jAOpee+FRhlw!>6d}& zDUBH+iRqd)i<>rQq|oX5YuKs5zn@;4LiHG+mbgj$Kbbxn1(XslVdI=)$7y~;K<+z6 zhS<8ZYF)u#9!czdLKS&W_eao^m;sTa;GLj15ap z-Zzz9!VC|;WYC#>3mb5Uza9)s|IC8irRa;yNN<(hGql|hzzf0UnQuI}X4?&#j`?51A@ zwwOSbJ>$ufm;P?#Ot^NFi3P^cVcy?sq{C>NV9eRKNJK|)R#6P;IzF6|P1VExKAitP zXZ!9I|CZW$u0#MQaMCmIGVYL)iJky|Uyw@~0^AYN>!&2e$Eb(%?!E#oaxP1BYDkp8f zEuRA(kkHubt>f!~ISOvhDQ`|1@Y@yn!)Z0%VB5q-Bp%s4uN(39b8qDHrUp(!AM`#? 
zbV2{~LkDz1Z}dVp!QLEMvjOxxSKGW<37F{-UZmTimKzcQ8>3rWiSutI?sR{_-<>Rq ztSynH=Ii8xz*+B62lVb*?{K6e9^}(?yuEco&2^3yn`0zrp)6bD-bI-yy4QSPkkIg( zifV_oZ7a9(W|8gq|1NVyR&Dvv_T}mJ&(8MK_GK*>m~cmTZddnnUw18!V{>GRe2McS zUmp%$?Q3iE4>V0MZ?UijTXE+`dk2uONS4x>LTrO$BAlK*@xg^pcrve5^^A{*$3d@< zyoQ$(hBrcopTcL)SAy?%T5A)(+wsnTM}R)VuGCw&3vQKe4E!#hK-K54N%Wc5lbSDd zn+MHH&-tFm`Jd-`pqF`{5Bj1XdYnJ%1|Ry<<@76FzzNrlG^W0=ExumQ)C4Tf14PHB zSL*e?@OSpq8nN1){&dlGz^R>zvtQ_qAyl+y`>MR#nYZ|{=UA_O=?A>+npXNhk94bV z>a&SCkT2l9c?16)Y`8F9zjO^N4&0crUc`MPFHTI^ z@ny($D^K2x*|TQBmO+me-B~bd&I%W!PP^GIXTn8Y&)!&cZ``@FPpAFZTldc1dMVHC z{Tixorkhi8UYrszbKnf`!iA0d^%7J>z#c0?1M9>R}U3@`|<4K zpP!FAV|b?K+qw>4_jh$_#kCrC#X|2>d-~ZYpnnP~=bMGbIVhrmDw-&xi#pmUq>oA(DW#KIS}CTN zYMLphn|j(QsGkz*CaG|)DQc*xPHL#Bth!q2tF4adO^l(uB9cH0>6$CAxca*5ue}2M z9kK5fd#tg>va}Vm%PyNNve7~tEVa{0d#$zEV!N%j-E#Y_x8Z_2uDIord#<_ZqPwoT z?XvrBx_8M-*SsRpd#^~o!n?1#-2(fsumM*qFu()n`$`fq;_^y9@G$TTt8F~t>I zd@;ruYrHYX9eeyS$p0aWJTl28n|w0LDXY9P%PqV7GR!f{JoCvQaC1tnub2?ZC=>Mj zv(G^TEp*UC6Wz1XLnFQP1VuCLbkk2aF~QVOL;W%I5fiPsG`*FqQm_~DTo zJ~`rkQ=a$bePeDp=ap;zIp=IQuD0ivFRnQ1rz3tk>#4imI_#;Bp7rco)6VqkxUVg` z(n+tayU|TFQ2{r2D6Dfi4kWNV^Aa%syz|c|5dHGhH(&kq**m{I_tSST!S~^N4?g+c zn-9MB>2ps$`~U5`|33Wj%Rm48>f68l{pn}FKmOAD|33f&D8T<&Z+` zeGa@|{R)V{2~x0v7QCSU)<-}02{3vd#2)`ZD8UcnuY@4PAPQ5cLKT{@h56Iq_%_Hv z3XX7q5`>=gYIwcsNdO<#AYQ^?WGfR~U;#H=Arg~_#19%Ug-28(6r(7`DN?bDR=gq> zv#7-_a0F?mQm{;`vQ)TABx*vC;4 z5|gAnrT-^W$;n7Ql9j1cpq%9lbOpzB_JXB%V5G1n$_%NFl9+iU(WKEb##C}PVmFx<;)+jSO5VC;DB|qvz_jI zCp_aR&w0|bp7y*aJrTf8eXf(A`1~h811ivg611QOJt#sGs?dcpw4n}tC`2PF(TP$t zq6C0|J}z(qAm$8>=SU|=Ln_jdlC-2IJt<04s?wFRw52Y6DNJK3)0xt=rZ&APPIIc$ zo$|D&KK&_BgDTXa&Quu}xWG6oaXca}ps7TCDpaE?)u~d|r%^BfREd^mynpnlEfTnja02GvQ*v-OpuplL@XG{9m(Q>x5g;kc8W*?DU2bxj z8{O$bx4P2REq0xo-R)|(yWIV*cf*_8kUGNvl@;l3rE6Vu?t!uCwC+{Un@$HP;|*9< zD|OYofU82`v+DhCegnMT0RywrdPlPez1chjNl1Fn8FfPFGj~mJc#4L zXkkQDBWcAeV(iwo#H2OxdsB?!6`#1pDt0l8U+h`hvUtWZ)^3b*4C5WYxW_v7v5QA6 zWbhJs$VSetjc=4%w-;P zY{5)sF{|0hT3$1p&n#y%(>cy{p0l0vY+IuuwT%Bc<0}NxT;xF?xzI&kK#~cq+csyH z(YFn)p(CwmNq0BVl%6!E3+-mvmiN3gZm%^AZlUs8mmwUai)`f>18t*#mq)FqLHoaX+N9V)~+_T zQESwV;sCEM(yIy1sb)N%8QYT?$q{~8?4PM@?Z;XXM^0)zU{`NAu5T z>vN;)oW+l3I?`|3X(zintfp?N1LC`YP>=f7{XRAUWG!qjJGJLpkGBJ`jLxz47tvK` zFIV5oUnDEN?y{XRq}Tj(wA&W%c8~Vm{|@w=$NT6smp0;W?DwMEdDDi^`J*4tc*)zA zt{L4ZIXnD{R}|6BiB9)+i5&Wo*ZlB>|E-K0?CkWyd-SHCIqXmWdfCU`^|(Fo;Tw#5 z+{d2xGxn>{RbP88*I-?EdrdBz(mrc0a z4Iq7`I{~xlR6cL1Kjq_ISI|j^_H+N)&2!|oU-+~)zUfC_e7l!^({)d?|MlPf383u> zpznzsiD};elAg`=-ulT_YMp>xz1+9yOy|YlZtb1UHQ)j!pa%wEq%B+lmK+1lUIB98 z37VjmJzxU@mj;>`0G8PNL74#BUvPEWiYec4`2b@vTxD_H4cMD`0ozw)*_V|am`xU0 z9oyFN;H=fw!@(Wcai0M=!OZ2K<6)o&x?Zz&VFT`5n7+;-%p5iCzf~ zUkt`xn~h-yf}jclA8pN*QH_(gwa5_l6$|#?-_4+Sso{I^9^Ii>@X4McqEjNKq3Yq^ zAhzKg5}+iGm+YCGm9?G%7T*5{l3>sApLaPM^5NE3y@6--oAXr}D%Mu6W!%3Fpb}=4 z3MkwHEE&YbmMsoeDb84zG2#7jU?eKq$*o@C$seN`V=>a$B5qg8aUvXI;>b1PB%+`) zg4`iixrg4I|l+%{%i!~NEQm705v zR`q+BTrsY)zo|6rvSN} zrBkb6mBIljz?qja7~*Jkr9m>_fxTnUVI_^O=u3iWH?ru4a-vyosfLQ=g3@7{)}B=& z=*SV)ieg%dIu-9JXT3G!ZLypl{uwL;n$01jTE^*c9;ZoqDg3D;?O|OoHtHj~sYvc) zK>i|cCFlPfwrL>Z<8ytgRF0-pj_PdTU_u(e4a}Glmfu{06~n1ocy`rg1!i_NDF%Mu zRs|v4E#%r|Reup=qWt3*u&Y37JbSW2J#AecmKkf}pWFDS$fXb)g*W z-DI$mC~-n(pdFQ6;Y@4s)j5i)=Xof$j@wT<>!EGoKbGZz*$^ZwI|s%TlXQ}UlyARoLa1bA8nQ2 z5F+DqMrD}->`O8xy!Pp#0_C7BnzTKo@O3F_UaFzG>vEE;esY(ks%Gx}WOA}*%MHO& zAtmF^0wuwQB_9oz6NcdKqYBHCbwACZA3x)bistX6Bo2BZHA?bQWj3u3#DFq|KtMj3OD*HY<-s zD!z7Tx6aj2+SMMi2)LOe+@h&t%4ebeCaIR?xz%m6j%ZloXWddQXac8$-d-@;-hOW8 zAl5BcUM^>frJHSH?8;|!?k3%WYGhHWJ31xpzU;?^E$b>Po5pFpz2nuIY~RZ4wm$2q z>TKK^W5<D+=8IAR;!4`;|wk(mv-v| 
z@~yEh-unJ-_$KbZPVVxKUKtkbywa)RlI~~L?7lMSOjd8-7Vr2%s)7ov?+UPeZtVc0 ztMUqCw|XiJ-Y$aT-3Z3+fu?28!rpX3BDc=t7^2qYRY13$(F1(c5w@@kzc38PunfmA zq1JFy*)VF|uny-i4-@JP|1b~-u@Db25aX~B@9+`#FcL4Z5(ku^8v@7?&{_pRpN_F&MA$7q>ALyD=NTu^h)S8rN|e+c6%S zF&)D(AJ1_glQEx7Aos$Y4h;V)YRwfQC$b_hG9x##BR?`EN3tYOG9_2CC0{ZoXR;=5 zGADPkCx0?1hq5S-GAWm`DW5Vbr?M)qvL@#tM+wnhnIi-|04>+DE#ERO=dv#EGB5YC zFaI(y2eU8_GcgylF(0!o>jN^|@&)8_Ge5vH@A5N8^D_VPIVkf5q}D7mvo>clE@v}1 zYjZV2b2p1~EuX_Vr}H^0Gc6D0Hm~qHk25k;vpmzYJl``u=kqvcGdt_EKmT(ux7;{d zs0D)K31}2TAGAUzG(#`6LpL-;KeR+2)j=PXMPIZ=oq$GfG)H%|M{{&WM>IquG)Y&q zNmDdRi}XWdG)t4TOSk_tOvkiL&ooU>v`sT~NvHHjTeL!JG)woiM_)8h@AORnw9^ee+$KK#HB-~k>i zwqrkbV?(xLM>b_owq##+V^_9gYj$KSwr6j4W{0+DFE(S7wrOWJW?S}Vt2Sq|wris{ zY(MsA&$epIHf`TFZs)dc@AhoBc5Az~XU8^U4|iz`w{Vj-ai2D5hqiM2_G%xuXdky^ z7x!vAwgs5>b7%iIbVGJ=Q+ISTH+UmAY=`%BSGRM=wsxDhdM`I|cXxXa_jA9ue9t$1 zx3_KIwsQyfYp3^i_cw6ow`R+BXH)idmp6Lz_GkZgfa^ADU-o)ewtq`EXXAmNwbmcFvhjTcIgZPIRIf)N>hljX~U$}~|L6c*6k|+6(1G$VRIh5ac1wi?h zcX*d$_?K(BnB#bullhp3`Ie`7ma93NuQ`+(xsjK6l8^Y2fB2KbxQttQmAiNaP&tP4 zIiTzLpAY}~j~BX>FS&;&x{Et`pyzp&J9(0)xtBY6o^$!2Lph)Kxs(sOrsp}8bNPz@ z`Hnj}rYm}9@`jfAEqmKZlzq+Gyx}V27tAn|#W4NWax}Y0+suTLI|GJ?EyPm(g zk>7ZVGx>@`Ig=OpjE_2tM|q+MnU}kb z+qsUTySksdn|HgjBl^3q`m}qvhST`9-+Q)ycnM@4L7RXM{QJNAJHYFJzyo~24?Myr zyukYe8)5V$cOyFPrS*Gyv9qs z#Do7l%fCF#$Gprhe9G5+#FMr~5B$bIe8Km8&WHTf-~7T;e9?D($y>b%jDX3DJ=pKO*<=05>%hkUyv_qX*Sme& zn?2lT{MXaG*3W#**8{@q{lN2m!{fZxTRqkjyx1qa(g*z7>%b}~zTU3_-}AlW|NG)k zzT#Itc770xBe%velE;D z>(4&y+dk}50Pe58>hHeq2S4l!Kk-w*>jOXX8-MB_|MCkz@-P4J*M9I%|Lz~Z@?Zad z_4_{aV}J8=zxQvy@rS?ok3aci|MpXV_h*0dkALyEfB1L5^A~^lgTMKgfBHlJ`zQbM z_dfeuKR{6Ul#2pEfdmm2q@XY%LWc|&GR)NwB1MA{F(y=)v0_Jy8y9MfC=z4Hk|!yi zRJoER$CesX4s@7H=0ut{ao#*=bLYfQ1r3sf32|gmkQYrRHOX_J(u*@|=9KyoDb$HT zZ$9LBwJE}yQF%TkXahi5vuDw!RlAlgS`B62!c7~uZC$%&>E5lI_bgw%f87QKJeY7{ z!-o+kR=k*TW5RwNuZ;^ia%IbQDd&AlcQb&@f-fJY+*Y$^)15o3^~-lO;?zB%VaJv| zn|5v6w{hpzy_gK?S5lx?_U~2!eo=A|>5ONlUkMcT0D} zjXw(7{jFz(W%Q4Y&fH(ZJ&g2F5Sv%g=zy^h|f3- z_lg=wRr_}td7s(8x_XU_{3ZqEZ6;Q5$XFfvT_gPAF8QqXliNe=o*`t zJ~uPBuyk;Aa&~cbbN74g9}xH^DENJ3RCG-2hmUb7scGpMnOWI6#U-U>U&|{hs~Ve{ zTUy)NJ30pjhlWQ+$Hph-7Z#V6f2^#ot?%t093CB?oSvOu%7q4C{3_NTlKo9CB2X@L zOiT<+>`S@O&|Sa-g9!7=t-DynQYzR_?QYQX_~MY!7)sQCPaX}>tkYtC!ZxSNu!?p|@kUp9y;PegzE(6&7-!c4$ z^YBb}!zDkb!PqNeiguy68s+@+1b8`SXbDZ9|~AyDshCJ zp;^=&tq~%9*cH@JK)A*V!WM?W9C;ar0_Zpek+ru_K;IR>ASIXuNU0dFk;yMR+H_t~ ze(&wsx+gfL;S`6o6)e0v4l?xmsUz64FU~%qe?Py>g~>j1bYU^zv5wWIKj~!|mI| z*;DgO$;qG7r3AA{{vl{a`@eR{?n-SB0qZe>zoYr?Y-$X()g;? zmRK6>vT-M2`YAO@NMaOl1JR zBN##c@Znp=1-EBhSvPL!`_?a}+h}2?EKHa^w8%|P31-FX`Gl0shaf8qU|VdfR*A8H zX_*83-L5N-YX&{p?Hr74`;>0`lidfR@!7IlrD(1egpV@sQ6cZ=CY|qq8k+jPAWq3b zVySzA<6!4h-cx4Ms1^l$dv>YDDByKuGwcxZ`ybc^g3rUU`;h9cGc%b7lt#PnPs5OY zCv_*^jo>nnpIXlhW;j2Wq%=P}@_FEN$nJyGbO zg(Nm2g7tZyAkW@j8U`e>@?V+-&jt<3YX%CqQ)D4#c!u=eFGajD9dqkml&kxv#|hOe z@z^g|UZ$H4P5gb#|0o2GrDoj(u?~gC^H7a999Ry6dnkbN%kwujO!uiI zPyp0rW9?qk)0zHz$7LRIbw5dUDdXEeBsTHNAcVFbEF4swd9?0y=FV8gjASR>W%OFl z;J2$V{klTi8W`Bbx!|jjH}eApz!|^MgEkMbgE$}uZJr$1X8226=V^b!uz|v@CcwdO zM=3}9k?61t@y4)$BKPH60TTz5YTSh@o0=x7@B9LVQXMoMWV9W|hU!y7tPP`FR&?D=6JXroEcoP$}C z2_=k9k;BlAZ2@xRe8L%L1-L`8-KP?-p3!RY@+8!mSHz3r9h2F623 z4L?EMo^+!G%}lIFo(nlH3gF;`x{n_++Cp#W6T?M@YmDYK-*>f;YZiUVZW}XYZifmZ zcf>A!)HE5M8ZXr0^NMdrb9`NgEiR0nWFIX?oCcp=Ljf(Zx#H(A^h1ei*;z^hpV7*3kkG(OGRoy^TGw9__SX=2zS2*Mm zZJjTlrz~_&W-0{MQ|tbw5!wmHpp%QZp? 
z3!c>O@gHYjOxoUxq*aG~Q?8CL-w3m;GNL@XuYRi215W%gvyYkOmky3Nw@`|j>N00K zui=KJ3r*|j(*BLQmsY}LJvubcLi|2JYWzL0+U8G3)l*3;K(6GCx4@}0Pxip+4-hJ*cconr-SN?GC|g=*YaAEAybYkGDY z&hN!w9BX*_#lkc63=Wf6rLAsx8aVQX9K$0~E^>F7;&wQP@bMB~#N5SMHtk`26i-I8 zk3I4^xD#pBo*px!p(y`4_DR^09u*xV3((T*gZVRy0~>1^N9NU52$&Z0q(mE+bsCw? zbB-lspP=`%DK}3qM57||lWW_&g2cyL16;54iuJo-)!()ECp{Yu2wylDK%u@VaZPO% z9xh|w`#--QxN3d-bs8)ypmvtdgk@4JH_a{-2yaFDi{Lr&2hOki(JM}3(Uqu1?=!3A00SE%n zjL8I*Be(Z_4h5WF&6|cGC3S5O?$V%oUjwQ^<-$(lhQlnAj;~jM&vwhJ6#lhkxD1(3 z=~Iy*P41eZL;(>g8_0$=&_^gb9ksh!A!XkdUSMY;L-|L2l$ESnn^6F1_z7r6zFhxB z?LgJ*_^))dN&%T-bV7ATxmyi6!`21O3kmXGZAj`uZ2MdroFy@sEq}SIK|GEQujHXh zfN;I4D?E2H>iyI~luZh!icX;RD{WlKo@-Hfx)Rq*&`QI0oU&up{R@f5+Sn*4qA7-e zW~ZIR>6VAtSWq+iE$nm>=Ulo&SYXmVD+-ugZbOFf4;`-QmpApEk)VLBebE2PQm$Jq zv7-Po!@QFkb+VvMfN!T%F4sV&ZYyHC{fTwk z@-?YzSayD4Bf0v$b{@~1Zhv<^(PDWAvBkZ%My+YBlC7*_$J0rg`20p&`L%6kxQ20#X(Wxdo8djzn#t&1QBo zBwD?RS-&k5G_`+@AuaxePfEruRgnSQ_EA9FaW)EgiW`Q&8;S;(>R+4NDo9FBP)&64 ze()zzt|t-?7rf*U6SebJjLVF3Sk9VJS}bRV=sOm&?h&>?$z4OlTqtw(^aSL_x3Hr= z911NVnZCW<1+8;%$|`I~9Cm1t?J2dp(wKOAYV`aOt0=6irEWV7-iE!0Rr5l5PlwRq zedQCw5sGx&`K4VtiV-&7>oQSmO4Xpbimgy1^Jvek>p zi?)OG85A&M)8ivH=lOIH?F=rwpaD6WM*-tn@25PU?8!zi)GArwRvIbCY|Fjtdi<7& zpTo9dJ*K)kaQULo_8YDC&JhSpk~rytUaZCC zw)R z&lv_r^;o>|ewyFC>xOc+FEMT|TcGVDpZB;vU$;wS?OPvI=CiwpguGtgNb`sT8QTHR=PwUP6Vd}W$>Hjk~?6m|9N==Js3dWa}l1{50= zyG!xrT#!z=J;DVax)qEQZSAIhH7jA~aL{`82UNu^f1`!9gBI5&7IJ=PRL$tN(P{CK zS+(*Uh-EZoJ`RABF`Nn-JlsM=x&gk!-`W7FoaO+>QLTgm)#-$S+qD=C`%E@z0x8TV zzFJwy1j~%5fI1;S{B^l>_6wXc})S>b$%*`{%V=rw;Q)D1hq( zc1j}%8g&Cx2FKJ%b)?xzU`e3yci22Ws0{PQG|XMqWYQ1GC5$FPgr@UjtH;GGlg26) zLLkN;c6w3`5}~!a)0};$-5>cV0H6AW_pIA72cjlY)WPQDtJ6g_QeUetA?tV;`Vq0X z6lNTS0{rdf>(2go68d-fSbn*8;n;T5K54f=3>=%p?_-qvo4i_DGPDz!2hbW}hOs4Z zwECdG`eUZP9$FJLNtJjQNC>`T4)2^?GQ0f-h%aJJCQX%jl*wwrY#*gM4q6P=JHOD^ zU>5X@f9^&HhCq}|>IR}x2#Cu0KY{PJ9T%MP!UwQJG)Z!EkYgV#qkyn2HDn_>Em_-D~uLm%b49Qu)98!Iukde7)1-kpeHO&rDHi%3BDHzDU z@9rrhuW7u3|$bLU>>=P{78$KOiy&O!@MA9;e#O{x$c^%j1~ zKHFNLhoFb*cu?0^lq1YV7c|V6$mqP9H+SRo@vf_B&EpaJl73!=3l9V2FmebvX!P|A zVs`MYgUqvLdvb6DRHa7ag`{(QaMh(5&1+Z3I!`y*QZR@u#Drx(MFG*c8~oLuKnedC zn##|kHdawy*M==i9mUOlfAM=~u*<&W;7k&iwtN)%PzFgFOd(Hqhmo3^HgF&&cIXSZ zs{JpqNwA~dyNhHU&IwpOOA>iMGDZox+{=lN}BUqe}q30&Gtn%q{c9m0I~9bedz zOh0U6*sAOMs#;pgS}x6)NJ+w3LQgOVi5RypIp^i^e-!VmTb#LsuUh_NKl`>YC}cOq zoh7BVspl8DL{%P`6RklAp-bTVFaLF67wFBez=MsEbzIQHrv(|`?$zy?uJ?K;)xQGO z`Ymrd2(okKK$l8xbn&o?a^-JrKzaa-iBiMb!}4LHpHKkXD!v0!2z&-|xx59D8jnxI z(~FikPr^ouYrt;kw@xnnP{2LXoLN_dCHoutD&M&T!RAH0Z)L~C`u)XXIMsaLJrJ_n zvpQkQ@{8z8xs0zJuyYnGUyh$-g>L3N4`8Jfraf23#iUxYO?IT=U01Z~$kcsUk{6!q zwn-`7BTpl|6=D}`5~mU6_v1~#0bX@<=a*-99?-4CtsL`{tJzp67tsc~h)>4{EJZRp z)ZDM~X(rr3Yqd;_u(0sRm{xEjvG3*>cpDIIQgHR=as8U^NQ~z3-BRa%oc^&Az@cO3 zvA+2TIj>l;)B*IoJ5L>YQBO@-XvQvk{x(t2ndg~YtQxaYt%*P15G$+X=G@&jrl|E; zmzd*jl7YN@%iFG7$prQeT{r3tMKo8-d`&Gh?@*)=QNwHDNlUz2=cdIV;x~#ll{$+It8#h1!Ql%)I#x3jFs? 
z<5=!T?DMoLFBsWa_KR@(Fk!zvpoUB zwqW2mm<0_7FJ1ZG9jh4SKtr1Q>gshGDn3^ZKVTdbcGuZ{*oSI&FEa5Dvg9X%%>`dgdA8Oxdu3q6wt=6(0P$PIXH;zK+p(+8#6gLvmxlhk~W}l3>2`r zdi-H_S*j}lX$-j#R0kagS1thl!Jb?zs#`iV38Y49~hR*jhUA2K98_* zAPMiLDGm%B#mnpq(>)kmOd^4tdH*&SRkP@)eyXENX;o3-?IXQ6WrMl7Z$(8$M6T(B zHa@TOnue-8jgXnl!#)=?a}gEEITb^liQkc+`oBVriyJ11!0(GC( zvu+QO0~@=yhytYfZ_EDF@`!>3yuG9^e%wEWd`5507oJga)os!yo%4+v8456;<^@)O zu_7PPl_jHt2d48lRo#htQfJLEB*-OFnmYG$J?YS$?)e@<=s`~{y3OfHETR>uF|P^T zCjg;U0V~|!UD%s4!Hr)ckclmCExpqPvRhOLTfJ(70(=8eKq$D9aVbxbKA1WJ}w7% z?U6*K`cWN$6|-S~3?jd!n!+R~m9PdqM2l|DsV&`dlzsOEDFeOmthi)@U-^@+n3x%Q4^2x5%O~~6}-Vr&I`d=>k zv6|l=skOs%pwtd?);7qRC8UPRFYrJSga_}VzwMn5>WS*%R?O{fY#AB%ogG!i#cicV zSx3HMv<|X0rWX*dUm2V!y;!KcmT$77rhC0^+#v&zDVDX!{!YnV&CKea)3bs$ zHN9faFKxDB*0@`v(0qHj>my-m+5O7Kx$jh1!>^cvjC;A&t1as!8@#wfI8iu)8@_YH z@+FPEBtzHQVYh1KJDI9)#JzZetU_dSc>x+yKHs#diNZGG3iBkIb0o~H5SA<}?{uDr zLL3?!OlV(+gbh@F`Xt$BMgFqj<cKvzd~wcOZ*_+ovvhPdUbttg^sdC& zInVwK!qdzYFtC0Q zSn%Jh@O)k48Rf-sRvyLTz|4y$KO~i|Ge{` z=D~wPu7x|IA?#G;ZFUPuTkx%E%r7Ww#jSn@MM#%4F&uKDOmWF!?Q6n^KnpviMMDFFj^&%3Bc z(`zOfD>$xmHC3QvmpOJJ^S!O%UOPtaR#G>zHG^>2zr0G_KKEj6(74X#lamUd^=XFt zy|u265YEUqw(pYPbIzW#PP{q#dGOPgmT0Q^mmbfi0VQXoQK%~{xQ1b&n|NIz?E2Vr zrE&M<-DoN#7O#<#r#Sd(l##Q)kdMzkt`mZf+nez6cnF6m&p-xonZC*a(^q2EP2V7= zf!qO(+16s#rdLYFoBNt4?2GQR2o%`$V7r51tJe+-MhLea*ZN0nO3s)en z3i>Oz_X!E11Wv;wciV#aFLq8NEKVFc3t%uDwj2ip5`b^aa=@XB36S~YVll;qn}u{_ zN@SyEaT{&$BOz!{tv$P|b&7U!5PhikI=w%}E~)>bZy1J^D~MS@3#sl6lK-Vk^Orqk z`IYSbhn31uYWwU)`6`Z)d~KYOeZV+mc!uu=LVsrA7&bMsTd}Tj`v=6P+m~zIizrXY9u#UA||iI z$j)?RcqgaC9{%cw+4~+udc?NuCwAX>Y}1!KqK6%%xAVLA8F~2fgSHYTy*j@gl#514 zp4pmfM-N|}pDs1i3ejN5wOCl${ILz?6ZmcxI`eTY?q%qWj0C)5WxoAKZy8EMI0RGPv+|=0MZ(kMX{!zzr1`H ztLxXgbG^)z=!R`kp0PPyam0ZcQ(m4+oPTSY>bCh0f3=Qxf@c^v8^l~F(vMyrewoIK z-gcxm3CB`~>FUFhbeA(nxC&mpSF3@RZ^R`hZ8hhjBx8lt&(=6Ku=y~OSbEkc;$@(;|C-wl>?-; z8ie+s(d<1*{>5k>2zrXc`!jBw*14@VrIQduY4y;4`ESTI3=|cQdxR2h-(Czieo1L7TRWxQ_ zYbRT{qPli5*msA%rn(h|1%G~`I@^?$be%xSBs?e(e&rpM!SE%B?35iQ#rPM+$yW=CY% z8g$pkH5M6c2u3fwW5Gm99;VBo7Dxwhu!CT$=^IHWU;$7LbRO8_Mv&AxAQ2#HCY7W? 
z*oS+{@1|8v&x8qN#l()3loG^BcWIgmooVmNn1Z1zv<0JcR`(Uexe=1BhFxVB63m}O zv3?vQ;1ZWA<3TU)#rA^udK<@g*OfwD;Jjhnq%5L8x-a!<)Do?@)nAJVMt0>I>quPP z8TeP;G8KnCW$J7{3w9NwndM$$X99iF@nvN7kjSuAWlv$r))+~mowubZUp?>!;=FmI zO{y%XW%$s!SdIt;DdEIL_bZ=atF2z)tUVOG&R758RgoNVsK2(e@FM@FVyod+4^Ac( zpTqHhA&vA&WYv^g@l}S$%dZ^5Vz+%mo($^#`%5e>^bCb9bPXpSL4Do9Sbz@Mzo)L1JJetR z>@66O9F0%vth%A@WX;2A{IF^;(}IlZLx@s;{2lDR^FgKVyZnx>9$t$_IS1Oh`t6K~ zBdj#;yva{R32263g#=wh22DF|Zl<8-XNL9rIUtgmJt zdo8RTbIQr~I&)8?3+z-m?xtdC58W|H_HV_`9H?agyZQu1S*^8@=Wl-{3fG#zAbj#y zkj8n*fUtg5C;du4Vs=;NNTjxx@8b5iXHc`SyABi+63UtBJfgS{bS6Z#!`mh7v#?~` zZQyJOLFB86Lc|d{3ZO~_d;b9a!I^^sX6;~mRYnu8UVO08?=ZF8se|apIi%`Wz#`v) zWD&1V`iRf@w|LD&JMmUVs_~^81V+By?Js>(rYJc%(_qJ#UXxOvY)~P3MykOb#Fin~q`jgqK~7@=w7*)ifx8)nX%G4Lcx7z5jaa$ZVh|r`3F6 znODrh4+s0so>9EC&{P0Fnc%29v~93Z;XUirBw;{;@WF%GEg8C1R;v!t-^t>&xPL-X z|8038MsF+Le1PiZH#&)V|&;dws--O`Y--%$N)=D z*$#dhk%dlq(;%jMA$!z!Xj^M@tWF-Jz7k0v$t?U9=P7LMz*l^4)P7=yoXE{=nXAb+ zOzCK~Ezv4=u!hhkjl;q$cRix<9i&4?ot-vQ%pq=_bs!|RneXktu96sy1+nk#ZS2U% z(UCv0I3|lUSt&jg+~+~CDVt?OQsY01nX0;eEtJsm*Zw)u{A(6fFFkHjU14==WK?mtVM~xLo19dg1)HS}UuM)*tERsn6JQ8T{$4aAdx6NAT*A$Eu9$%G=T1R-@+D zWcI-xn$wW0t0|YKU2kT46DMQ7G=-*;kl%)VP^g;r)cKPz1MAC(PXwW?nl~oN&|6YN zP3fZTmwelRxv+FOiD>6X7uK`f#}+qpvJcR?Xx!EvdIF{d97t>7&6 z9y5G@pl+u*32p%0+?2Cv!5+zW5jPIDax7$q5R2XWuA%jSqgFEbQ5ZZ(2?Q}g3(9B9 zU~F)j4&!ljOS&zDH3&Cp0JoI{xy z*2~F`go8r0^dl19AdP+7ys?;}|x z%D^ZFT&2ZL?r)v_!#tQhgT?Mi9pxDGYNyo1!|(rPR$LN^M!1}I2&7@o5sX`mZyM?D z!3(+~-YW~`Sv}3^A4w^4+_;x|qEjtHla+^m@3+{}{~~jdmNojP?MN%M58LfG2aA9D zOn3g4dtDL(bAErVTKpH)PHelmf2nI4vH3kkOVD}T|AN!Ex*YRT;2MURAwF7$;an2mwLPS)`5Y$Sg6w{9basA^w83Fyjac@F-dT0c z&gToFvt8-Joa~@_8q_3emc4W_5;&QM0#^2Ck;uZ+{^N~5dgf<~X5ZE-OKedTX!Kxh zfN`b@6y1Ly79c67EFCup`oWiA&@!nDc5z;}C^xwYhG-#ROzz|e)>=Sar1_{vf5653 za3^pxdmeqeLB)m(mzNstqG@Qv<_(hjRIX`qfxJzn%IM74^dRJ%{37)z&swy;3ieKn zAjl!hNa}5FLa4Ry-J}`Xhh8J+qw%g`b4I~@p@SBQnuRZGkSohkgpT4cbgN}{7%^C~ z#R|tcSCFr<){-q6(!>fesE>TpE9$}`gs@xz2Q_8WSyLi?DwC!>j}XzTm2SR1Rhwtc zq2Q@5yDY^KS&%PN2I4wTu^nR_?3DV_T3)4=oWA89UTebyO zLnXw+9nKbNd+U>M^TY@|$DG9Pt6M>C;x$68_ z;Rd1j0Mj(JGB3}qBVb5?lF|0L!m9!31nrYo_l!H9Nlgw46+lfjUO0(_dqi6oY%jf^ z8R=^PV)R10mzVHEhwzTXk)>n&xogueiCJkGBzjr&;$KXcmVewB8u>2bReD2??G`?T zeVw3-u+2ycvaR$0USDkd{lK&E&Y1@as4dU1tc}(fO0rNq!dXj9+@JmZNt~>F5 zJ5g;8IiYjT^%u@&W_pxKP8RVjn5DoEC{#PO0aK{@kfS|2u;wwbrnpvlz78Jb4Aubt z_Lum{Q@bP4FHovOkqMTRnj1s^Tj#UURJ`IQkzOD}u2VpgGF;F4?#{x9B>mpD_?99? z-gvM~GjxcjJUrg79qJwVUav?XW<}=M%I56Y4vkG{@p2Ym z4n?wC4++2Yi)vd8Q;FsvE`0tKG!f>|9WY$v5BY2339Rv_C&0RczjWvE;Dq2G9|sNI z?|uE-$02-5p&BapQgX6ZNuH<Z?G{PhvapYMMurO;_kD9`bo+j^^l{wwGBuWl>+~vfG!2W?z58c+8Q^cPb2xT>tmD)HTe5t#OhR z37n=&9o$-58yr&6w2OKXchMNH>tddrrTgtu#yat&!FPrWVR|O}7QKs3`S$G8p+fqI z&y{5%53FlWo9)en*3;qvw~c`m4|aBah`DARLa(YKtEhz_tsmZ@Y?3*EL^lgc1j_yL;;^}jm^qAca-tUp3ZyNndgl=NY7V6sT>TymHcV)@*=#_}EthF-_ z7rht$tR6jDdm7p*9-6~O_*D=&TQaW#*^4`DbC~UKRKbaswV}=9$D))kSmu(?chGJT{E~3SSwn1kp!02eJ<36TocFH1Sj_Kzp1GEePw~? 
zw?5$N*GCU#Ntpv5B)=@msJKF+`rL=?H!KcbO8$QvkIy@HKb({wvDqmYAHgYI;!+iG zK$adqRA((j%!u%M3k593!W95&z!0j;Sm1 z`xia32leA%7`_ZX!2(r(CP<}Yc{GjWpiQ42^y78V_K_Bn?0Df1|hn?gYxxM zF1?$b#c#{P^rpV09;DizhxOD%UND0+KMB|7;*bT|F<7_KIEh4DOUmbJzHk~lswvEZ zuU_6<$oS_)XtCqY*)9mC>h6W*b2S|K|7Z7YjT(>a|C{fXD6PgWH~9DC`{_Q{t=_)T z_b`*Sy5B^#bk!Z07;Po1GkYcEGSOSz^|<%D>DtT#J#shM6s{JyKpZ-59 zG}GB5Z`#3ijpwx#c_iTh>uSd642NWGoc_rDxu)gU`ZK#3j8mA+9MdpE$+!3GGe?Ui z#L4)yeN)6j;$FE$-Js*e>yHDG;}-0Z$ZVh!C*#9g7uF>WKdgJYn<(1c7d)Z63XLKH z9bEIV8WoThN869bQf(`7hwd!sRg+R?nH^X1h@`HyP8tWHXD#+=C0P7}%Ld&D=*%e( z4A_y9BhaHA2=WzsBzdv$9u>X+@V9KN+S`cGva)9TNsim5A6nG|n8gX)ho#fj?bbuH z+DJ>^&g8V#b@J^S1Yf_GII!<{PTj#>8j$ zD0sXk!#)=zMot8Oe7IEZkn)qK(Rw^|h+w)QknUw|c9Vir>@zgMUc@`dslZjq!$Z*? z1;|Hkoj_|!*$=#TkJ@{eWZVdYzu}(tOqf*%*cD_NY}`74ks2qp#~t0;P5mYckuo=A z91v5)Z$1}apKh|@j%Iw5#tV=y{(TS!oAt11iiFGCjeE0^tTlbc5_Qw^1oIF)im_X`HU#mMf4A^W-Y1DWty>;buW;#*D$t81Iyk zN~>4ISoyMj%TqCPruvQQ2~iBe&Wp|*byHvF&za+=!99r2*QGo)^E6d9MhG3*`U2wv2Kp}H2wy2AMy13Z7 zxij-L%1r!>T1#ram&Z%^=<3_py66H@ZA5!7&bL_>?7gfgjsjNM)>^~e9r!ykqoWNw z_+k#fM^oLtkPd7#zwO{%PT=14HBa-yhhRN9pR@n}&K0_mKj9d{KX6Pfo6YI*CuC*$ zKL1Z3`FAkPl2Y;R35X^y5hcdwpX|ry0uPXhVEn;EYRx@a4jH~U34=f85b$p^lYJk= zZXBj#kd!{g{FKn4~)H-`6u95HMB0cha@AmR?LWmaGK^t){RRqNqX^e2WiX5Xc z@5|lz_l-^|>*n=IEtJ%(8Di2HmnLK$VTAbD4Ze#iv$`MzfBgUlOfO<+p@5*&pPPAz zP^DJyPJ`eNw0?;-PO0XT#0@S7#z)@N?efD;h;&NO;Ap+sOk13fn!-w4SR_H)Zv?Td%-F{_)3o3~LkBo@(~ z3W?UN8MNL}%(g4vJmBag!6QZkATO=-eje!G#+{L^u}RzG6%s0^Xdv6R>FxP-x2X9Q zfOWW?AcQ>`SCN$!{N}TUB$}Kgw$#6qMK=ElD2zt#3=LRk)YyfreF=?sf*eEMfUuT` z9Env37Q^b&Li(NF!ic2ZJWNSzh@{v|^sza+a4cWWpZHr#FZ|oeI$ej^p=5d3dcDJO!IKTb_HNa zu38*phMQSq1;*$hl&jErf_~_Ve=icN7tEOqz#3^%V7<6WaQq|`@HXfC&;$E!!cW{? zr|P9&#u{%2e>~Uwc3+Aav+!taI?v`gljD50`CNYCO8R?XBC!4vUtS(Bf(@BfhNFB> zgXi+#q6_R+R2~jbOmUr1o_R$PP+i<;h8_}LmKg6K8^bo0uY(Ip0vtX@-Nm(S^VIdj z3-4Q-Pi$xZ`aB8j>1iO+_ZZy&hXTg59AqF`nPY~nSaJcS7x}yo%(=<<6Si-E$g`A7r(ivI2a*Jhe3BEovi)0Tqf7bh0>l7d&V--)7%LQ1$G3!V zuv9+v0z(#@Hkm*qSpEJO)96Jh2+pg2OKbe`{7~H(r62;G1G)=V#fNR;Qz3oHAg6dq z^O8hf#e+!it=}>*kiR^x3U1sppFP9@H%`_6Qj7kD&*mIc8#V{l#{)9Qe-4Q6r!Eu_ zzNxT2`X=yaZX)3<|5u{_AG{+hu!NX7xHO4PJB=PU<(`lb3eZ|J;I_=RPG37#rQm$Q z`NaN?E{ldd?ML+dKD6I{73Kf1^T*^Xzq$HLcmLmYpuKNr2#pW%$4qLhIQ8`AqI+k4 zC^|?TOhfnyr=5Y}bfch(qVyB=vd}^~9oA0UhOAu#?V-$5!`zI5keu!|{-*I>v7lFGgkY(u2V~p4gJ+nGgkk!|8okiuC&b4YaOPEcsh6bJE?-y@MYS_EG(CO;cr7^#&M2 zIQ5hq8W?bid>HzJsBt%%xQ*6OMlqzv?+2A{dpmuVIH7h;UG>l=Ka2HZ)t8Sdanm1C zbLnhlZKYPQd<0!Q?3Az>_?LGTVa8dLvz9J#IeFzxFSD&}nTz5C!e57`VxK&DMJ_aV z5I3ewxz283og<%F&sW`XJ7Dap{6`(>Xs){U*bt&1$py9?o%i%ICP+Jbb`^;YO=jnz%5W5zaS28hI6W@zsTn zFpZKITUu3H(8O6^ZAFCBD(B*x2Mr`tUf)CPI+s@7W%!8m1TVaR8{}CE%BN3^jpM5@ zR_ILVP_?Vz=9t)B&9t_=_r*0*jvVx)eIovMlvIZ&ZTNV)EwD)ZC|-2aevq9*;#=m zdQ6Gt^5teOVb4TOzD~bR2wcWy5Z|?A52Ia=Gf2~#Nc?DZtQRW0Gf#HFE7Txa^i|Gi z1}~xoaKA#@kQsVpR5We&khu5io1o7DFcyaQH=|7m9vU;U2xCHY*E|Jtzm})oUMwpS zc9s?B(UDM<+XK|kvngm;Fbw|^Cr1AdKjRdrmz4%5zklq5Jy4U_$iB)qsckaA|CG@- z<<13O{Pkqo;f>dG&qMYGinA4!?U}D|_S|ULHGnnBIXT=8UAm44Wx%(?kc&CETHkBP zUY>Y!o9;bDst$d`LyyVO~8Yh0)4T#P(XmB7AB7b zdd0kNI&ZH*Bt`Mqzl@yw*eOzYhM{L_CGkTqKc58g0OyYXfZhY=k{a96?@vSHuWx24 z4Kp(f(e&oq^h;7VOI+!v3ys`7Au9;PFLSt8?7}=@exMYNJZ)mm&PVoBU_%@a`i~@l;+?(7QM2< zyvp``cO>3PYe+1XS8UCGw$}QaCF|pP2OsBni0%aV=o$QW`lFT+in$NHj7X>2rZzf@ zyLCUXg@h<*gtFBd&+luNOthI5U(LS~nNkTf3VPPUCSSTA=s7;da%zn*bGnjuAUGfcb)K(tp}yOtLS|N&94gd5no=@6yIU!eAX`2BaQXU#Yy-_6(tKs zobr`5B7HeN+wLj~Lsy~|=U@tm^Fnq!;36O=9oN_=ml^k@dG^4H3bjxaU zDzqZfBhu`57>c@h?=D4dfNSGFRg}Nphf@GsoKU@rd5bPA-Hj{3GSb4?zKefRo)Kre zb^oROWBH5W4K1U+4Z+LbQtRRihZVhmZcJR}tzKSZQ+1&ffKz8ch^Hmr_dFALc)|E_ ziK?cN_u$4m_8Q?f(#BIYq-4jjGqnTh!qK5$tlp 
zu{WQfosCZ33Y?dXSmfkY@8c*7GF6noZER$$jWB*D#X#eg8wc(m`5)z+XEYmZ8^@!R zqGpXM;?dYOTBJr%rDF84cho2hDiJ6LY~?QLg(EO><&P4#6r)H=Gmb>ZK_vjkhCPSW9iwIzN7H5C_VUjxkH?} z=tzby-}5tLP=fBWcb1?1lYlTZ<6S}(WAzt>w2~cj#jSy;){yzy_{|6+B=AklZAbnF zi?SR(mhCTLIA}h8l0*svq4_^xR)T??kY6FHkQm>!er* z_S!M?-nehvE>Wu^sq54l6XD!Q(-fSYdEcvAoD(3MC->0ArMLde;}=;>k%yx;!cY=F zn_S(Mzk0JzD(eANb@7fp0ec_;3}iy%tm?gS&PN#+{}50fYL+jek~Nl?X5jgtp9Mt^ zoVT29^WFcGDHQ#Ws7>i{%ov z?a?pCxCXuuaf{+o`w3P(G8=_| z$^b9@>$KHGz}=&?ev{+5!AqGM8>g00dFAdg_Ac$MZLo=*%Nv~Vec4H~CJOW|I-&tL zuB_37b z;r9{`4A2imlWTlp%74k6kD_sVYGQ*g7WCVf8rT%pm-xs|O_5^GXHGkO6-8Xm2I^V;{Tps0yg$V&5z^^fABw-lPt7JvNWLy? zr=|K*bVQ84MoEe=#+PP=TVnEkve_4_|4{nCUrI$a*MwVhZs0Jt534(#jlO(s2`(|d z1*T&L>}%s>S#4!c)y-E@n$ooB zZH3yE3@$>~7a##jtR5E)$}5$&3NITSy_cbI#47wG>PxkGr})A;JKi#yUaOsGAsQ=Q ztHIJH*ua8QORZ6k+#Ei3Gj4m=)R<*RZ5YZF;z&CC#?>YO}(0;wznn}q2^qXRpwOMmSI_#N5WPTSl zM1ij(w{0Q9PK;0h6O_973X6USk#sbdLdTGBC_Q^{m0f||AMULDDrZJ%hx-VHoWogDnth<3%8SFHX+NvO}91MCg;*;ci zi{x9Ut|?zzpY-C|txp}TlI3h33oikKBXv_hhKR;QsX}KCJ9J=93O-tj%@?(M#5OVG zs3XqefL9o;CwFg?dTgAN#_Bl0Cq)!0ZndD6WOC%3pG>N^Vv!u>Jls*NV=|mdnnZwa z+C}A}gMn@AIRGYgklCtV8vR1lo%{HBw(p1Enb2Z-!doqvl?CX z)ff?BRWYs8)1$9_6%7cqwJjY~y3pT4L^?jF3QQ+i+*FX+;6M6UfdOdoIWltP}Hzi7%&id+&fdgzsnP3W#94%0N(EIf!i;oj?3uPi|S z&d3goRV0v^Frofh8xZ(YjA;*@B1KZIRozu7|5ohc==Ci3tao_0b#=6NOPC|bt{xkG zxkU7o%&6_Q3X^B$XBeMy#MGOa=56JE&TrXW)#0;H%6d+dmW?L`-PSdEK5GW$&BcZ< zRn?Y$E9)4C16T z05@2sQRt4v34g`~8mnty#D>#~xMlqi1Ew9H$XtDHZew?(SbODNb&2*9JwtO*d>T($XVem32ES$pI)kOw_qVDGcMwF)o z28drAAr}n_Xzp4`U`uP)wt`)c>*wxVxTWXobt973H(@~TDg{g)5p0*egwnpy;~BOV zSd&@iwsv%4>NEaUO92&4Z%_Lqo7a|Y2t))F1!DGSeuY^9gaCSM$A@seCj7TVM|B%2 zU-1^=O|ai&wY_~C9A=X`uJ5i%m+o{6x@p?d{fROr{)-*Fp2&g)sE7N{mI^d@9WGY` z5H~Rs3I20XTuKeu2yVNG`U^19XYmq7vB&pYJZ7l%fiLGrRR(5vzYnA{-|u1nPD~!} zM5G|RPJ=eeOfPMEBt?}$-REblm0g1_`#s_n3buO<>S7~(v`ug2IJ-WZsCQq*vdVIM z9!IaEEUaT-oC?+*Xulswfot6=>A7Ktl~8xao43}$g5x@bWXlwu-7PK>+Rb4i`k9 zMD8yA*6?`i^9;$QxEun)l>wm@EVd9`ugD@^DM%-&lx81uKQ15Xu4Jw%U(h;#{V%|V z5H^CIfAw&*q_v(Dd|!IgbLR6*vDM4K_u_NW*?PnDP}3!zsM+Amkn%A^n7E}3JsgoW zsIlvVI+1Q7ne6Wc(-zZYS^jxI28v94zW#1<*A6akY5HX3Q&tVNZU`cj7wZ#Zau#^l z&{~)8)UMS^+HPE0D0Q0RPb+<=eu6JI{l#UM+-+35^Q#xc7ne+wwb@W6Kxi;e-CdT% zumRw6=G}6*ihx3aEPY5^rK?l#=PiNRj57l;F3*V59H&F6$AoJqUe}S;3U5^2sCHVB zWDZ9PU)xgG@PvLs&fKanM?eO^ttkw1n3V$IVy-uQ8vQn6^>j-web@~P_70^D!sT-Y z3_&h}W|Gz0W&SeJs^M#>e82fi=*6@+a@jgYzZ~tWT=Wd1vJDb*i@U;{{m)Ehk9aDi z4dGK=@2`V04~kBDwkrDnjmY}14)RYbKQW1Ewv&|@C;o@)^H2KP|HB!nzh?dgACGjP literal 0 HcmV?d00001 diff --git a/spytest/Doc/topo.png b/spytest/Doc/topo.png new file mode 100755 index 0000000000000000000000000000000000000000..47ae8f9360291cb2715b4a72514672b0b5bfee6c GIT binary patch literal 18747 zcmeHvbzD^KzAp*_N~z#Tqllo$2q>M3Qj!8njDQ0w&4ARaVt4(8v+8#YV3bU>R`!_fQzSX zrL}C;VNYxwpidtY$Uc4i*wV%rW~+CP6nI3gT>kb=4Y=N1B=qaWZb$S&(pxFX!aNqm z%NnFQN2N6qRny26h&D&s)Pmi#P8`30NwOQC<{-aUU_V$ud zk;8n$z{eFajN=}rVbI~eEI=rP3Lo@di`aGh6cFB%PPhY-E;KM7p=Xe#2scobQa6eUA?{>59> zVKDY8U*aMN`153#8l3C}a6#!Z@)&UW#U~onQQ)$a`U#jv%L@@9F_B{Hx!bU!EK{cH z;h^Dh-K%=j>Fu2>ld#ZxmQm#`+RQfOG8|E*j%&S>o?g^bRrzUkjO$Nyi+a$=zR5L0 zckkB38T;3qPM$wf`k*AR$Ja?4rK&$grUyB(<&e;&aE^$2(@VAE-&U5@-{owq)AW{O zY8~9aS-YLL27Jb{XUr}Nz>JhCc<0>5 zE%y9Nk{t38kn z*FLbxqHJwcp&(bMDFgX!p49upJ^oqRW*c%<&{caimIMcpHHB)Dj|rxdb#-TBwY?BG zjHQ^myZha`^lcg-zM{`Q)etwb_t*@!(TC~wW_42tW^K6^&&SL&?dnp@)@9hhGeq9K z37_P)LrS}E(w=QJiX8>94H5L@ zDYPp6Hj#cq?inB}rR12bSE-^7?ae!N`O^XeOQ3t6z1fKM8Rt%OkvXIxX%{%P{aOE> zMpio#w%fx$YPxS9xz$6AG1OnyiA3i*PZr9Ly7oy{-4TcIpe$$93-3m2G`5KNZH=Ne zt=MLgI!gpMBtF?}t%+2(+FGv~3KnMHbw4ik`2}{r7FV|h=kEzJWS!~ETJLR{>swdP 
z&sp0psB7UrBBN)v8kW=D(OA$4yV=(`GhV%IBKU(b!EA?+^5Ofw>iNs7Aj05_j17U~ zsMmJ)%4^ssp=TJq2kUchm+p3ZKJ3l2VR&v@UuUD~NoKY=8u0cuprT=_4D^r6Z;|K< zo43gnT&Gy8(sAQRWA2Yc)D3-nDPvKNK}v7;h-EjXOZb}FU>=%Fma1Fo(VMLsrZzuz zg0-seKM3=>8&2iq<8QLKu20fYs_FXtri*yzl~;8ppbZB-)%mj1GE2g_W~bja zYtIcCbvEj0zdDJEqhe$%Dc=-WL`kHq+rGq3966|Kp#f)OkomMNlP|E5w3T>5@(A(S zF8Ur}sdqT(dHHpjB+zw~rfTb9iq6A$`=XpnQ%!T$_P4XcSDmf;$x6_%-3)yf^E(TB z^(GePk|bs)I&Z>SBTVj)F^ajSWyfPwkm6aHJDw@y3-v5Ja$d66#H!X#?1t1%g1buQ z=*m!13yHBS(sHkHn#y~H2PHJajj;=7)JmFf^c+m?s=y36am?{{sOVCgqSLx3r$dUP zDkKA&+@>>spb$Pj>ZP3MyH1z0!TgPu!_HZKy6V?9-Bzx>zE>KTT<(3u;a{h9rfG0$sF?g{E-Ep;@) z{1D4dj_r9Oe%Uk3={vg{lc(2<{1LRM&ey3ISQr!CWV0SoysbgI*V$_$Q8aj z5jlzGh^)&!A6-EDNZvv1l@RuPv9*HcX23*GzQa08xi_i){WAl}Y4uqf0>{pKR6)+Y z_>yd+bcDiNo-B_D>t^5GUKlusJgbA{%Ab5~2Wu(L={`#PF<{;B13WL}a9+q3@V*)I z!Yo&BUY{J61e)rwCC7Q2f%9T1F;b z0 zZ9XlId%CvMIvwN4s9Kt5YMZ0oH9S)3)V=zoVrCao6 z^(94DY@bVY5WM;mtXd;NYW8Zq{Y14nB;!U5l=jzsjQw`01=bHB^d+ZZJ%#R^%IEFR zj++i|aYS^ELPO;LqA*3~^CI`Y3-6g0|KN;I`zX%n+mx$reIlpiQop0m%T{5|ztH*h z@vUy}&8z)9^I%Guef>C(H|nnkrHgWnkjKbx9esTic8Utc8j_Q#4n}Y2c152Tyh5d) zDmhWktO_jHeo1tDDr>;&#)2^0vrCJhr6KUs%&a35(YoM}b@@2i6Q$d+Lqtq=)3M`*F(jgVvF zdrpu%0cK+kgR;;;xW+;w`lf6C$K=9=`xNZh^`z zg@{B(-1gcE7nZUzBzBfQu5Fj=51z)rt0EyV)DQT1d5Zf8vlYqHw>aSy1KS1-Q?j*fA{W zI-?Hvky`|rs3u^UZ>&rx4Sj0tAEIc|!0O2h`iK9I5bNTl>1k42E^`WowcofK${03! zQJp^b>ocsc4^(|Id?^+_S z4-m8f@Bb$f{^vFS9MiZsDVj3wCn1y0-bbJcL_{~#GGcke?cuu})ZF|kUu-7QvQYaC z`GV+8lNFa+NM&?^h-<8?xv*oV?vZhmOFlGssnr?>O4Z%DTcoW_Pm6iDNdBewdK#pe zy!=ro#e3^B2>0RsG!+Tg??%yPAn(b{ETlQ*hHJE2t+4Mf{9`PS7Oa)hXGt%ub8wLV zZfdc10FL#4 zk&ek{vz+!O#+*=bqu>D~WqnW_RY|}S$U!`55ovz}#Y(aL@wlYiC7Cz%jd5);X6FjT;9sw#&a|xuBZ%Q-Eaa_^7=5z z@ftaRLmKpO;wsygCaN>CrC#DnXed`JgjUmflSEQLp$c9UZK3o0xUba*)nX)9+m{Py z=hLuL$5E_)*>bUPz+!yv5H{b4ATJAK83l87LAky|gfyXae%%#FY4JNvMloV*pXEsK zR^bb}Y@1FK6fT|UasA}3{$&|QvhOpn(tfeiD*LLkqvI-?v=cTtqg^ zlZ!R{TWkJrXi9ZAkOTWd7Wy zY|zxVL)>?Gv6O15(6t>8Ro-LikaC=U97u@|B&0qxZvfAf`<9QVtkSk+9w}XJO{{?A zDe6z>o`1r+-Zk9>1s>k3!fXYz4xf1&TYm42+W6x@_TL{*=9?1u-Xz}T=ai8r!H-pK zCu)sJ9^w%@dy6FBeA6V024ky$waXe*aO%_2c*W#ZO;An1%XoqLC-O5k+8!1-H0c zAsYy7me(QMlF9aMal70p{5!!m3K_X_ba;Ivr@2gXS$h1*U6UcC7lwsfFt5!r6foUj z#GH3i%XB>6 zMf~gHvc&#wc?IP5bHs0<&oC3t#3(>AWCK*RO6{T~-AAXUYV)b*PQ!*1XBq`=JwT4Z z+S+_j%1^pW!oO@=pE7h9k1wZ z!N$19&v8My_C}yo;B;`fbZ!J6Pi@tZqfhRdcvruq6O&NDFb{ZFV5SuA=q}SxPpPn? 
z)(jgz0ULhcbp~bNUI9sHy`pwWXXa`AGp~f34dPvZOBS)rWv)%z=V-2mew5js1@8^N za;tMV^K{jnW%nK3H?c#r&_tMk!=w{$c8hOdv=Ev{WHP;tiyOD58*W9y2VGOc5ZOJO z;qTKrO$>#Q7uAiEP;vHD;KBVqP6O1)e(9E{CY4+N!c1;6B-GHD`7th;41BrU@PdVn zCf=@Hts<_i0s|`5K5?P_zN<5|X$JMmRWtS`mH55ECxemkt-WhpUZqs2bT1~tRW3V>O&M(jK1Vrkdv8=qnq zLRHzRq93gSg%+rqP52s*-rf%&H=!W>>;<4yi&Mz*$j2HghVWb(UjH#I`4v#GgZqZe ziZS3+Y5=aE0$rxb3VfP-8x%y`1hL(>dGRAAcu3TVTk(WtuEfc#y&nwkPjU6?@ruHI42HmMw3pN_si}QeA`WYR$fgMFq8Pwp9de*XN>m&z9uvOP zvAovu3q8LJlg2-Zh1I4--)BkN+ET{Z2@m0OiuMb{C1M~p1GqXNIGp?t zawj}F^^afF@9Y!pygZLf!*PHSPv}o9sg`T(472_oLKVgV)ID%mQywfxcL-7C8(ERk zym;VvF@NAub{-$8<%PE03p%&lS4x0%gS_thZVngbtnh+FC)dZRg{SjbzX*B>c>BPP zr|&BP<)J0>oCG&TnJj$2H)w&}4!Ly3>l^RMEc=tb?Vq1ik!x5KZ8C4 zzqO5r#W+t0aKdvu!NVltlal=;xX=5I)6vl}y(S1zF24*DVz_Z8hFrce4qRGu1-46y z#{k&ic0lix6PY2q`DorQKL9oiUv;a9FR88E7Bec_n%-@G;JgwUEznFdYWFl>M7Oz@ zc9uVIa3IrMh5v)}=?Vx9AuJGQ(O3y;>CkVW40d~5mC#dQ-?YMAGPK@^t124v!ziMf z_H~Fg)qK@kyVG!=+s~Z zg}q@rcq#%xyCoxGt27oAjU08~7Sx$SdbLLCZ#4mSMm#e9#B$)rio0skyFa}Y&;f46 zvTCh31W)G3GL@ToFg9gz)Mf+H?7t{Pj4*{0^fRTQHma)Vgb$$k8r(iFp8bugR?3Cy zxz7wv1j4sor2_c~|2yei&%e3nX9oF8dw|sPzs_#{Ee4ubiqLXh(l|^N|6MHj-$Y3w zf(MVLvA6)RK##*Dv4g7$rzhC#@o$|0DeNx!KjQ@ep&w+H7x{hgUiAOp7qO1mZ50HK zj}=*M{veCIShfZ6n={HCxF0;t#GP(w5ye!V=Fup~?ERDRFJH6fs~&pU29~T`qx*=c z6qatLT}xOIv;ne;pzl{fraFk-6X$7)St7bz@-!SKaXLkt&l>^XE_Dd!;GqO!>84M| zQ>>4}OM}-Psd#~f=j5j=;lz>>uvTXE^!;5gh9!pEsu$wpO3OPMOcUgNGL<)MV z*B2zX@f5TSXxM|Ll@xHe~H- zdit)*KLT85@+Tbmel2(ak$ziH%W}U8q4eHupT+IE9pyRaTXh1?J$r*jBzWf&7G9s3 zm~3K#t&+X--d%?U@sYeOjWrAxH{YK>2+1!eK7BU3Y^sARiajzY--TRkJn{%O)$@d^ zzr2J9&xP2clv$lOVZk`p=K)k8GFT@GJ`38K=f0Xl>jGt3EO`5kvKu?F{riD71V72Y zO7w@VUi&027;yRj&8ANEX+G64Ny2mg+@11jnuv33`U?PUEu#dzRyp*-b5v86ycJOzRL%PfPN z72MW#wC_0VTa2=p^U8E?GYMHzD>l^=UKRo*vT1ToAJ8o9&VQ}*#mOf=zWU1EvJlLA ze54hNR{wO{Z6}}VnX@J8XW?WXV@Vz^mruNZv#)Xk{XOjjYu!_u0d z>3hwJM5s7aK`bw|-GaW&ifJiz$!AyfNnw9<5M*vq=eoCn|B6~2L?dfrlU7>kiX_FD z!rf3dPekZ3yjaHsgPM@ih8G$Pv(P18LH$#!%lYT0Lf`eNr8~Qb!tQ3-?8fM#wf#)@ znWA}-X;IFLlVJa7SGth6PeIYd%ZZm1-&H~yj^Yh#Is8sbP+F4xoeHIgc`xIg?B+Oh zV6AopGEshunch9(-P?TqdSVY2Qu|zb6lpgz6W2k0qA6oYFW9wf3CSkof7o!_b}cN4 zq;KYI8DepQRtGkW?ds=Z++oAa;UryU9Wp?lXO322uHLz>NukrSG+yZ+QPvDu4cYRl2TIPCmtT3?(i0TV>~sJC{H~WW3>_YiSg7G+)oGz_#aibsb*`PJz3q z&ku&Y`$yYa1Iu!hk3nx&lcKHl=RzdgH z$p%4N`+FrMn67Z7^!34!2^}?nteT{oLZRYJB~|RbUv-K@eO8HD3kAJB#Y!IHs5!1z}QV zMtT8-{+~MQ`)PSkwtTZv(y>*dY+(kizOs?|ox7hTN3A38TH8a@wFu+K{K)Tmb03IN zQmnwcL%%?zJ7Y&-=Rt>F zp)4gv72ul-y=@ZfbP{p%5uz&{sYYD%iy)Chn;Ho}aRLYhfV7O6oT4i9W38Fb1f92P zig9aE)7=;f`&#vCh$|fh)q*795gz;Bm0mDlNH=F)E&IYtsvJQ;4$xGlT4C2sihd3-nk*saz%Cw{+L zUnYc8TFK18gs1#BrLAoWyR$1TunLOpS{XYtOkOvfzqUX`G00tl%Gw!O&kp}~8hFge z-`(+`z?uE! z&6WXA^me2nPk@VzA5)y(kzq%V2yn^@yeawyzK{5&Us1SbS9a7BUe zH8V+6@?~w{wpNz^%_Fs`r1p(NI&V zCfa0ioSk7SmojRr>7i?BeAvEmrYGAx)CmYlC3gsq|L!WZY~9R$IC>p1 zde~SWJISGj;VA-M5Si#f@cU+&1cFlA9T;3nzZ7|-?joqEn7FS^kx#FQepkov)E>{WPd_s-jXD5SLY;uqScEBC5}d=$K8NL zan-nQU!n=+w$`J6zV}aH^^3tI!L_noqXVqEJ&Q+4w`|mV^J)0OxO$&Kjtv1FdN20? zs=C_$fwOh40qz;cKQiuaExb}-4JwUo7jS-!TF-3OHI>l^N>EsAYb+srQ&@&K^;ITa zofp0t-1>zq=-&0McLzakCFgiOFRn!QA6Xz^$F&yc2tSh@))D`cD+oYx4D@3^M~I!h zD(M=V`$f||Eoav}xW8VlbkwUeqcL~Xfj@iGT9)-^IfK_dP$%=S&icf^8`q}o2$=DA zLm&otCcxM$V}Ll2q~q(fejj~0;trETUYq|-cFw|s%g$L! ze$@zS8>CHJ|B{vq|8r@%KCYg-G!!IR1px-nk=A+Gv;N(zUjY;fKRTf`2_zeVAbs`J*7V5alU5=?7Ic|C}Sxfo;THK zQ@E?;pMSY>LdMYO(XWEk-$)QUobqec|LaPN|G4(w%7z+gN|NXk^RWIeu&(R~zQXL{ z{36{%2Ns3g#ez=l?TrWUths+U7!eU*;Yjf~@E}D$C>mB8v<VWad)?Is9WrC4vD{Mmn%n! 
zcYL&0)h@|kYnwibgxz<-Nop-4#*8u(f;VuTuhDNEXHqS5jE) z@fjsocFJp9!=nvoc)ZSG(IRh#AeY}e0Cfc!vPc9^@rnXkdk`bXp=Uo)Th4Bf#M^gQ zs$YKkm8&ysDM`|a5??hs7{9>oK;e#Os+kjbWnO(Snugq&%sK40lDsWY#J?PY;4!S? z4>SS#FXU3#VTx#JR7cdNQP!gGLU+3m_6`k<+Ii*P>T1^t6?X}{xf@Fc;)yPn{Gg>>E`Y$ zcJSPOW}ZVMt&53};(#GjA~3Vy^MnYn11 zTz5h9FHd64u{!x4?gI?*w%`r)h4hIUtS5aKXfm{?@nK&u{&|RR7qrbez)sX6SHdBl zH{9#<#kFdGe0St8nn0#VxBzo@n9o7Ee)}2`GVh#@S;!elgCSL4Cb;!1;aZtF6`xpf zim#wRILaImO=@{=q+BCSg1-@?l{L@&xlC6ZevFOwuPWQqthjL#FBL5{dh^0Mr^R{?yI@dYenDej#|M_$RqrEZ|Dva39)#-gnkL zSP~I3r>vL+YG2MXV(xpjt*w4$9DNs2RSMnV7B36o-_Qx2-bOG1Gg`_lqAbw6pCr~k zkiO%2xIx_PLng}x8TW${)8Krtdpu(CY?p*#gNl&0VQbG>Go>dvXVr2KdNSB41)MD_ z)03lyZCrD&V%)iROKd2-yq(i0O)4O?b`PrZP$Tuyx$lhFP19OMpB<&IkNXZoT6No8 z*CfC@DC`)8F)+>Yp(denh)JTk3!??wf1!b4ryt~*s}t{<@%WraX?_$xIMD&=c+a{g zl8-2K+>{lmj|rSPK*70re2WWyJ`tua-3+<+ya;)>YY^0gsmz}Ur#yHd#twM~W zWm61W$dP!GThNt_m@0k|yU(ukF`$Lt%4C^%!EKnv% zE|$&jL-@rwT)*#BZ5k}q_Zw-jV(bwZ@8b2>PW08`iLb5Ms#Eu=rW>w6 zp`!=pb=H?}SG<)>=6)_zrNEXC%oHq=`SP#qyORCvlV`Ff-oysH!#USqiva_$hZq*y zg#Ok30S0OhA@)K41)nVaXSXAP5r>#ESYcx6dpV56=pIWkcDkCD8x!(*;Mss{s`j-( zmhl0zUxfb(1M(&hw~X6wn!SfQPxfA)8!Sq7_J_L%JiVj{bW|kQz5riVEJbWir(q;* z<{BN``oyS4I-^P$-mbYCY$Nn=GUzuaeV7_p^WTx4=G2amFU(@zBTWjm&9 zp}kq?RjYT1q`YCLH3Q0)vi`oH&)7kM{TEnxLLUm7oZ%j?RqUZOWynBVc0-?LR+)MO zdJOdW?IvOc^f)xr+SfGvDx0QeXZ0R$9|EAiMw5s1=}Eicb@|_{(A|VE>xJsFw3e7x zQ97<;trJ9k&Q{%?>F+BbidX+0Jyo^EY|(iLb}s0o0OJx}dL2pY^(v^P@?8e7Evjm& ziQCo44t>D_J2at$9hwOF)BFP0Ws0qGm05RwmST2;MW))*rC!!&LfjV(a}%huJhV@^ zeHYF@jAP#lcus%nMmr%K*`oGhO#u}>3~Sh#@n8Kq+$8+Z6!Bj<%JWacm;dvbq5pZ& zyUYJ0qyMJp{Qn{Kj=!vSNwZPtYQ#{&Y6|YlomchAbguGve?IDObM|>N)thcGmCGk= z0Z){o04frbF+XX|Iq{O|`1un1XsL{Z^ih$tctH``;gJ}cKgNI*NE9+?S+VP&j zxdH-$N5d+(zmGp*3zy9{#C~P}d65I09$Fkqbp2i9g&tn)+OaKUNnT4_#HgbLH!iJK znLx#SupjB4JW*<;L)#2gBnb#!NO4DiYIAX<{P(@xXWC0MfKr(;<9?7bW2g~@CO31W z)%Rw>?^#hK-cie*?KZ=N(9Sk>Gi?6kY5h~D;m&yK ze7kH7FuKpnCto=GyWb{{#88Zz7Q(%xDF%@Y>e{QI^P0Ztu)Fup^j3)xvZevYrO%;y zLIRkRTnn^beMch7MT#?OGEMHM?Yx;(Xi(l#7_gjcXFd?k3*g=!Wer>Fx$@YkS{|9o zuxHac-zs@gabeTReo{P>Q2RE017^kt=?)8J2(xFdxq({HrljMj5rf@4`t?&V zsO&dAQ|~1Z2xT9X_q0=>cCtabV}a)G6k1ifRi*nwMoOmoZwuavJWZCIfq5l||N99Y~9Qhnj z{kJvY0q!+|AQ{P>FGk_r=OWF@_ev_^WiDHaEA+F(fH0m3?WB;ggA2w6S<03M(A%1p zVt%Ntu6+|d&%tL{<9Q`%SD8?=zM+0rQHO>AAotuUQS24Azq_K*gI%efCkb)zS)n!X znY8ED7s*Y8+O?L#&`bZ(=lRC7u%M~kDLb%!pM;HqNEJ) zw|pSl3Smwo>&~oY?ho>5Owr0@d2xmGu3f}1-Mu8oh9lAN^(Qt@URGbJOe;&}g<&(! 
literal 0
HcmV?d00001

diff --git a/tests/acl/test_acl.py b/tests/acl/test_acl.py
index a62068599e..78c4426e04 100644
--- a/tests/acl/test_acl.py
+++ b/tests/acl/test_acl.py
@@ -257,7 +257,7 @@ def teardown_rules(self, dut, setup):
         dut.command('config acl update full {}'.format(remove_rules_dut_path))
 
     @pytest.fixture(scope='class', autouse=True)
-    def acl_rules(self, duthost, testbed_devices, setup, acl_table):
+    def acl_rules(self, duthost, localhost, setup, acl_table):
         """
         setup/teardown ACL rules based on test class requirements
         :param duthost: DUT host object
@@ -266,7 +266,6 @@ def acl_rules(self, duthost, testbed_devices, setup, acl_table):
         :param acl_table: table creating fixture
         :return:
         """
-        localhost = testbed_devices['localhost']
         loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='acl_rules')
         loganalyzer.load_common_config()
 
diff --git a/tests/ansible_host.py b/tests/ansible_host.py
deleted file mode 100644
index de8bdd5d87..0000000000
--- a/tests/ansible_host.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from ansible.plugins.loader import callback_loader
-from ansible.errors import AnsibleError
-
-def dump_ansible_results(results, stdout_callback='yaml'):
-    cb = callback_loader.get(stdout_callback)
-    return cb._dump_results(results) if cb else results
-
-class AnsibleModuleException(AnsibleError):
-
-    """Sub-class AnsibleError when module exceptions occur."""
-
-    def __init__(self, msg, results=None):
-        super(AnsibleModuleException, self).__init__(msg)
-        self.results = results
-
-    def __str__(self):
-        return "{}\nAnsible Results => {}".format(self.message, dump_ansible_results(self.results))
-
-class AnsibleHost(object):
-    """ wrapper for ansible host object """
-
-    def __init__(self, ansible_adhoc, hostname, is_local=False):
-        if is_local:
-            self.host = ansible_adhoc(inventory='localhost', connection='local')[hostname]
-        else:
-            self.host = ansible_adhoc(become=True)[hostname]
-        self.hostname = hostname
-
-    def __getattr__(self, item):
-        self.module_name = item
-        self.module = getattr(self.host, item)
-
-        return self._run
-
-    def _run(self, *module_args, **complex_args):
-
-        module_ignore_errors = complex_args.pop('module_ignore_errors', False)
-
-        res = self.module(*module_args, **complex_args)[self.hostname]
-        if res.is_failed and not module_ignore_errors:
-            raise AnsibleModuleException("run module {} failed".format(self.module_name), res)
-
-        return res
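For reference, the wrapper deleted above turned attribute access into an Ansible module invocation through __getattr__. A hypothetical call site, reconstructed from the class body just removed (host name and module choice are illustrative, not taken from this patch):

    # dut.shell resolves through AnsibleHost.__getattr__ to the Ansible 'shell'
    # module; module_ignore_errors=True suppresses AnsibleModuleException.
    dut = AnsibleHost(ansible_adhoc, 'my-dut')
    res = dut.shell('show version', module_ignore_errors=True)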
diff --git a/tests/platform/args/__init__.py b/tests/arp/arp_args/__init__.py
similarity index 100%
rename from tests/platform/args/__init__.py
rename to tests/arp/arp_args/__init__.py
diff --git a/tests/arp/arp_args/wr_arp_args.py b/tests/arp/arp_args/wr_arp_args.py
new file mode 100644
index 0000000000..4ea83b9994
--- /dev/null
+++ b/tests/arp/arp_args/wr_arp_args.py
@@ -0,0 +1,19 @@
+# WR-ARP Args file
+
+def add_wr_arp_args(parser):
+    '''
+    Adding arguments required for wr arp test cases
+
+    Args:
+        parser: pytest parser object
+
+    Returns:
+        None
+    '''
+    parser.addoption(
+        "--test_duration",
+        action="store",
+        type=int,
+        default=370,
+        help="Test duration",
+    )
diff --git a/tests/arp/conftest.py b/tests/arp/conftest.py
new file mode 100644
index 0000000000..95bb9b21dd
--- /dev/null
+++ b/tests/arp/conftest.py
@@ -0,0 +1,14 @@
+from arp_args.wr_arp_args import add_wr_arp_args
+
+# WR-ARP pytest arguments
+def pytest_addoption(parser):
+    '''
+    Adds WR-ARP options to the ARP pytest module
+
+    Args:
+        parser: pytest parser object
+
+    Returns:
+        None
+    '''
+    add_wr_arp_args(parser)
diff --git a/tests/arp/files/ferret.conf.j2 b/tests/arp/files/ferret.conf.j2
new file mode 100644
index 0000000000..485153c819
--- /dev/null
+++ b/tests/arp/files/ferret.conf.j2
@@ -0,0 +1,10 @@
+[program:ferret]
+command=/usr/bin/python /opt/ferret.py {{ ferret_args }}
+process_name=ferret
+stdout_logfile=/tmp/ferret.out.log
+stderr_logfile=/tmp/ferret.err.log
+redirect_stderr=false
+autostart=false
+autorestart=true
+startsecs=1
+numprocs=1
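The three files above wire a --test_duration pytest option and a supervisord unit for ferret into the ARP tests. A minimal sketch of how a test could consume them (hypothetical test body; the ferret_args value mirrors the usage comment at the top of ferret.py below and is assumed, not taken from this patch):

    from jinja2 import Template

    def test_wr_arp(request):
        # Option registered by add_wr_arp_args() via tests/arp/conftest.py
        test_duration = request.config.getoption('--test_duration')  # defaults to 370
        assert test_duration > 0

        # Render the supervisord unit, substituting ferret_args
        with open('tests/arp/files/ferret.conf.j2') as f:
            conf = Template(f.read()).render(ferret_args='-f /tmp/vxlan_decap.json -s 192.168.8.1')
        print(conf)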
self.send_header("Content-type", "application/json") + self.send_header("Content-Length", str(l)) + self.send_header("Last-Modified", self.date_time_string()) + self.end_headers() + shutil.copyfileobj(f, self.wfile) + f.close() + return + + def generate_response(self, response): + f = StringIO() + json.dump(response, f) + l = f.tell() + f.seek(0) + return f, l + + +class RestAPI(object): + PORT = 448 + + def __init__(self, obj, db, src_ip): + self.httpd = SocketServer.TCPServer(("", self.PORT), obj) + self.context = ssl.SSLContext(ssl.PROTOCOL_TLS) + self.context.verify_mode = ssl.CERT_NONE + self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + self.context.load_cert_chain(certfile="/opt/test.pem", keyfile="/opt/test.key") + self.httpd.socket=self.context.wrap_socket(self.httpd.socket, server_side=True) + self.db = db + obj.db = db + obj.src_ip = src_ip + + def handler(self): + return self.httpd.fileno() + + def handle(self): + return self.httpd.handle_request() + + +class Interface(object): + ETH_P_ALL = 0x03 + RCV_TIMEOUT = 1000 + RCV_SIZE = 4096 + SO_ATTACH_FILTER = 26 + + def __init__(self, iface, bpf_src): + self.iface = iface + self.socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(self.ETH_P_ALL)) + if bpf_src is not None: + blob = ctypes.create_string_buffer(''.join(struct.pack("HBBI", *e) for e in bpf_src)) + address = ctypes.addressof(blob) + bpf = struct.pack('HL', len(bpf_src), address) + self.socket.setsockopt(socket.SOL_SOCKET, self.SO_ATTACH_FILTER, bpf) + self.socket.bind((self.iface, 0)) + self.socket.settimeout(self.RCV_TIMEOUT) + + def __del__(self): + self.socket.close() + + def handler(self): + return self.socket.fileno() + + def recv(self): + return self.socket.recv(self.RCV_SIZE) + + def send(self, data): + self.socket.send(data) + + +class Poller(object): + def __init__(self, httpd, interfaces, responder): + self.responder = responder + self.mapping = {interface.handler(): interface for interface in interfaces} + self.httpd = httpd + + def poll(self): + handlers = self.mapping.keys() + [self.httpd.handler()] + while True: + (rdlist, _, _) = select.select(handlers, [], []) + for handler in rdlist: + if handler == self.httpd.handler(): + self.httpd.handle() + else: + self.responder.action(self.mapping[handler]) + + +class Responder(object): + ARP_PKT_LEN = 60 + ARP_OP_REQUEST = 1 + def __init__(self, db): + self.arp_chunk = binascii.unhexlify('08060001080006040002') # defines a part of the packet for ARP Reply + self.arp_pad = binascii.unhexlify('00' * 18) + self.db = db + + def hexdump(self, data): + print " ".join("%02x" % ord(d) for d in data) + + def action(self, interface): + data = interface.recv() + + ext_dst_mac = data[0x00:0x06] + ext_src_mac = data[0x06:0x0c] + ext_eth_type = data[0x0c:0x0e] + if ext_eth_type != binascii.unhexlify('0800'): + print "Not 0x800 eth type" + self.hexdump(data) + print + return + src_ip = data[0x001a:0x001e] + dst_ip = data[0x1e:0x22] + gre_flags = data[0x22:0x24] + gre_type = data[0x24:0x26] + + gre_type_r = struct.unpack('!H', gre_type)[0] + self.hexdump(data) + if gre_type_r == 0x88be: # Broadcom + arp_request = data[0x26:] + if ASIC_TYPE == "barefoot": + # ERSPAN type 2 + # Ethernet(14) + IP(20) + GRE(4) + ERSPAN(8) = 46 = 0x2e + # Note: Count GRE as 4 byte, only mandatory fields. 
+ # References: https://tools.ietf.org/html/rfc1701 + # https://tools.ietf.org/html/draft-foschiano-erspan-00 + arp_request = data[0x2E:] + + elif gre_type_r == 0x8949: # Mellanox + arp_request = data[0x3c:] + else: + print "GRE type 0x%x is not supported" % gre_type_r + self.hexdump(data) + print + return + + if len(arp_request) > self.ARP_PKT_LEN: + print "Too long packet" + self.hexdump(data) + print + return + + remote_mac, remote_ip, request_ip, op_type = self.extract_arp_info(arp_request) + # Don't send ARP response if the ARP op code is not request + if op_type != self.ARP_OP_REQUEST: + return + + request_ip_str = socket.inet_ntoa(request_ip) + + if request_ip_str not in self.db: + print "Not in db" + return + + r = self.db[request_ip_str] + if r.expired < time.time(): + print "Expired row in db" + del self.db[request_ip_str] + return + + if r.family == 'ipv4': + new_pkt = ext_src_mac + ext_dst_mac + ext_eth_type # outer eth frame + ipv4 = binascii.unhexlify('45000060977e400040110000') + dst_ip + src_ip # ip + crc = self.calculate_header_crc(ipv4) + ipv4 = ipv4[0:10] + crc + ipv4[12:] + new_pkt += ipv4 + new_pkt += binascii.unhexlify('c00012b5004c1280') # udp + new_pkt += binascii.unhexlify('08000000%06x00' % r.vxlan_id) # vxlan + + arp_reply = self.generate_arp_reply(binascii.unhexlify(r.mac), remote_mac, request_ip, remote_ip) + new_pkt += arp_reply + else: + print 'Support of family %s is not implemented' % r.family + return + + interface.send(new_pkt) + + return + + def calculate_header_crc(self, ipv4): + # IPv4 header checksum: one's complement of the 16-bit one's complement sum. + s = 0 + for l,r in zip(ipv4[::2], ipv4[1::2]): + l_u = struct.unpack("B", l)[0] + r_u = struct.unpack("B", r)[0] + s += (l_u << 8) + r_u + + c = s >> 16 + s = s & 0xffff + + while c != 0: + s += c + c = s >> 16 + s = s & 0xffff + + s = 0xffff - s + + return binascii.unhexlify("%04x" % s) # pad to 4 hex digits; "%x" could yield an odd-length string + + def extract_arp_info(self, data): + # remote_mac, remote_ip, request_ip, op_type + return data[6:12], data[28:32], data[38:42], (ord(data[20]) * 256 + ord(data[21])) + + def generate_arp_reply(self, local_mac, remote_mac, local_ip, remote_ip): + eth_hdr = remote_mac + local_mac + return eth_hdr + self.arp_chunk + local_mac + local_ip + remote_mac + remote_ip + self.arp_pad + +# Classic BPF filter: accept IPv4 or IPv6 packets that carry GRE (IP protocol 0x2f). +def get_bpf_for_bgp(): + bpf_src = [ + (0x28, 0, 0, 0x0000000c), # (000) ldh [12] + (0x15, 0, 2, 0x00000800), # (001) jeq #0x800 jt 2 jf 4 + (0x30, 0, 0, 0x00000017), # (002) ldb [23] + (0x15, 6, 7, 0x0000002f), # (003) jeq #0x2f jt 10 jf 11 + (0x15, 0, 6, 0x000086dd), # (004) jeq #0x86dd jt 5 jf 11 + (0x30, 0, 0, 0x00000014), # (005) ldb [20] + (0x15, 3, 0, 0x0000002f), # (006) jeq #0x2f jt 10 jf 7 + (0x15, 0, 3, 0x0000002c), # (007) jeq #0x2c jt 8 jf 11 + (0x30, 0, 0, 0x00000036), # (008) ldb [54] + (0x15, 0, 1, 0x0000002f), # (009) jeq #0x2f jt 10 jf 11 + (0x6, 0, 0, 0x00040000), # (010) ret #262144 + (0x6, 0, 0, 0x00000000), # (011) ret #0 + ] + return bpf_src + + +def extract_iface_names(config_file): + with open(config_file) as fp: + graph = json.load(fp) + + net_ports = [] + for name, val in graph['minigraph_portchannels'].items(): + members = ['eth%d' % graph['minigraph_port_indices'][member] for member in val['members']] + net_ports.extend(members) + + return net_ports + +def parse_args(): + parser = argparse.ArgumentParser(description='Ferret VXLAN API') + parser.add_argument('-f', '--config-file', help='file with configuration', required=True) + parser.add_argument('-s', '--src-ip', help='Ferret endpoint ip', required=True) + parser.add_argument('-a', '--asic-type', help='ASIC vendor name', type=str, 
required=False) + args = parser.parse_args() + if not os.path.isfile(args.config_file): + print "Can't open config file '%s'" % args.config_file + exit(1) + + global ASIC_TYPE + ASIC_TYPE = args.asic_type + return args.config_file, args.src_ip + +def main(): + db = {} + + config_file, src_ip = parse_args() + iface_names = extract_iface_names(config_file) + rest = RestAPI(Ferret, db, src_ip) + bpf_src = get_bpf_for_bgp() + ifaces = [Interface(iface_name, bpf_src) for iface_name in iface_names] + responder = Responder(db) + p = Poller(rest, ifaces, responder) + p.poll() + +if __name__ == '__main__': + main() diff --git a/tests/arp/test_wr_arp.py b/tests/arp/test_wr_arp.py new file mode 100644 index 0000000000..26319a92d1 --- /dev/null +++ b/tests/arp/test_wr_arp.py @@ -0,0 +1,216 @@ +import json +import logging +import pytest + +from common.platform.ssh_utils import prepare_testbed_ssh_keys as prepareTestbedSshKeys +from ptf_runner import ptf_runner + +logger = logging.getLogger(__name__) + +# Globals +PTFRUNNER_QLEN = 1000 +VXLAN_CONFIG_FILE = '/tmp/vxlan_decap.json' + +class TestWrArp: + ''' + TestWrArp performs control plane assisted warm-reboot + ''' + def __prepareVxlanConfigData(self, duthost, ptfhost): + ''' + Prepares Vxlan Configuration data for Ferret service running on PTF host + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + ''' + mgFacts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + vxlanConfigData = { + 'minigraph_port_indices': mgFacts['minigraph_port_indices'], + 'minigraph_portchannel_interfaces': mgFacts['minigraph_portchannel_interfaces'], + 'minigraph_portchannels': mgFacts['minigraph_portchannels'], + 'minigraph_lo_interfaces': mgFacts['minigraph_lo_interfaces'], + 'minigraph_vlans': mgFacts['minigraph_vlans'], + 'minigraph_vlan_interfaces': mgFacts['minigraph_vlan_interfaces'], + 'dut_mac': duthost.setup()['ansible_facts']['ansible_Ethernet0']['macaddress'] + } + with open(VXLAN_CONFIG_FILE, 'w') as file: + file.write(json.dumps(vxlanConfigData, indent=4)) + + logger.info('Copying ferret config file to {0}'.format(ptfhost.hostname)) + ptfhost.copy(src=VXLAN_CONFIG_FILE, dest='/tmp/') + + @pytest.fixture(scope='class', autouse=True) + def setupFerret(self, duthost, ptfhost): + ''' + Sets up Ferret service on PTF host. 
This class-scope fixture runs once before test start + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + ''' + ptfhost.copy(src="arp/files/ferret.py", dest="/opt") + + result = duthost.shell( + cmd='''ip route show proto zebra type unicast | + sed -e '/default/d' -ne '/0\//p' | + head -n 1 | + sed -ne 's/0\/.*$/1/p' + ''' + ) + assert len(result['stderr_lines']) == 0, 'Could not obtain DIP' + + dip = result['stdout'] + logger.info('VxLan Sender {0}'.format(dip)) + + ptfhost.host.options['variable_manager'].extra_vars.update({ + 'ferret_args': '-f /tmp/vxlan_decap.json -s {0}'.format(dip) + }) + + logger.info('Copying ferret config file to {0}'.format(ptfhost.hostname)) + ptfhost.template(src='arp/files/ferret.conf.j2', dest='/etc/supervisor/conf.d/ferret.conf') + + logger.info('Generate pem and key files for ssl') + ptfhost.command( + cmd='''openssl req -new -x509 -keyout test.key -out test.pem -days 365 -nodes + -subj "/C=10/ST=Test/L=Test/O=Test/OU=Test/CN=test.com"''', + chdir='/opt' + ) + + self.__prepareVxlanConfigData(duthost, ptfhost) + + logger.info('Refreshing supervisor control with ferret configuration') + ptfhost.shell('supervisorctl reread && supervisorctl update') + + @pytest.fixture(scope='class', autouse=True) + def copyPtfDirectory(self, ptfhost): + ''' + Copies PTF directory to PTF host. This class-scope fixture runs once before test start + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + ''' + ptfhost.copy(src="ptftests", dest="/root") + + @pytest.fixture(scope='class', autouse=True) + def setupRouteToPtfhost(self, duthost, ptfhost): + ''' + Sets up routes on DUT to PTF host. This class-scope fixture runs once before test start + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + ''' + result = duthost.shell(cmd="ip route show table default | sed -n 's/default //p'") + assert len(result['stderr_lines']) == 0, 'Could not find the gateway for management port' + + gwIp = result['stdout'] + ptfIp = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host'] + + route = duthost.shell(cmd='ip route get {0}'.format(ptfIp))['stdout'] + if 'PortChannel' in route: + logger.info( + "Add explicit route for PTF host ({0}) through eth0 (mgmt) interface ({1})".format(ptfIp, gwIp) + ) + duthost.shell(cmd='ip route add {0}/32 {1}'.format(ptfIp, gwIp)) + + yield + + if 'PortChannel' in route: + logger.info( + "Delete explicit route for PTF host ({0}) through eth0 (mgmt) interface ({1})".format(ptfIp, gwIp) + ) + duthost.shell(cmd='ip route delete {0}/32 {1}'.format(ptfIp, gwIp)) + + @pytest.fixture(scope='class', autouse=True) + def removePtfhostIp(self, ptfhost): + ''' + Removes IP assigned to eth interface of PTF host. This class-scope fixture runs once before test start + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + ''' + ptfhost.script(src='scripts/remove_ip.sh') + + @pytest.fixture(scope='class', autouse=True) + def changePtfhostMacAddresses(self, ptfhost): + ''' + Changes MAC addresses (unique) on PTF host. 
This class-scope fixture runs once before test start + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + ''' + ptfhost.script(src="scripts/change_mac.sh") + + @pytest.fixture(scope='class', autouse=True) + def prepareSshKeys(self, duthost, ptfhost): + ''' + Prepares testbed ssh keys by generating ssh key on ptf host and adding this key to known_hosts on duthost + This class-scope fixture runs once before test start + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + ''' + inventory = duthost.host.options['inventory'].split('/')[-1] + secrets = duthost.host.options['variable_manager']._hostvars[duthost.hostname]['secret_group_vars'] + + prepareTestbedSshKeys(duthost, ptfhost, secrets[inventory]['sonicadmin_user']) + + def testWrArp(self, request, duthost, ptfhost): + ''' + Control Plane Assistant test for Warm-Reboot. + + The test first starts the Ferret server, implemented in Python, and then initiates the Warm-Reboot procedure. + While the host is in Warm-Reboot, the test continuously sends ARP requests to the Vlan member ports and + expects to receive ARP replies. The test fails as soon as there are no replies for more than 25 seconds on + one of the Vlan member ports. + + Args: + request: pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + ''' + testDuration = request.config.getoption('--test_duration') + ptfIp = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host'] + dutIp = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] + + logger.info('Warm-Reboot Control-Plane assist feature') + ptf_runner( + ptfhost, + 'ptftests', + 'wr_arp.ArpTest', + qlen=PTFRUNNER_QLEN, + platform_dir='ptftests', + platform='remote', + params={ + 'ferret_ip' : ptfIp, + 'dut_ssh' : dutIp, + 'config_file' : VXLAN_CONFIG_FILE, + 'how_long' : testDuration, + }, + log_file='/tmp/wr_arp.ArpTest.log' + ) diff --git a/tests/bgp/conftest.py b/tests/bgp/conftest.py new file mode 100644 index 0000000000..7ceb77d765 --- /dev/null +++ b/tests/bgp/conftest.py @@ -0,0 +1,58 @@ +import pytest +import logging +from common.utilities import wait_until + +logger = logging.getLogger(__name__) + +@pytest.fixture(scope='module') +def setup_keepalive_and_hold_timer(duthost, nbrhosts): + # increase the keepalive and hold timer + # DUT ASN and neighbor IPs come from the running config (DEVICE_METADATA / BGP_NEIGHBOR) + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + metadata = config_facts['DEVICE_METADATA'] + for bgp_nbr_ip in config_facts.get('BGP_NEIGHBOR', {}): + duthost.command("vtysh -c \"configure terminal\" \ + -c \"router bgp {}\" \ + -c \"neighbor {} timers 60 180\"".format( + metadata['localhost']['bgp_asn'], \ + bgp_nbr_ip)) + + for k, nbr in nbrhosts.items(): + nbr['host'].eos_config(lines=["timers 60 180"], parents=["router bgp {}".format(nbr['conf']['bgp']['asn'])]) + + yield + +@pytest.fixture(scope='module') +def setup_bgp_graceful_restart(duthost, nbrhosts): + + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + bgp_neighbors = config_facts.get('BGP_NEIGHBOR', {}) + + for k, nbr in nbrhosts.items(): + logger.info("enable graceful restart on neighbor {}".format(k)) + logger.info("bgp asn {}".format(nbr['conf']['bgp']['asn'])) + res = nbr['host'].eos_config(lines=["graceful-restart restart-time 300"], \ + parents=["router bgp {}".format(nbr['conf']['bgp']['asn'])]) + logger.info("graceful-restart restart-time config result: {}".format(res)) + res = nbr['host'].eos_config(lines=["graceful-restart"], \ + parents=["router bgp {}".format(nbr['conf']['bgp']['asn']), "address-family ipv4"]) + logger.info("graceful-restart ipv4 config result: 
{}".format(res)) + res = nbr['host'].eos_config(lines=["graceful-restart"], \ + parents=["router bgp {}".format(nbr['conf']['bgp']['asn']), "address-family ipv6"]) + logger.info("abc {}".format(res)) + + # change graceful restart option will clear the bgp session. + # so, let's wait for all bgp sessions to be up + logger.info("bgp neighbors: {}".format(bgp_neighbors.keys())) + if not wait_until(300, 10, duthost.check_bgp_session_state, bgp_neighbors.keys()): + pytest.fail("not all bgp sessions are up after enable graceful restart") + + yield + + for k, nbr in nbrhosts.items(): + # start bgpd if not started + nbr['host'].start_bgpd() + logger.info("disable graceful restart on neighbor {}".format(k)) + nbr['host'].eos_config(lines=["no graceful-restart"], \ + parents=["router bgp {}".format(nbr['conf']['bgp']['asn']), "address-family ipv4"]) + nbr['host'].eos_config(lines=["no graceful-restart"], \ + parents=["router bgp {}".format(nbr['conf']['bgp']['asn']), "address-family ipv6"]) + + if not wait_until(300, 10, duthost.check_bgp_session_state, bgp_neighbors.keys()): + pytest.fail("not all bgp sessions are up after disable graceful restart") diff --git a/tests/test_bgp_fact.py b/tests/bgp/test_bgp_fact.py similarity index 69% rename from tests/test_bgp_fact.py rename to tests/bgp/test_bgp_fact.py index 1912224906..e91b9d2e12 100644 --- a/tests/test_bgp_fact.py +++ b/tests/bgp/test_bgp_fact.py @@ -1,13 +1,10 @@ -from ansible_host import AnsibleHost -def test_bgp_facts(ansible_adhoc, testbed,duthost): +def test_bgp_facts(duthost): """compare the bgp facts between observed states and target state""" - hostname = testbed['dut'] - ans_host = AnsibleHost(ansible_adhoc, hostname) npus = duthost.num_npus() - bgp_facts = ans_host.bgp_facts(num_npus=npus)['ansible_facts'] - mg_facts = ans_host.minigraph_facts(host=hostname)['ansible_facts'] + bgp_facts = duthost.bgp_facts()['ansible_facts'] + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] for k, v in bgp_facts['bgp_neighbors'].items(): # Verify bgp sessions are established diff --git a/tests/bgp/test_bgp_gr_helper.py b/tests/bgp/test_bgp_gr_helper.py new file mode 100644 index 0000000000..fcdb8fc505 --- /dev/null +++ b/tests/bgp/test_bgp_gr_helper.py @@ -0,0 +1,103 @@ +import pytest +import logging +import ipaddress +from common.helpers.assertions import pytest_assert +from common.utilities import wait_until + +logger = logging.getLogger(__name__) + +def test_bgp_gr_helper_routes_perserved(duthost, nbrhosts, setup_bgp_graceful_restart): + """ + Verify that DUT routes are preserved when peer performed graceful restart + """ + + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + bgp_neighbors = config_facts.get('BGP_NEIGHBOR', {}) + po = config_facts.get('PORTCHANNEL', {}) + dev_nbr = config_facts.get('DEVICE_NEIGHBOR', {}) + + rtinfo_v4 = duthost.get_ip_route_info(ipaddress.ip_address(u'0.0.0.0')) + if len(rtinfo_v4['nexthops']) == 0: + pytest.skip("there is no next hop for v4 default route") + + rtinfo_v6 = duthost.get_ip_route_info(ipaddress.ip_address(u'::')) + if len(rtinfo_v6['nexthops']) == 0: + pytest.skip("there is no next hop for v6 default route") + + ifnames_v4 = [nh[1] for nh in rtinfo_v4['nexthops']] + ifnames_v6 = [nh[1] for nh in rtinfo_v6['nexthops']] + + ifnames_common = [ ifname for ifname in ifnames_v4 if ifname in ifnames_v6 ] + ifname = ifnames_common[0] + + # get neighbor device connected ports + nbr_ports = [] + if ifname.startswith("PortChannel"): + for 
member in po[ifname]['members']: + nbr_ports.append(dev_nbr[member]['port']) + else: + pytest.skip("Peer device not connected via port channel is not supported") + logger.info("neighbor device connected ports {}".format(nbr_ports)) + + # get nexthop ip + for nh in rtinfo_v4['nexthops']: + if nh[1] == ifname: + bgp_nbr_ipv4 = nh[0] + + for nh in rtinfo_v6['nexthops']: + if nh[1] == ifname: + bgp_nbr_ipv6 = nh[0] + + # get the bgp neighbor + bgp_nbr = bgp_neighbors[str(bgp_nbr_ipv4)] + nbr_hostname = bgp_nbr['name'] + nbrhost = nbrhosts[nbr_hostname]['host'] + exabgp_sessions = ['exabgp_v4', 'exabgp_v6'] + pytest_assert(nbrhost.check_bgp_session_state([], exabgp_sessions), \ + "exabgp sessions {} are not up before graceful restart".format(exabgp_sessions)) + + # shutdown Rib agent, starting gr process + logger.info("shutdown rib process on neighbor {}".format(nbr_hostname)) + nbrhost.kill_bgpd() + + # wait till DUT enters NSF state + pytest_assert(wait_until(60, 5, duthost.check_bgp_session_nsf, bgp_nbr_ipv4), \ + "neighbor {} does not enter NSF state".format(bgp_nbr_ipv4)) + pytest_assert(wait_until(60, 5, duthost.check_bgp_session_nsf, bgp_nbr_ipv6), \ + "neighbor {} does not enter NSF state".format(bgp_nbr_ipv6)) + + # confirm ip route still there + rtinfo_v4 = duthost.get_ip_route_info(ipaddress.ip_address(u'0.0.0.0')) + pytest_assert(ipaddress.ip_address(bgp_nbr_ipv4) in [ nh[0] for nh in rtinfo_v4['nexthops'] ], \ + "cannot find nexthop {} in the new default route nexthops. {}".format(bgp_nbr_ipv4, rtinfo_v4)) + + rtinfo_v6 = duthost.get_ip_route_info(ipaddress.ip_address(u'::')) + pytest_assert(ipaddress.ip_address(bgp_nbr_ipv6) in [ nh[0] for nh in rtinfo_v6['nexthops'] ], \ + "cannot find nexthop {} in the new default route nexthops. {}".format(bgp_nbr_ipv6, rtinfo_v6)) + + # shutdown the connected ports from nbr + for nbr_port in nbr_ports: + nbrhost.shutdown(nbr_port) + + try: + # start Rib agent + logger.info("startup rib process on neighbor {}".format(nbr_hostname)) + nbrhost.start_bgpd() + + # wait for exabgp sessions to establish + pytest_assert(wait_until(300, 10, nbrhost.check_bgp_session_state, [], exabgp_sessions), \ + "exabgp sessions {} are not coming back".format(exabgp_sessions)) + finally: + # unshut the connected ports from nbr + for nbr_port in nbr_ports: + nbrhost.no_shutdown(nbr_port) + + # confirm bgp session up + graceful_restarted_bgp_sessions = [str(bgp_nbr_ipv4), str(bgp_nbr_ipv6)] + pytest_assert(wait_until(300, 10, duthost.check_bgp_session_state, graceful_restarted_bgp_sessions), \ + "graceful restarted bgp sessions {} are not coming back".format(graceful_restarted_bgp_sessions)) + + # Verify no route changes in the application db + # TODO diff --git a/tests/test_bgp_speaker.py b/tests/bgp/test_bgp_speaker.py similarity index 100% rename from tests/test_bgp_speaker.py rename to tests/bgp/test_bgp_speaker.py diff --git a/tests/common/devices.py b/tests/common/devices.py index b2b4e0c39d..ed77723eb9 100644 --- a/tests/common/devices.py +++ b/tests/common/devices.py @@ -7,9 +7,11 @@ We can consider using netmiko for interacting with the VMs used in testing. 
""" -import json import logging import os +import re +import json +import ipaddress from multiprocessing.pool import ThreadPool from datetime import datetime @@ -58,7 +60,7 @@ def run_module(module_args, complex_args): res = self.module(*module_args, **complex_args)[self.hostname] if res.is_failed and not module_ignore_errors: - raise RunAnsibleModuleFail("run module {} failed, errmsg {}".format(self.module_name, res)) + raise RunAnsibleModuleFail("run module {} failed".format(self.module_name), res) return res @@ -91,7 +93,7 @@ class SonicHost(AnsibleHostBase): For running ansible module on the SONiC switch """ - CRITICAL_SERVICES = ["swss", "syncd", "database", "teamd", "bgp", "pmon", "lldp"] + CRITICAL_SERVICES = ["swss", "syncd", "database", "teamd", "bgp", "pmon", "lldp", "snmp"] def __init__(self, ansible_adhoc, hostname, gather_facts=False): AnsibleHostBase.__init__(self, ansible_adhoc, hostname) @@ -129,22 +131,34 @@ def _get_npu_info(self): self._get_critical_services_for_multi_npu - def _platform_info(self): + def get_platform_info(self): + """ + @summary: Get the platform information of the SONiC switch. + @return: Returns a dictionary containing preperties of the platform information, for example: + { + "platform": "", + "hwsku": "", + "asic_type": "" + } + """ platform_info = self.command("show platform summary")["stdout_lines"] + result = {} for line in platform_info: if line.startswith("Platform:"): - self.facts["platform"] = line.split(":")[1].strip() + result["platform"] = line.split(":")[1].strip() elif line.startswith("HwSKU:"): - self.facts["hwsku"] = line.split(":")[1].strip() + result["hwsku"] = line.split(":")[1].strip() elif line.startswith("ASIC:"): - self.facts["asic_type"] = line.split(":")[1].strip() + result["asic_type"] = line.split(":")[1].strip() + return result def gather_facts(self): """ @summary: Gather facts of the SONiC switch and store the gathered facts in the dict type 'facts' attribute. """ self.facts = {} - self._platform_info() + platform_info = self.get_platform_info() + self.facts.update(platform_info) self._get_npu_info() logging.debug("SonicHost facts: %s" % json.dumps(self.facts)) @@ -202,6 +216,55 @@ def critical_services_fully_started(self): logging.debug("Status of critical services: %s" % str(result)) return all(result.values()) + def critical_process_status(self, service): + """ + @summary: Check whether critical process status of a service. 
+ + @param service: Name of the SONiC service + """ + result = {'status': True} + result['exited_critical_process'] = [] + result['running_critical_process'] = [] + critical_process_list = [] + + # return false if the service is not started + service_status = self.is_service_fully_started(service) + if not service_status: + result['status'] = False + return result + + # get critical process list for the service + output = self.command("docker exec {} bash -c '[ -f /etc/supervisor/critical_processes ] && cat /etc/supervisor/critical_processes'".format(service), module_ignore_errors=True) + for l in output['stdout'].split(): + critical_process_list.append(l.rstrip()) + if len(critical_process_list) == 0: + return result + + # get process status for the service + output = self.command("docker exec {} supervisorctl status".format(service)) + logging.info("====== supervisor process status for service {} ======".format(service)) + + for l in output['stdout_lines']: + (pname, status, info) = re.split(r"\s+", l, 2) + if status != "RUNNING": + if pname in critical_process_list: + result['exited_critical_process'].append(pname) + result['status'] = False + else: + if pname in critical_process_list: + result['running_critical_process'].append(pname) + + return result + + def all_critical_process_status(self): + """ + @summary: Check the status of critical processes for all critical services + """ + result = {} + for service in self.CRITICAL_SERVICES: + result[service] = self.critical_process_status(service) + return result + def get_crm_resources(self): """ @summary: Run the "crm show resources all" command and parse its output @@ -232,43 +295,57 @@ def get_crm_resources(self): return result - def get_pmon_daemon_list(self): + def get_pmon_daemon_states(self): """ - @summary: get pmon daemon list from the config file (/usr/share/sonic/device/{platform}/{hwsku}/pmon_daemon_control.json) + @summary: get the state of each daemon in the pmon docker. + References /usr/share/sonic/device/{platform}/pmon_daemon_control.json; if a daemon is disabled in the config file, it is removed from the daemon list. + + @return: dictionary of { service_name1 : state1, ... ... } """ - full_daemon_tup = ('xcvrd', 'ledd', 'psud', 'syseepromd') + # some services are meant to have a short life span or are not part of the daemons + exemptions = ['lm-sensors', 'start.sh', 'rsyslogd'] + + daemons = self.shell('docker exec pmon supervisorctl status')['stdout_lines'] + + daemon_list = [ line.strip().split()[0] for line in daemons if len(line.strip()) > 0 ] + daemon_ctl_key_prefix = 'skip_' - daemon_list = [] daemon_config_file_path = os.path.join('/usr/share/sonic/device', self.facts["platform"], 'pmon_daemon_control.json') try: output = self.shell('cat %s' % daemon_config_file_path) json_data = json.loads(output["stdout"]) logging.debug("Original file content is %s" % str(json_data)) - for key in full_daemon_tup: + for key in daemon_list: if (daemon_ctl_key_prefix + key) not in json_data: - daemon_list.append(key) logging.debug("Daemon %s is enabled" % key) elif not json_data[daemon_ctl_key_prefix + key]: - daemon_list.append(key) logging.debug("Daemon %s is enabled" % key) else: logging.debug("Daemon %s is disabled" % key) + exemptions.append(key) except: # if pmon_daemon_control.json does not exist, the default setting is used, # all the pmon daemons are expected to be running after boot up. - daemon_list = list(full_daemon_tup) + pass + + # Collect state of services that are not on the exemption list. 
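+ # Each "supervisorctl status" output line is "<name> <state> <description>"; keep only the name and state columns.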
+ daemon_states = {} + for line in daemons: + words = line.strip().split() + if len(words) >= 2 and words[0] not in exemptions: + daemon_states[words[0]] = words[1] - logging.info("Pmon daemon list for this platform is %s" % str(daemon_list)) - return daemon_list + logging.info("Pmon daemon state list for this platform is %s" % str(daemon_states)) + return daemon_states def num_npus(self): """ return the number of NPUs on the DUT """ return self.facts["num_npu"] - + def get_syncd_docker_names(self): """ @summary: get the list of syncd dockers names for the number of NPUs present on the DUT @@ -282,6 +359,7 @@ def get_syncd_docker_names(self): for npu in range(0,num_npus): syncd_docker_names.append("syncd{}".format(npu)) return syncd_docker_names + def get_swss_docker_names(self): swss_docker_names = [] if self.facts["num_npu"] == 1: @@ -309,7 +387,7 @@ def get_networking_uptime(self): return self.get_now_time() - datetime.strptime(start_time["ExecMainStartTimestamp"], "%a %Y-%m-%d %H:%M:%S UTC") except Exception as e: - self.logger.error("Exception raised while getting networking restart time: %s" % repr(e)) + logging.error("Exception raised while getting networking restart time: %s" % repr(e)) return None def get_image_info(self): @@ -321,7 +399,7 @@ ret = {} images = [] for line in lines: - words = line.strip().split(' ') + words = line.strip().split() if len(words) == 2: if words[0] == 'Current:': ret['current'] = words[1] @@ -334,8 +412,147 @@ return ret def get_asic_type(self): - return dut.facts["asic_type"] + return self.facts["asic_type"] + + def shutdown(self, ifname): + """ + Shutdown interface specified by ifname + + Args: + ifname: the interface to shutdown + """ + return self.command("sudo config interface shutdown {}".format(ifname)) + + def no_shutdown(self, ifname): + """ + Bring up interface specified by ifname + + Args: + ifname: the interface to bring up + """ + return self.command("sudo config interface startup {}".format(ifname)) + + def get_ip_route_info(self, dstip): + """ + @summary: return route information for a destination IP + + @param dstip: destination IP (either ipv4 or ipv6) + +============ 4.19 kernel ============== +admin@vlab-01:~$ ip route list match 0.0.0.0 +default proto bgp src 10.1.0.32 metric 20 + nexthop via 10.0.0.57 dev PortChannel0001 weight 1 + nexthop via 10.0.0.59 dev PortChannel0002 weight 1 + nexthop via 10.0.0.61 dev PortChannel0003 weight 1 + nexthop via 10.0.0.63 dev PortChannel0004 weight 1 + +admin@vlab-01:~$ ip -6 route list match :: +default proto bgp src fc00:1::32 metric 20 + nexthop via fc00::72 dev PortChannel0001 weight 1 + nexthop via fc00::76 dev PortChannel0002 weight 1 + nexthop via fc00::7a dev PortChannel0003 weight 1 + nexthop via fc00::7e dev PortChannel0004 weight 1 pref medium + +============ 4.9 kernel =============== +admin@vlab-01:~$ ip route list match 0.0.0.0 +default proto 186 src 10.1.0.32 metric 20 + nexthop via 10.0.0.57 dev PortChannel0001 weight 1 + nexthop via 10.0.0.59 dev PortChannel0002 weight 1 + nexthop via 10.0.0.61 dev PortChannel0003 weight 1 + nexthop via 10.0.0.63 dev PortChannel0004 weight 1 + +admin@vlab-01:~$ ip -6 route list match :: +default via fc00::72 dev PortChannel0001 proto 186 src fc00:1::32 metric 20 pref medium +default via fc00::76 dev PortChannel0002 proto 186 src fc00:1::32 metric 20 pref medium +default via fc00::7a dev PortChannel0003 proto 186 src fc00:1::32 metric 20 pref medium +default via fc00::7e dev PortChannel0004 
proto 186 src fc00:1::32 metric 20 pref medium + + """ + + if dstip.version == 4: + rt = self.command("ip route list match {}".format(dstip))['stdout_lines'] + else: + rt = self.command("ip -6 route list match {}".format(dstip))['stdout_lines'] + + logging.info("route raw info for {}: {}".format(dstip, rt)) + + rtinfo = {'set_src': None, 'nexthops': [] } + + # parse set_src + m = re.match(r"^default proto (bgp|186) src (\S+)", rt[0]) + m1 = re.match(r"^default via (\S+) dev (\S+) proto 186 src (\S+)", rt[0]) + if m: + rtinfo['set_src'] = ipaddress.ip_address(m.group(2)) + elif m1: + rtinfo['set_src'] = ipaddress.ip_address(m1.group(3)) + + # parse nexthops + for l in rt: + m = re.search(r"(default|nexthop) via (\S+) dev (\S+)", l) + if m: + rtinfo['nexthops'].append((ipaddress.ip_address(m.group(2)), m.group(3))) + logging.info("route parsed info for {}: {}".format(dstip, rtinfo)) + + return rtinfo + + def get_bgp_neighbor_info(self, neighbor_ip): + """ + @summary: return bgp neighbor info + + @param neighbor_ip: bgp neighbor IP + """ + nbip = ipaddress.ip_address(neighbor_ip) + if nbip.version == 4: + out = self.command("vtysh -c \"show ip bgp neighbor {} json\"".format(neighbor_ip)) + else: + out = self.command("vtysh -c \"show bgp ipv6 neighbor {} json\"".format(neighbor_ip)) + nbinfo = json.loads(re.sub(r"\\\"", '"', re.sub(r"\\n", "", out['stdout']))) + logging.info("bgp neighbor {} info {}".format(neighbor_ip, nbinfo)) + + return nbinfo[str(neighbor_ip)] + + def check_bgp_session_state(self, neigh_ips, state="established"): + """ + @summary: check if current bgp session equals to the target state + + @param neigh_ips: bgp neighbor IPs + @param state: target state + """ + neigh_ok = [] + bgp_facts = self.bgp_facts()['ansible_facts'] + logging.info("bgp_facts: {}".format(bgp_facts)) + for k, v in bgp_facts['bgp_neighbors'].items(): + if v['state'] == state: + if k in neigh_ips: + neigh_ok.append(k) + logging.info("bgp neighbors that match the state: {}".format(neigh_ok)) + if len(neigh_ips) == len(neigh_ok): + return True + + return False + + def check_bgp_session_nsf(self, neighbor_ip): + """ + @summary: check if bgp neighbor session enters NSF state or not + + @param neighbor_ip: bgp neighbor IP + """ + nbinfo = self.get_bgp_neighbor_info(neighbor_ip) + if nbinfo['bgpState'].lower() == "Active".lower(): + if nbinfo['bgpStateIs'].lower() == "passiveNSF".lower(): + return True + return False + + def get_version(self): + """ + Gets the SONiC version this device is running. + + Returns: + str: the firmware version number (e.g. 
20181130.31) + """ + output = self.command("sonic-cfggen -y /etc/sonic/sonic_version.yml -v build_version") + return output["stdout_lines"][0].strip() class EosHost(AnsibleHostBase): """ @@ -350,5 +567,135 @@ def __init__(self, ansible_adhoc, hostname, user, passwd, gather_facts=False): 'ansible_network_os':'eos', \ 'ansible_user': user, \ 'ansible_password': passwd, \ + 'ansible_ssh_user': user, \ + 'ansible_ssh_pass': passwd, \ 'ansible_become_method': 'enable' } self.host.options['variable_manager'].extra_vars.update(evars) + + def shutdown(self, interface_name): + out = self.host.eos_config( + lines=['shutdown'], + parents='interface %s' % interface_name) + logging.info('Shut interface [%s]' % interface_name) + return out + + def no_shutdown(self, interface_name): + out = self.host.eos_config( + lines=['no shutdown'], + parents='interface %s' % interface_name) + logging.info('No shut interface [%s]' % interface_name) + return out + + def check_intf_link_state(self, interface_name): + show_int_result = self.host.eos_command( + commands=['show interface %s' % interface_name])[self.hostname] + return 'Up' in show_int_result['stdout_lines'][0] + + def command(self, cmd): + out = self.host.eos_command(commands=[cmd]) + return out + + def set_interface_lacp_rate_mode(self, interface_name, mode): + out = self.host.eos_config( + lines=['lacp rate %s' % mode], + parents='interface %s' % interface_name) + logging.info("Set interface [%s] lacp rate to [%s]" % (interface_name, mode)) + return out + + def kill_bgpd(self): + out = self.host.eos_config(lines=['agent Rib shutdown']) + return out + + def start_bgpd(self): + out = self.host.eos_config(lines=['no agent Rib shutdown']) + return out + + def check_bgp_session_state(self, neigh_ips, neigh_desc, state="established"): + """ + @summary: check if current bgp session equals to the target state + + @param neigh_ips: bgp neighbor IPs + @param neigh_desc: bgp neighbor description + @param state: target state + """ + neigh_ips_ok = [] + neigh_desc_ok = [] + out_v4 = self.host.eos_command( + commands=['show ip bgp summary | json'])[self.hostname] + logging.info("ip bgp summary: {}".format(out_v4)) + + out_v6 = self.host.eos_command( + commands=['show ipv6 bgp summary | json'])[self.hostname] + logging.info("ipv6 bgp summary: {}".format(out_v6)) + + for k, v in out_v4['stdout'][0]['vrfs']['default']['peers'].items(): + if v['peerState'].lower() == state.lower(): + if k in neigh_ips: + neigh_ips_ok.append(k) + if v['description'] in neigh_desc: + neigh_desc_ok.append(v['description']) + + for k, v in out_v6['stdout'][0]['vrfs']['default']['peers'].items(): + if v['peerState'].lower() == state.lower(): + if k in neigh_ips: + neigh_ips_ok.append(k) + if v['description'] in neigh_desc: + neigh_desc_ok.append(v['description']) + + if len(neigh_ips) == len(neigh_ips_ok) and len(neigh_desc) == len(neigh_desc_ok): + return True + + return False + + +class FanoutHost(): + """ + @summary: Class for Fanout switch + + For running ansible module on the Fanout switch + """ + + def __init__(self, ansible_adhoc, os, hostname, device_type, user, passwd): + self.hostname = hostname + self.type = device_type + self.host_to_fanout_port_map = {} + self.fanout_to_host_port_map = {} + if os == 'sonic': + self.os = os + self.host = SonicHost(ansible_adhoc, hostname) + else: + # Use eos host if the os type is unknown + self.os = 'eos' + self.host = EosHost(ansible_adhoc, hostname, user, passwd) + + def get_fanout_os(self): + return self.os +
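+ # shutdown/no_shutdown/command below are delegated to the underlying SonicHost/EosHost instance.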
+ def get_fanout_type(self): + return self.type + + def shutdown(self, interface_name): + return self.host.shutdown(interface_name)[self.hostname] + + def no_shutdown(self, interface_name): + return self.host.no_shutdown(interface_name)[self.hostname] + + def command(self, cmd): + return self.host.command(cmd)[self.hostname] + + def __str__(self): + return "{ os: '%s', hostname: '%s', device_type: '%s' }" % (self.os, self.hostname, self.type) + + def __repr__(self): + return self.__str__() + + def add_port_map(self, host_port, fanout_port): + """ + The fanout switch is built from the connection graph of the + DUT. So each fanout switch instance is relevant to the + DUT instance in the test. As a result the port mapping is + unique from the DUT perspective. However, this function + needs updating when supporting multiple DUTs + """ + self.host_to_fanout_port_map[host_port] = fanout_port + self.fanout_to_host_port_map[fanout_port] = host_port diff --git a/tests/common/errors.py b/tests/common/errors.py index 25a2397a6d..be30d84117 100644 --- a/tests/common/errors.py +++ b/tests/common/errors.py @@ -1,8 +1,26 @@ """ Customize exceptions """ +from ansible.plugins.loader import callback_loader +from ansible.errors import AnsibleError + + class UnsupportedAnsibleModule(Exception): pass -class RunAnsibleModuleFail(Exception): - pass + +def dump_ansible_results(results, stdout_callback='yaml'): + cb = callback_loader.get(stdout_callback) + return cb._dump_results(results) if cb else results + + +class RunAnsibleModuleFail(AnsibleError): + + """Sub-class AnsibleError when module exceptions occur.""" + + def __init__(self, msg, results=None): + super(RunAnsibleModuleFail, self).__init__(msg) + self.results = results + + def __str__(self): + return "{}\nAnsible Results => {}".format(self.message, dump_ansible_results(self.results)) diff --git a/tests/common/fixtures/advanced_reboot.py b/tests/common/fixtures/advanced_reboot.py index 0ad9b8646b..cf6739efeb 100644 --- a/tests/common/fixtures/advanced_reboot.py +++ b/tests/common/fixtures/advanced_reboot.py @@ -5,8 +5,8 @@ import pytest import time -from common.errors import RunAnsibleModuleFail from common.mellanox_data import is_mellanox_device as isMellanoxDevice +from common.platform.ssh_utils import prepare_testbed_ssh_keys as prepareTestbedSshKeys from common.reboot import reboot as rebootDut from ptf_runner import ptf_runner @@ -25,11 +25,13 @@ class AdvancedReboot: inboot/preboot list. The class transfers number of configuration files to the dut/ptf in preparation for reboot test. Test cases can trigger test start utilizing runRebootTestcase API. ''' - def __init__(self, request, testbed_devices, testbed, **kwargs): + def __init__(self, request, duthost, ptfhost, localhost, testbed, **kwargs): ''' Class constructor. 
@param request: pytest request object - @param testbed_devices: fixture provides information about testbed devices + @param duthost: AnsibleHost instance of DUT + @param ptfhost: PTFHost for interacting with PTF through ansible + @param localhost: Localhost for interacting with localhost through ansible @param testbed: fixture provides information about testbed @param kwargs: extra parameters including reboot type ''' @@ -38,9 +40,9 @@ def __init__(self, request, testbed_devices, testbed, **kwargs): ) self.request = request - self.duthost = testbed_devices['dut'] - self.ptfhost = testbed_devices['ptf'] - self.localhost = testbed_devices['localhost'] + self.duthost = duthost + self.ptfhost = ptfhost + self.localhost = localhost self.testbed = testbed self.__dict__.update(kwargs) self.__extractTestParam() @@ -65,6 +67,7 @@ def __extractTestParam(self): self.newSonicImage = self.request.config.getoption("--new_sonic_image") self.cleanupOldSonicImages = self.request.config.getoption("--cleanup_old_sonic_images") self.readyTimeout = self.request.config.getoption("--ready_timeout") + self.replaceFastRebootScript = self.request.config.getoption("--replace_fast_reboot_script") def getHostMaxLen(self): ''' @@ -115,11 +118,16 @@ def __buildTestbedData(self): self.rebootData['dut_hostname'] = self.mgFacts['minigraph_mgmt_interface']['addr'] self.rebootData['dut_mac'] = hostFacts['ansible_Ethernet0']['macaddress'] - self.rebootData['dut_username'] = hostFacts['ansible_env']['SUDO_USER'] self.rebootData['vlan_ip_range'] = self.mgFacts['minigraph_vlan_interfaces'][0]['subnet'] self.rebootData['dut_vlan_ip'] = self.mgFacts['minigraph_vlan_interfaces'][0]['addr'] + + inventory = self.duthost.host.options['inventory'].split('/')[-1] + secrets = self.duthost.host.options['variable_manager']._hostvars[self.duthost.hostname]['secret_group_vars'] + self.rebootData['dut_username'] = secrets[inventory]['sonicadmin_user'] + self.rebootData['dut_password'] = secrets[inventory]['sonicadmin_password'] + self.rebootData['default_ip_range'] = str( - ipaddress.ip_interface(self.mgFacts['minigraph_vlan_interfaces'][0]['addr'] + '/16').network + ipaddress.ip_interface(self.mgFacts['minigraph_vlan_interfaces'][0]['addr'] + '/18').network ) for intf in self.mgFacts['minigraph_lo_interfaces']: @@ -169,7 +177,7 @@ def __validateAndBuildSadList(self): if 'vlan_port_down' in item: assert itemCnt <= self.vlanMaxCnt, ( 'Vlan count is greater than or equal to number of Vlan interfaces. 
' - 'Current val = {0} Max val = {}' + 'Current val = {0} Max val = {1}' ).format(itemCnt, self.vlanMaxCnt) if 'routing' in item: assert itemCnt <= self.hostMaxCnt, ( @@ -207,30 +215,11 @@ def __runScript(self, scripts, ansibleHost): logger.info('Running script {0} on {1}'.format(script, ansibleHost.hostname)) ansibleHost.script('scripts/' + script) - def __prepareTestbedSshKeys(self, dutUsername, dutIp): + def __prepareTestbedSshKeys(self): ''' Prepares testbed ssh keys by generating ssh key on ptf host and adding this key to known_hosts on duthost - @param dutUsername: DUT username - @param dutIp: DUT IP - ''' - logger.info('Remove old keys from ptfhost') - self.ptfhost.shell('rm -f /root/.ssh/id_rsa*') - try: - result = self.ptfhost.shell('stat /root/.ssh/known_hosts') - except RunAnsibleModuleFail: - pass # files does not exist - else: - self.ptfhost.shell('ssh-keygen -f /root/.ssh/known_hosts -R ' + dutIp) - - logger.info('Generate public key for ptf host') - self.ptfhost.shell('ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""') - result = self.ptfhost.shell('cat /root/.ssh/id_rsa.pub') - cmd = ''' - mkdir -p /home/{0}/.ssh && - echo "{1}" >> /home/{0}/.ssh/authorized_keys && - chown -R {0}:{0} /home/{0}/.ssh/ - '''.format(dutUsername, result['stdout']) - self.duthost.shell(cmd) + ''' + prepareTestbedSshKeys(self.duthost, self.ptfhost, self.rebootData['dut_username']) def __handleMellanoxDut(self): ''' @@ -299,7 +288,7 @@ def __setupTestbed(self): self.__runScript(['remove_ip.sh', 'change_mac.sh'], self.ptfhost) - self.__prepareTestbedSshKeys(self.rebootData['dut_username'], self.rebootData['dut_hostname']) + self.__prepareTestbedSshKeys() logger.info('Copy tests to the PTF container {}'.format(self.ptfhost.hostname)) self.ptfhost.copy(src='ptftests', dest='/root') @@ -307,6 +296,11 @@ logger.info('Copy ARP responder to the PTF container {}'.format(self.ptfhost.hostname)) self.ptfhost.copy(src='scripts/arp_responder.py', dest='/opt') + # Replace fast-reboot script + if self.replaceFastRebootScript: + logger.info('Replace fast-reboot script on DUT {}'.format(self.duthost.hostname)) + self.duthost.copy(src='scripts/fast-reboot', dest='/usr/bin/') + def __clearArpAndFdbTables(self): ''' Clears ARP and FDB entries @@ -408,8 +402,12 @@ def __runPtfRunner(self, rebootOper=None): ''' logger.info("Running PTF runner on PTF host: {0}".format(self.ptfhost)) - prebootOper = rebootOper if rebootOper is not None and 'routing' in rebootOper else None - inbootOper = rebootOper if rebootOper is not None and 'routing' not in rebootOper else None + # Non-routing operations (neighbor/dut lag/bgp, vlan port up/down) are performed before the dut reboot process; + # lack of 'routing' indicates it is a preboot operation + prebootOper = rebootOper if rebootOper is not None and 'routing' not in rebootOper else None + # Routing add/remove is performed during the dut reboot process; + # presence of 'routing' indicates it is an inboot operation + inbootOper = rebootOper if rebootOper is not None and 'routing' in rebootOper else None self.__updateAndRestartArpResponder(rebootOper) @@ -423,6 +421,7 @@ platform="remote", params={ "dut_username" : self.rebootData['dut_username'], + "dut_password" : self.rebootData['dut_password'], "dut_hostname" : self.rebootData['dut_hostname'], "reboot_limit_in_seconds" : self.rebootLimit, "reboot_type" :self.rebootType, @@ -452,15 +451,17 @@ def __restorePrevImage(self): ''' Restore previous 
image and reboot DUT ''' - logger.info('Restore current image') - self.duthost.shell('sonic_installer set_default {0}'.format(self.currentImage)) + currentImage = self.duthost.shell('sonic_installer list | grep Current | cut -f2 -d " "')['stdout'] + if currentImage != self.currentImage: + logger.info('Restore current image') + self.duthost.shell('sonic_installer set_default {0}'.format(self.currentImage)) - rebootDut( - self.duthost, - self.localhost, - reboot_type=self.rebootType.replace('-reboot', ''), - wait = 180 + self.readyTimeout - ) + rebootDut( + self.duthost, + self.localhost, + reboot_type=self.rebootType.replace('-reboot', ''), + wait = self.readyTimeout + ) def tearDown(self): ''' @@ -480,11 +481,13 @@ self.__restorePrevImage() @pytest.fixture -def get_advanced_reboot(request, testbed_devices, testbed): +def get_advanced_reboot(request, duthost, ptfhost, localhost, testbed): ''' Pytest test fixture that provides access to AdvancedReboot test fixture @param request: pytest request object - @param testbed_devices: fixture provides information about testbed devices + @param duthost: AnsibleHost instance of DUT + @param ptfhost: PTFHost for interacting with PTF through ansible + @param localhost: Localhost for interacting with localhost through ansible @param testbed: fixture provides information about testbed ''' instances = [] @@ -494,7 +497,7 @@ def get_advanced_reboot(**kwargs): ''' API that returns instances of AdvancedReboot class ''' assert len(instances) == 0, "Only one instance of reboot data is allowed" - advancedReboot = AdvancedReboot(request, testbed_devices, testbed, **kwargs) + advancedReboot = AdvancedReboot(request, duthost, ptfhost, localhost, testbed, **kwargs) instances.append(advancedReboot) return advancedReboot diff --git a/tests/common/fixtures/conn_graph_facts.py b/tests/common/fixtures/conn_graph_facts.py index d1f7089183..e196b24051 100644 --- a/tests/common/fixtures/conn_graph_facts.py +++ b/tests/common/fixtures/conn_graph_facts.py @@ -3,10 +3,8 @@ import json @pytest.fixture(scope="module") -def conn_graph_facts(testbed_devices): +def conn_graph_facts(duthost, localhost): conn_graph_facts = dict() - dut = testbed_devices["dut"] - localhost = testbed_devices["localhost"] base_path = os.path.dirname(os.path.realpath(__file__)) # json file contains mapping from inventory file name to its corresponding graph file @@ -14,9 +12,21 @@ if os.path.exists(inv_mapping_file): with open(inv_mapping_file) as fd: inv_map = json.load(fd) - inv_file = dut.host.options['inventory'].split('/')[-1] - if inv_map and inv_file in inv_map: - lab_conn_graph_file = os.path.join(base_path, "../../../ansible/files/{}".format(inv_map[inv_file])) + inv_opt = duthost.host.options['inventory'] + inv_files = [] + if isinstance(inv_opt, str): + inv_files = [inv_opt] # Wrap the single path in a list so it can be iterated below + elif isinstance(inv_opt, list) or isinstance(inv_opt, tuple): + inv_files = duthost.host.options['inventory'] - conn_graph_facts = localhost.conn_graph_facts(host=dut.hostname, filename=lab_conn_graph_file)['ansible_facts'] + for inv_file in inv_files: + inv_file = os.path.basename(inv_file) + + # Loop through the list of inventory files supplied in --inventory argument. + # For the first inventory file that has a mapping in inv_mapping.json, return + # its conn_graph_facts. 
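+ # If none of the inventory files has a mapping, the empty dict initialized at the top is returned.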
+ if inv_map and inv_file in inv_map: + lab_conn_graph_file = os.path.join(base_path, "../../../ansible/files/{}".format(inv_map[inv_file])) + conn_graph_facts = localhost.conn_graph_facts(host=duthost.hostname, filename=lab_conn_graph_file)['ansible_facts'] + return conn_graph_facts return conn_graph_facts diff --git a/tests/common/fixtures/populate_fdb.py b/tests/common/fixtures/populate_fdb.py new file mode 100644 index 0000000000..54aee84643 --- /dev/null +++ b/tests/common/fixtures/populate_fdb.py @@ -0,0 +1,128 @@ +import json +import logging +import pytest + +from ptf_runner import ptf_runner + +logger = logging.getLogger(__name__) + +class PopulateFdb: + """ + PopulateFdb populates DUT FDB entries + + It accepts MAC to IP ratio (default 100:1) and packet count (default 2000). It generates packets with + the provided ratio of distinct MAC addresses to distinct IP addresses. The IP addresses start from the VLAN + address pool. + + Command line sample: + pytest testbed_setup/test_populate_fdb.py --testbed= --inventory= --testbed_file= \ + --host-pattern={|all} --module-path= --mac_to_ip_ratio=100:1 --packet_count=8000 + + where: + mac_to_ip_ratio: Ratio of distinct MAC addresses to distinct IP addresses assigned to VM + packet_count: Number of packets to be created and sent to DUT + start_mac: VM start MAC address. Subsequent MAC addresses are increments of 1 on top of start MAC + """ + PTFRUNNER_QLEN = 1000 + VLAN_CONFIG_FILE = "/tmp/vlan_config.json" + + def __init__(self, request, duthost, ptfhost): + """ + Class constructor + + Args: + request: pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + """ + self.macToIpRatio = request.config.getoption("--mac_to_ip_ratio") + self.startMac = request.config.getoption("--start_mac") + self.packetCount = request.config.getoption("--packet_count") + + self.duthost = duthost + self.ptfhost = ptfhost + + def __prepareVlanConfigData(self): + """ + Prepares Vlan Configuration data + + Args: + None + + Returns: + None + """ + mgVlanPorts = [] + mgFacts = self.duthost.minigraph_facts(host=self.duthost.hostname)["ansible_facts"] + for vlan, config in mgFacts["minigraph_vlans"].items(): + for port in config["members"]: + mgVlanPorts.append({ + "port": port, + "vlan": vlan, + "index": mgFacts["minigraph_port_indices"][port] + }) + + vlanConfigData = { + "vlan_ports": mgVlanPorts, + "vlan_interfaces": {vlan["attachto"]: vlan for vlan in mgFacts["minigraph_vlan_interfaces"]}, + "dut_mac": self.duthost.setup()["ansible_facts"]["ansible_Ethernet0"]["macaddress"] + } + + with open(self.VLAN_CONFIG_FILE, 'w') as file: + file.write(json.dumps(vlanConfigData, indent=4)) + + logger.info("Copying Vlan config file to {0}".format(self.ptfhost.hostname)) + self.ptfhost.copy(src=self.VLAN_CONFIG_FILE, dest="/tmp/") + + logger.info("Copying ptftests to {0}".format(self.ptfhost.hostname)) + self.ptfhost.copy(src="ptftests", dest="/root") + + def run(self): + """ + Populates DUT FDB entries + + Args: + None + + Returns: + None + """ + self.__prepareVlanConfigData() + + logger.info("Populate DUT FDB entries") + ptf_runner( + self.ptfhost, + "ptftests", + "populate_fdb.PopulateFdb", + qlen=self.PTFRUNNER_QLEN, + platform_dir="ptftests", + platform="remote", + params={ + "start_mac": self.startMac, + "config_data": self.VLAN_CONFIG_FILE, + "packet_count": self.packetCount,
"mac_to_ip_ratio": self.macToIpRatio, + }, + log_file="/tmp/populate_fdb.PopulateFdb.log" + ) + +@pytest.fixture +def populate_fdb(request, duthost, ptfhost): + """ + Populates DUT FDB entries + + Args: + request: pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + """ + populateFdb = PopulateFdb(request, duthost, ptfhost) + + populateFdb.run() diff --git a/tests/common/helpers/assertions.py b/tests/common/helpers/assertions.py new file mode 100644 index 0000000000..bc09c3e039 --- /dev/null +++ b/tests/common/helpers/assertions.py @@ -0,0 +1,6 @@ +import pytest + +def pytest_assert(condition, message = None): + __tracebackhide__ = True + if not condition: + pytest.fail(message) diff --git a/tests/common/platform/daemon_utils.py b/tests/common/platform/daemon_utils.py new file mode 100644 index 0000000000..8b3ff66b26 --- /dev/null +++ b/tests/common/platform/daemon_utils.py @@ -0,0 +1,23 @@ +""" +Helper script for checking status of platform daemon status + +This script contains re-usable functions for checking status of platform daemon status. +""" +import logging + + +def check_pmon_daemon_status(dut): + """ + @summary: check daemon running status inside pmon docker. + + This function use command "supervisorctl status" inside the container and check the status from the command output. + If the daemon status is "RUNNING" then return True, if daemon not exist or status is not "RUNNING", return false. + """ + daemons = dut.get_pmon_daemon_states() + ret = True + for daemon, state in daemons.items(): + logging.debug("Daemon %s status is %s" % (daemon, state)) + if state != 'RUNNING': + ret = False + + return ret diff --git a/tests/common/platform/device_utils.py b/tests/common/platform/device_utils.py new file mode 100644 index 0000000000..e4978d3ea1 --- /dev/null +++ b/tests/common/platform/device_utils.py @@ -0,0 +1,23 @@ +""" +Helper script for fanout switch operations +""" + +def fanout_switch_port_lookup(fanout_switches, dut_port): + """ + look up the fanout switch instance and the fanout switch port + connecting to the dut_port + + Args: + fanout_switches (list FanoutHost): list of fanout switch + instances. 
+ dut_port (str): port name on the DUT + + Returns: + None, None if fanout switch instance and port is not found + FanoutHost, Portname(str) if found + """ + for _, fanout in fanout_switches.items(): + if dut_port in fanout.host_to_fanout_port_map: + return fanout, fanout.host_to_fanout_port_map[dut_port] + + return None, None diff --git a/tests/common/platform/ssh_utils.py b/tests/common/platform/ssh_utils.py new file mode 100644 index 0000000000..05726e2c24 --- /dev/null +++ b/tests/common/platform/ssh_utils.py @@ -0,0 +1,45 @@ +import logging + +from common.errors import RunAnsibleModuleFail + +logger = logging.getLogger(__name__) + +def prepare_testbed_ssh_keys(duthost, ptfhost, dut_username): + ''' + Prepares testbed ssh keys by generating ssh key on ptf host and adding this key to known_hosts on duthost + + @param duthost: instance of AnsibleHost class for DUT + @param ptfhost: instance of AnsibleHost class for PTF + @param dut_username: DUT username + ''' + dut_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] + + logger.info('Remove old keys from ptfhost') + ptfhost.shell('rm -f /root/.ssh/id_rsa*') + try: + ptfhost.shell('stat /root/.ssh/known_hosts') + except RunAnsibleModuleFail: + pass # file does not exist + else: + ptfhost.shell('ssh-keygen -f /root/.ssh/known_hosts -R ' + dut_ip) + + logger.info('Generate public key for ptf host') + ptfhost.file(path='/root/.ssh/', mode='u+rwx,g-rwx,o-rwx', state='directory') + result = ptfhost.openssh_keypair( + path='/root/.ssh/id_rsa', + size=2048, + force=True, + type='rsa', + mode='u=rw,g=,o=' + ) + # There is an error with id_rsa.pub access permissions documented in: + # https://github.com/ansible/ansible/issues/61411 + # @TODO: remove the following line when upgrading to Ansible 2.9x + ptfhost.file(path='/root/.ssh/id_rsa.pub', mode='u=rw,g=,o=') + + cmd = ''' + mkdir -p /home/{0}/.ssh && + echo "{1}" >> /home/{0}/.ssh/authorized_keys && + chown -R {0}:{0} /home/{0}/.ssh/ + '''.format(dut_username, result['public_key']) + duthost.shell(cmd) diff --git a/tests/common/plugins/ansible_fixtures.py index ac31dd2701..50cf2f0ceb 100644 --- a/tests/common/plugins/ansible_fixtures.py +++ b/tests/common/plugins/ansible_fixtures.py @@ -15,17 +15,3 @@ def ansible_adhoc(request): def init_host_mgr(**kwargs): return plugin.initialize(request.config, request, **kwargs) return init_host_mgr - - -# Same as for ansible_adhoc, let's have localhost fixture with session scope -# as it feels that during session run the localhost object should persist unchanged. -# Also, we have autouse=True here to force pytest to evaluate localhost fixture to overcome -# some hidden dependency between localhost and ansible_adhoc (even with default scope) (FIXME) -@pytest.fixture(scope='session', autouse=True) -def localhost(request): - """Return a host manager representing localhost.""" - # NOTE: Do not use ansible_adhoc as a dependent fixture since that will assert specific command-line parameters have - # been supplied. In the case of localhost, the parameters are provided as kwargs below. 
- plugin = request.config.pluginmanager.getplugin("ansible") - return plugin.initialize(request.config, request, inventory='localhost,', connection='local', - host_pattern='localhost').localhost diff --git a/tests/common/plugins/dut_monitor/pytest_dut_monitor.py b/tests/common/plugins/dut_monitor/pytest_dut_monitor.py index 2dce1f5614..df1cd92bfd 100644 --- a/tests/common/plugins/dut_monitor/pytest_dut_monitor.py +++ b/tests/common/plugins/dut_monitor/pytest_dut_monitor.py @@ -28,15 +28,15 @@ class DUTMonitorPlugin(object): def __init__(self, thresholds): self.thresholds = thresholds - @pytest.fixture(autouse=True, scope="session") - def dut_ssh(self, testbed, creds): + @pytest.fixture(autouse=True, scope="module") + def dut_ssh(self, duthost, creds): """Establish SSH connection with DUT""" - ssh = DUTMonitorClient(host=testbed["dut"], user=creds["sonicadmin_user"], + ssh = DUTMonitorClient(host=duthost.hostname, user=creds["sonicadmin_user"], password=creds["sonicadmin_password"]) yield ssh @pytest.fixture(autouse=True, scope="function") - def dut_monitor(self, dut_ssh, localhost, duthost, testbed_devices): + def dut_monitor(self, dut_ssh, localhost, duthost): """ For each test item starts monitoring of hardware resources consumption on the DUT """ @@ -50,8 +50,8 @@ def dut_monitor(self, dut_ssh, localhost, duthost, testbed_devices): general_thresholds = yaml.safe_load(stream) dut_thresholds = general_thresholds["default"] - dut_platform = testbed_devices["dut"].facts["platform"] - dut_hwsku = testbed_devices["dut"].facts["hwsku"] + dut_platform = duthost.facts["platform"] + dut_hwsku = duthost.facts["hwsku"] if dut_platform in general_thresholds: dut_thresholds.update(general_thresholds[dut_platform]["default"]) if dut_hwsku in general_thresholds[dut_platform]["hwsku"]: diff --git a/tests/common/plugins/loganalyzer/loganalyzer.py b/tests/common/plugins/loganalyzer/loganalyzer.py index eee59ce1a5..54c77821e5 100644 --- a/tests/common/plugins/loganalyzer/loganalyzer.py +++ b/tests/common/plugins/loganalyzer/loganalyzer.py @@ -36,6 +36,7 @@ def __init__(self, ansible_host, marker_prefix, dut_run_dir="/tmp"): self.expect_regex = [] self.ignore_regex = [] self._markers = [] + self.fail = True def _add_end_marker(self, marker): """ @@ -50,6 +51,13 @@ def _add_end_marker(self, marker): logging.debug("Adding end marker '{}'".format(marker)) self.ansible_host.command(cmd) + def __call__(self, **kwargs): + """ + Pass additional arguments when the instance is called + """ + self.fail = kwargs.get("fail", True) + return self + def __enter__(self): """ Store start markers which are used in analyze phase. @@ -60,7 +68,7 @@ def __exit__(self, *args): """ Analyze syslog messages. 
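+        Honors the fail flag set via __call__; a minimal usage sketch
+        (illustrative, assuming "loganalyzer" is an instance of this class):
+            with loganalyzer(fail=False):
+                duthost.command("some command")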
""" - self.analyze(self._markers.pop()) + self.analyze(self._markers.pop(), fail=self.fail) def _verify_log(self, result): """ @@ -108,13 +116,14 @@ def run_cmd(self, callback, *args, **kwargs): @return: Callback execution result """ marker = self.init() + fail = kwargs.pop("fail", True) try: call_result = callback(*args, **kwargs) except Exception as err: logging.error("Error during callback execution:\n{}".format(err)) - logging.debug("Log analysis result\n".format(self.analyze(marker))) + logging.debug("Log analysis result\n".format(self.analyze(marker, fail=fail))) raise err - self.analyze(marker) + self.analyze(marker, fail=fail) return call_result diff --git a/tests/common/plugins/psu_controller/snmp_psu_controllers.py b/tests/common/plugins/psu_controller/snmp_psu_controllers.py index 30b7d427bd..fc926d380d 100644 --- a/tests/common/plugins/psu_controller/snmp_psu_controllers.py +++ b/tests/common/plugins/psu_controller/snmp_psu_controllers.py @@ -6,43 +6,83 @@ import logging from controller_base import PsuControllerBase -from controller_base import run_local_cmd +from pysnmp.proto import rfc1902 +from pysnmp.entity.rfc3413.oneliner import cmdgen -def get_psu_controller_type(psu_controller_host): +class snmpPsuController(PsuControllerBase): """ - @summary: Use SNMP to get the type of PSU controller host - @param psu_controller_host: IP address of PSU controller host - @return: Returns type string of the specified PSU controller host - """ - result = None - cmd = "snmpget -v 1 -c public -Ofenqv %s .1.3.6.1.2.1.1.1.0" % psu_controller_host - try: - stdout = run_local_cmd(cmd) - - lines = stdout.splitlines() - if len(lines) > 0: - result = lines[0].strip() - result = result.replace('"', '') - except Exception as e: - logging.debug("Failed to get psu controller type, exception: " + repr(e)) - - return result - + PSU Controller class for SNMP conrolled PSUs - 'Sentry Switched CDU' and 'APC Web/SNMP Management Card' -class SentrySwitchedCDU(PsuControllerBase): + This class implements the interface defined in PsuControllerBase class for SNMP conrtolled PDU type + 'Sentry Switched CDU' and 'APC Web/SNMP Management Card' """ - PSU Controller class for 'Sentry Switched CDU' - This class implements the interface defined in PsuControllerBase class for PDU type 'Sentry Switched CDU' - """ - PORT_NAME_BASE_OID = ".1.3.6.1.4.1.1718.3.2.3.1.3.1" - PORT_STATUS_BASE_OID = ".1.3.6.1.4.1.1718.3.2.3.1.5.1" - PORT_CONTROL_BASE_OID = ".1.3.6.1.4.1.1718.3.2.3.1.11.1" - STATUS_ON = "1" - STATUS_OFF = "0" - CONTROL_ON = "1" - CONTROL_OFF = "2" + def get_psu_controller_type(self): + """ + @summary: Use SNMP to get the type of PSU controller host + @param psu_controller_host: IP address of PSU controller host + @return: Returns type string of the specified PSU controller host + """ + pSYSDESCR = ".1.3.6.1.2.1.1.1.0" + SYSDESCR = "1.3.6.1.2.1.1.1.0" + psu = None + cmdGen = cmdgen.CommandGenerator() + snmp_auth = cmdgen.CommunityData('public') + errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( + snmp_auth, + cmdgen.UdpTransportTarget((self.controller, 161), timeout=5.0), + cmdgen.MibVariable(pSYSDESCR,), + ) + if errorIndication: + logging.info("Failed to get psu controller type, exception: " + str(errorIndication)) + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if current_oid == SYSDESCR: + psu = current_val + if psu is None: + self.psuType = None + return + if 'Sentry Switched CDU' in psu: + self.psuType = "SENTRY" + if 'APC Web/SNMP 
Management Card' in psu:
+            self.psuType = "APC"
+        return
+
+    def psuCntrlOid(self):
+        """
+        Define OIDs based on the PSU type
+        """
+        # MIB OIDs for 'APC Web/SNMP Management Card'
+        APC_PORT_NAME_BASE_OID = "1.3.6.1.4.1.318.1.1.4.4.2.1.4"
+        APC_PORT_STATUS_BASE_OID = "1.3.6.1.4.1.318.1.1.12.3.5.1.1.4"
+        APC_PORT_CONTROL_BASE_OID = "1.3.6.1.4.1.318.1.1.12.3.3.1.1.4"
+        # MIB OID for 'Sentry Switched CDU'
+        SENTRY_PORT_NAME_BASE_OID = "1.3.6.1.4.1.1718.3.2.3.1.3.1"
+        SENTRY_PORT_STATUS_BASE_OID = "1.3.6.1.4.1.1718.3.2.3.1.5.1"
+        SENTRY_PORT_CONTROL_BASE_OID = "1.3.6.1.4.1.1718.3.2.3.1.11.1"
+        self.STATUS_ON = "1"
+        self.STATUS_OFF = "0"
+        self.CONTROL_ON = "1"
+        self.CONTROL_OFF = "2"
+        if self.psuType == "APC":
+            self.pPORT_NAME_BASE_OID = '.'+APC_PORT_NAME_BASE_OID
+            self.pPORT_STATUS_BASE_OID = '.'+APC_PORT_STATUS_BASE_OID
+            self.pPORT_CONTROL_BASE_OID = '.'+APC_PORT_CONTROL_BASE_OID
+            self.PORT_NAME_BASE_OID = APC_PORT_NAME_BASE_OID
+            self.PORT_STATUS_BASE_OID = APC_PORT_STATUS_BASE_OID
+            self.PORT_CONTROL_BASE_OID = APC_PORT_CONTROL_BASE_OID
+        elif self.psuType == "SENTRY":
+            self.pPORT_NAME_BASE_OID = '.'+SENTRY_PORT_NAME_BASE_OID
+            self.pPORT_STATUS_BASE_OID = '.'+SENTRY_PORT_STATUS_BASE_OID
+            self.pPORT_CONTROL_BASE_OID = '.'+SENTRY_PORT_CONTROL_BASE_OID
+            self.PORT_NAME_BASE_OID = SENTRY_PORT_NAME_BASE_OID
+            self.PORT_STATUS_BASE_OID = SENTRY_PORT_STATUS_BASE_OID
+            self.PORT_CONTROL_BASE_OID = SENTRY_PORT_CONTROL_BASE_OID
+        else:
+            pass
 
    def _get_pdu_ports(self):
        """
@@ -51,17 +91,22 @@ def _get_pdu_ports(self):
        The PDU ports connected to DUT must have hostname of DUT configured in port name/description.
        This method depends on this configuration to find out the PDU ports connected to PSUs of specific DUT.
        """
-        try:
-            cmd = "snmpwalk -v 1 -c public -Ofenq %s %s " % (self.controller, self.PORT_NAME_BASE_OID)
-            stdout = run_local_cmd(cmd)
-            for line in stdout.splitlines():
-                if self.hostname in line:  # PDU port name/description should have DUT hostname
-                    fields = line.split()
-                    if len(fields) == 2:
-                        # Remove the preceding PORT_NAME_BASE_OID, remaining string is the PDU port ID
-                        self.pdu_ports.append(fields[0].replace(self.PORT_NAME_BASE_OID, ''))
-        except Exception as e:
-            logging.debug("Failed to get ports controlling PSUs of DUT, exception: " + repr(e))
+        cmdGen = cmdgen.CommandGenerator()
+        snmp_auth = cmdgen.CommunityData('public')
+        errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
+            snmp_auth,
+            cmdgen.UdpTransportTarget((self.controller, 161)),
+            cmdgen.MibVariable(self.pPORT_NAME_BASE_OID,),
+        )
+        if errorIndication:
+            logging.debug("Failed to get ports controlling PSUs of DUT, error: " + str(errorIndication))
+        for varBinds in varTable:
+            for oid, val in varBinds:
+                current_oid = oid.prettyPrint()
+                current_val = val.prettyPrint()
+                if self.hostname.lower() in current_val.lower():
+                    # Remove the preceding PORT_NAME_BASE_OID, remaining string is the PDU port ID
+                    self.pdu_ports.append(current_oid.replace(self.PORT_NAME_BASE_OID, ''))
 
    def __init__(self, hostname, controller):
        logging.info("Initializing " + self.__class__.__name__)
@@ -69,6 +114,9 @@ def __init__(self, hostname, controller):
        self.hostname = hostname
        self.controller = controller
        self.pdu_ports = []
+        self.psuType = None
+        self.get_psu_controller_type()
+        self.psuCntrlOid()
        self._get_pdu_ports()
        logging.info("Initialized " + self.__class__.__name__)
@@ -89,16 +137,17 @@ def turn_on_psu(self, psu_id):
        @param psu_id: ID of the PSU on SONiC DUT
        @return: Return True if the command for turning on power executes
            successfully. Otherwise return False.
        """
-        try:
-            idx = int(psu_id) % len(self.pdu_ports)
-            port_oid = self.PORT_CONTROL_BASE_OID + self.pdu_ports[idx]
-            cmd = "snmpset -v1 -C q -c private %s %s i %s" % (self.controller, port_oid, self.CONTROL_ON)
-            run_local_cmd(cmd)
-            logging.info("Turned on PSU %s" % str(psu_id))
-            return True
-        except Exception as e:
-            logging.debug("Failed to turn on PSU %s, exception: %s" % (str(psu_id), repr(e)))
+        port_oid = self.pPORT_CONTROL_BASE_OID + self.pdu_ports[rfc1902.Integer(psu_id)]
+        errorIndication, errorStatus, _, _ = \
+            cmdgen.CommandGenerator().setCmd(
+                cmdgen.CommunityData('private'),
+                cmdgen.UdpTransportTarget((self.controller, 161)),
+                (port_oid, rfc1902.Integer(self.CONTROL_ON)),
+            )
+        if errorIndication or errorStatus != 0:
+            logging.debug("Failed to turn on PSU %s, error: %s" % (str(psu_id), str(errorStatus)))
            return False
+        return True
 
    def turn_off_psu(self, psu_id):
        """
@@ -117,16 +166,17 @@ def turn_off_psu(self, psu_id):
        @param psu_id: ID of the PSU on SONiC DUT
        @return: Return True if the command for turning off power executes
            successfully. Otherwise return False.
        """
-        try:
-            idx = int(psu_id) % len(self.pdu_ports)
-            port_oid = self.PORT_CONTROL_BASE_OID + self.pdu_ports[idx]
-            cmd = "snmpset -v1 -C q -c private %s %s i %s" % (self.controller, port_oid, self.CONTROL_OFF)
-            run_local_cmd(cmd)
-            logging.info("Turned off PSU %s" % str(psu_id))
-            return True
-        except Exception as e:
-            logging.debug("Failed to turn off PSU %s, exception: %s" % (str(psu_id), repr(e)))
+        port_oid = self.pPORT_CONTROL_BASE_OID + self.pdu_ports[rfc1902.Integer(psu_id)]
+        errorIndication, errorStatus, _, _ = \
+            cmdgen.CommandGenerator().setCmd(
+                cmdgen.CommunityData('private'),
+                cmdgen.UdpTransportTarget((self.controller, 161)),
+                (port_oid, rfc1902.Integer(self.CONTROL_OFF)),
+            )
+        if errorIndication or errorStatus != 0:
+            logging.debug("Failed to turn off PSU %s, error: %s" % (str(psu_id), str(errorStatus)))
            return False
+        return True
 
    def get_psu_status(self, psu_id=None):
        """
@@ -149,22 +199,28 @@ def get_psu_status(self, psu_id=None):
        The psu_id in the returned result is an integer starting from 0.
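+        Example of the returned data (illustrative):
+            [{"psu_id": 0, "psu_on": True}]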
""" results = [] - try: - cmd = "snmpwalk -v 1 -c public -Ofenq %s %s " % (self.controller, self.PORT_STATUS_BASE_OID) - stdout = run_local_cmd(cmd) - for line in stdout.splitlines(): + cmdGen = cmdgen.CommandGenerator() + snmp_auth = cmdgen.CommunityData('public') + errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( + snmp_auth, + cmdgen.UdpTransportTarget((self.controller, 161)), + cmdgen.MibVariable(self.pPORT_STATUS_BASE_OID,), + ) + if errorIndication: + logging.debug("Failed to get ports controlling PSUs of DUT, exception: " + str(errorIndication)) + for varBinds in varTable: + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() for idx, port in enumerate(self.pdu_ports): port_oid = self.PORT_STATUS_BASE_OID + port - fields = line.strip().split() - if len(fields) == 2 and fields[0] == port_oid: - status = {"psu_id": idx, "psu_on": True if fields[1] == self.STATUS_ON else False} + if current_oid == port_oid: + status = {"psu_id": idx, "psu_on": True if current_val == self.STATUS_ON else False} results.append(status) - if psu_id is not None: - idx = int(psu_id) % len(self.pdu_ports) - results = results[idx:idx+1] - logging.info("Got PSU status: %s" % str(results)) - except Exception as e: - logging.debug("Failed to get psu status, exception: " + repr(e)) + if psu_id is not None: + idx = int(psu_id) % len(self.pdu_ports) + results = results[idx:idx+1] + logging.info("Got PSU status: %s" % str(results)) return results def close(self): @@ -176,13 +232,4 @@ def get_psu_controller(controller_ip, dut_hostname): @summary: Factory function to create the actual PSU controller object. @return: The actual PSU controller object. Returns None if something went wrong. """ - - psu_controller_type = get_psu_controller_type(controller_ip) - if not psu_controller_type: - return None - - if "Sentry Switched CDU" in psu_controller_type: - logging.info("Initializing PSU controller") - return SentrySwitchedCDU(dut_hostname, controller_ip) - - return None + return snmpPsuController(dut_hostname, controller_ip) diff --git a/tests/common/plugins/ptfadapter/__init__.py b/tests/common/plugins/ptfadapter/__init__.py index 7e890f50c4..4b7cf76c5e 100644 --- a/tests/common/plugins/ptfadapter/__init__.py +++ b/tests/common/plugins/ptfadapter/__init__.py @@ -3,7 +3,6 @@ import pytest from ptfadapter import PtfTestAdapter -from ansible_host import AnsibleHost DEFAULT_PTF_NN_PORT = 10900 DEFAULT_DEVICE_NUM = 0 diff --git a/tests/common/plugins/sanity_check/README.md b/tests/common/plugins/sanity_check/README.md index 0fa9edfeb3..22473d5c23 100644 --- a/tests/common/plugins/sanity_check/README.md +++ b/tests/common/plugins/sanity_check/README.md @@ -20,7 +20,7 @@ pytest_plugins = [ sonic-mgmt/tests/common/plugins/sanity_check: ``` @pytest.fixture(scope="module", autouse=True) -def sanity_check(testbed_devices, request): +def sanity_check(localhost, duthost, request, fanouthosts): ... 
``` diff --git a/tests/common/plugins/sanity_check/__init__.py b/tests/common/plugins/sanity_check/__init__.py index d9640ac47f..644ed152c9 100644 --- a/tests/common/plugins/sanity_check/__init__.py +++ b/tests/common/plugins/sanity_check/__init__.py @@ -49,15 +49,12 @@ def _update_check_items(old_items, new_items, supported_items): @pytest.fixture(scope="module", autouse=True) -def sanity_check(testbed_devices, request): +def sanity_check(localhost, duthost, request, fanouthosts): logger.info("Start pre-test sanity check") - dut = testbed_devices["dut"] - localhost = testbed_devices["localhost"] - skip_sanity = False allow_recover = False - recover_method = "config_reload" + recover_method = "adaptive" check_items = set(copy.deepcopy(constants.SUPPORTED_CHECK_ITEMS)) # Default check items post_check = False @@ -72,7 +69,7 @@ def sanity_check(testbed_devices, request): logger.info("Process marker %s in script. m.args=%s, m.kwargs=%s" % (m.name, str(m.args), str(m.kwargs))) skip_sanity = customized_sanity_check.kwargs.get("skip_sanity", False) allow_recover = customized_sanity_check.kwargs.get("allow_recover", False) - recover_method = customized_sanity_check.kwargs.get("recover_method", "config_reload") + recover_method = customized_sanity_check.kwargs.get("recover_method", "adaptive") if allow_recover and recover_method not in constants.RECOVER_METHODS: pytest.warning("Unsupported recover method") logger.info("Fall back to use default recover method 'config_reload'") @@ -102,19 +99,19 @@ def sanity_check(testbed_devices, request): logger.info("No sanity check item is specified, no post-test sanity check") return - print_logs(dut, constants.PRINT_LOGS) - check_results = do_checks(dut, check_items) + print_logs(duthost, constants.PRINT_LOGS) + check_results = do_checks(duthost, check_items) logger.info("!!!!!!!!!!!!!!!! Pre-test sanity check results: !!!!!!!!!!!!!!!!\n%s" % \ json.dumps(check_results, indent=4)) if any([result["failed"] for result in check_results]): if not allow_recover: - pytest.fail("Pre-test sanity check failed, allow_recover=False") + pytest.fail("Pre-test sanity check failed, allow_recover=False {}".format(check_results)) return logger.info("Pre-test sanity check failed, try to recover, recover_method=%s" % recover_method) - recover(dut, localhost, recover_method) + recover(duthost, localhost, fanouthosts, check_results, recover_method) logger.info("Run sanity check again after recovery") - new_check_results = do_checks(dut, check_items) + new_check_results = do_checks(duthost, check_items) logger.info("!!!!!!!!!!!!!!!! Pre-test sanity check after recovery results: !!!!!!!!!!!!!!!!\n%s" % \ json.dumps(new_check_results, indent=4)) if any([result["failed"] for result in new_check_results]): @@ -131,7 +128,7 @@ def sanity_check(testbed_devices, request): logger.info("No post-test check is required. Done post-test sanity check") return - post_check_results = do_checks(dut, check_items) + post_check_results = do_checks(duthost, check_items) logger.info("!!!!!!!!!!!!!!!! 
Post-test sanity check results: !!!!!!!!!!!!!!!!\n%s" % \ json.dumps(post_check_results, indent=4)) if any([result["failed"] for result in post_check_results]): diff --git a/tests/common/plugins/sanity_check/checks.py b/tests/common/plugins/sanity_check/checks.py index ba2bd687ba..0c322d2901 100644 --- a/tests/common/plugins/sanity_check/checks.py +++ b/tests/common/plugins/sanity_check/checks.py @@ -97,6 +97,8 @@ def check_interfaces(dut): return check_result def check_dbmemory(dut): + logger.info("Checking database memory...") + total_omem = 0 re_omem = re.compile("omem=(\d+)") res = dut.command("/usr/bin/redis-cli client list") @@ -115,6 +117,46 @@ def check_dbmemory(dut): logger.info("Done checking database memory") return check_result +def check_processes(dut): + logger.info("Checking process status...") + + networking_uptime = dut.get_networking_uptime().seconds + timeout = max((SYSTEM_STABILIZE_MAX_TIME - networking_uptime), 0) + interval = 20 + logger.info("networking_uptime=%d seconds, timeout=%d seconds, interval=%d seconds" % \ + (networking_uptime, timeout, interval)) + + check_result = {"failed": False, "check_item": "processes"} + if timeout == 0: # Check processes status, do not retry. + processes_status = dut.all_critical_process_status() + check_result["processes_status"] = processes_status + check_result["services_status"] = {} + for k, v in processes_status.items(): + if v['status'] == False or len(v['exited_critical_process']) > 0: + check_result['failed'] = True + check_result["services_status"].update({k: v['status']}) + else: # Retry checking processes status + start = time.time() + elapsed = 0 + while elapsed < timeout: + processes_status = dut.all_critical_process_status() + check_result["processes_status"] = processes_status + check_result["services_status"] = {} + for k, v in processes_status.items(): + if v['status'] == False or len(v['exited_critical_process']) > 0: + check_result['failed'] = True + check_result["services_status"].update({k: v['status']}) + + if check_result["failed"]: + wait(interval, msg="Not all processes are started, wait %d seconds to retry. 
Remaining time: %d %s" % \ + (interval, int(timeout - elapsed), str(check_result["processes_status"]))) + elapsed = time.time() - start + else: + break + + logger.info("Done checking processes status.") + return check_result + def do_checks(dut, check_items): results = [] for item in check_items: @@ -124,6 +166,8 @@ def do_checks(dut, check_items): results.append(check_interfaces(dut)) elif item == "dbmemory": results.append(check_dbmemory(dut)) + elif item == "processes": + results.append(check_processes(dut)) return results diff --git a/tests/common/plugins/sanity_check/constants.py b/tests/common/plugins/sanity_check/constants.py index e4d2b16ec2..ddd581726a 100644 --- a/tests/common/plugins/sanity_check/constants.py +++ b/tests/common/plugins/sanity_check/constants.py @@ -12,11 +12,12 @@ # Recover related definitions RECOVER_METHODS = { - "config_reload": {"cmd": "config reload -y", "reboot": False}, - "load_minigraph": {"cmd": "config load_minigraph -y", "reboot": False}, - "reboot": {"cmd": "reboot", "reboot": True}, - "warm_reboot": {"cmd": "warm-reboot", "reboot": True}, - "fast_reboot": {"cmd": "fast_reboot", "reboot": True} + "config_reload": {"cmd": "bash -c 'config reload -y &>/dev/null'", "reboot": False, "adaptive": False, 'recover_wait': 60}, + "load_minigraph": {"cmd": "bash -c 'config load_minigraph -y &>/dev/null'", "reboot": False, "adaptive": False, 'recover_wait': 60}, + "reboot": {"cmd": "reboot", "reboot": True, "adaptive": False, 'recover_wait': 120}, + "warm_reboot": {"cmd": "warm-reboot", "reboot": True, "adaptive": False, 'recover_wait': 120}, + "fast_reboot": {"cmd": "fast_reboot", "reboot": True, "adaptive": False, 'recover_wait': 120}, + "adaptive": {"cmd": None, "reboot": False, "adaptive": True, 'recover_wait': 30}, } # All supported recover methods -SUPPORTED_CHECK_ITEMS = ["services", "interfaces", "dbmemory"] # Supported checks +SUPPORTED_CHECK_ITEMS = ["services", "interfaces", "dbmemory", "processes"] # Supported checks diff --git a/tests/common/plugins/sanity_check/recover.py b/tests/common/plugins/sanity_check/recover.py index 0b91c85d89..a0c2633cbe 100644 --- a/tests/common/plugins/sanity_check/recover.py +++ b/tests/common/plugins/sanity_check/recover.py @@ -1,16 +1,16 @@ import logging -import time import constants -from common.utilities import wait, wait_until +from common.utilities import wait from common.errors import RunAnsibleModuleFail +from common.platform.device_utils import fanout_switch_port_lookup logger = logging.getLogger(__name__) -def reboot_dut(dut, localhost, cmd): +def reboot_dut(dut, localhost, cmd, wait_time): logger.info("Reboot dut using cmd='%s'" % cmd) reboot_task, reboot_res = dut.command(cmd, module_async=True) @@ -26,13 +26,74 @@ def reboot_dut(dut, localhost, cmd): assert False, "Failed to reboot the DUT" localhost.wait_for(host=dut.hostname, port=22, state="started", delay=10, timeout=300) - wait(30, msg="Wait 30 seconds for system to be stable.") + wait(wait_time, msg="Wait {} seconds for system to be stable.".format(wait_time)) -def recover(dut, localhost, recover_method): +def __recover_interfaces(dut, fanouthosts, result, wait_time): + action = None + for port in result['down_ports']: + logging.info("Restoring port {}".format(port)) + + pn = str(port).lower() + if 'portchannel' in pn or 'vlan' in pn: + action = 'config_reload' + continue + + fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, port) + if fanout and fanout_port: + fanout.no_shutdown(fanout_port) + dut.no_shutdown(port) + wait(wait_time, 
msg="Wait {} seconds for interface(s) to restore.".format(wait_time)) + return action + + +def __recover_services(dut, result): + status = result['services_status'] + services = [ x for x in status if not status[x] ] + logging.info("Service(s) down: {}".format(services)) + return 'reboot' if 'database' in services else 'config_reload' + + +def __recover_with_command(dut, cmd, wait_time): + dut.command(cmd) + wait(wait_time, msg="Wait {} seconds for system to be stable.".format(wait_time)) + + +def adaptive_recover(dut, localhost, fanouthosts, check_results, wait_time): + outstanding_action = None + for result in check_results: + if result['failed']: + logging.info("Restoring {}".format(result)) + if result['check_item'] == 'interfaces': + action = __recover_interfaces(dut, fanouthosts, result, wait_time) + elif result['check_item'] == 'services': + action = __recover_services(dut, result) + elif result['check_item'] == 'processes': + action = 'config_reload' + else: + action = 'reboot' + + # Any action can override no action or 'config_reload'. + # 'reboot' is last resort and cannot be overridden. + if action and (not outstanding_action or outstanding_action == 'config_reload'): + outstanding_action = action + + if outstanding_action: + method = constants.RECOVER_METHODS[outstanding_action] + wait_time = method['recover_wait'] + if method["reboot"]: + reboot_dut(dut, localhost, method["cmd"], wait_time) + else: + __recover_with_command(dut, method['cmd'], wait_time) + + +def recover(dut, localhost, fanouthosts, check_results, recover_method): logger.info("Try to recover %s using method %s" % (dut.hostname, recover_method)) - if constants.RECOVER_METHODS[recover_method]["reboot"]: - reboot_dut(dut, localhost, constants.RECOVER_METHODS[recover_method]["cmd"]) + method = constants.RECOVER_METHODS[recover_method] + wait_time = method['recover_wait'] + if method["adaptive"]: + adaptive_recover(dut, localhost, fanouthosts, check_results, wait_time) + elif method["reboot"]: + reboot_dut(dut, localhost, method["cmd"], wait_time) else: - dut.command(constants.RECOVER_METHODS[recover_method]["cmd"]) - wait(30, msg="Wait 30 seconds for system to be stable.") + __recover_with_command(dut, method['cmd'], wait_time) diff --git a/tests/common/plugins/tacacs.py b/tests/common/plugins/tacacs.py index 322ac0bf7e..31eabf3300 100644 --- a/tests/common/plugins/tacacs.py +++ b/tests/common/plugins/tacacs.py @@ -6,11 +6,11 @@ def setup_tacacs(ptfhost, duthost, creds): """setup tacacs client and server""" # disable tacacs server - ptfhost.shell("service tacacs_plus stop") + ptfhost.service(name="tacacs_plus", state="stopped") # configure tacacs client duthost.shell("sudo config tacacs passkey %s" % creds['tacacs_passkey']) - + # get default tacacs servers config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] for tacacs_server in config_facts.get('TACPLUS_SERVER', {}): @@ -34,12 +34,12 @@ def setup_tacacs(ptfhost, duthost, creds): ptfhost.template(src="tacacs/tac_plus.conf.j2", dest="/etc/tacacs+/tac_plus.conf") # start tacacs server - ptfhost.shell("service tacacs_plus start") + ptfhost.service(name="tacacs_plus", state="started") yield # stop tacacs server - ptfhost.shell("service tacacs_plus stop") + ptfhost.service(name="tacacs_plus", state="stopped") # reset tacacs client configuration duthost.shell("sudo config tacacs delete %s" % ptfip) diff --git a/tests/common/reboot.py b/tests/common/reboot.py index 8633193527..7dd9923140 100644 --- a/tests/common/reboot.py +++ 
b/tests/common/reboot.py @@ -1,7 +1,7 @@ import time import logging from multiprocessing.pool import ThreadPool, TimeoutError -from ansible_host import AnsibleModuleException +from errors import RunAnsibleModuleFail logger = logging.getLogger(__name__) @@ -148,8 +148,8 @@ def execute_reboot_helper(): while finalizer_state == 'activating': try: res = duthost.command('systemctl is-active warmboot-finalizer.service',module_ignore_errors=True) - except AnsibleModuleException as err: - res = err.module_result + except RunAnsibleModuleFail as err: + res = err.results finalizer_state = res['stdout'].strip() logger.info('warmboot finalizer service state {}'.format(finalizer_state)) diff --git a/tests/common/system_utils/docker.py b/tests/common/system_utils/docker.py index 3b30f250bb..aecc2c3ba4 100644 --- a/tests/common/system_utils/docker.py +++ b/tests/common/system_utils/docker.py @@ -28,36 +28,50 @@ class DockerRegistryInfo(_DockerRegistryInfo): """ pass -def parse_registry_file(registry_file): +def load_docker_registry_info(dut): """ - parse_registry_file parses the provided file to produce a DockerRegistryInfo. + Attempts to load Docker registry information. - See `SONIC_DOCKER_REGISTRY` for the expected format of this file. + This method will first search for the registry in the `secret_vars` section + of the Ansible inventory. If it's not found, then it will load the registry from + the `SONIC_DOCKER_REGISTRY` file. Args: - registry_file (str): The name of the file holding the registry information. + dut (SonicHost): The target device. Raises: - IOError: If the file cannot be opened for any reason. - ValueError: If the provided file is missing any required fields. + IOError: If the registry file cannot be read. + ValueError: If the registry information is missing from both the + Ansible inventory and the registry file. Returns: - DockerRegistryInfo: The registry info from the registry file. + DockerRegistryInfo: The registry information that was loaded. """ - try: - with open(registry_file) as contents: - registry_vars = yaml.safe_load(contents) - except IOError as err: - _LOGGER.error("Failed to parse registry file \"%s\" (%s)", registry_file, err) - raise + # FIXME: In Ansible we're able to load the facts regardless of where they're + # stored. We should figure out how to do this in pytest so the registry + # location isn't hard-coded. + registry_vars = dut.host.options['variable_manager'] \ + ._hostvars.get(dut.hostname, {}) \ + .get("secret_vars", {}) \ + .get("docker_registry") + + if not registry_vars: + _LOGGER.warning("Registry info not found in inventory, falling back to registry file") + + try: + with open(SONIC_DOCKER_REGISTRY) as contents: + registry_vars = yaml.safe_load(contents) + except IOError as err: + _LOGGER.error("Failed to parse registry file (%s)", err) + raise host = registry_vars.get("docker_registry_host") username = registry_vars.get("docker_registry_username") password = registry_vars.get("docker_registry_password") if not host or not username or not password: - error_message = "Registry file \"{}\" is missing login or hostname".format(registry_file) + error_message = "Missing registry hostname or login" _LOGGER.error(error_message) raise ValueError(error_message) @@ -65,18 +79,18 @@ def parse_registry_file(registry_file): def delete_container(dut, container_name): """ - delete_container attempts to delete the specified container from the DUT. + Attempts to delete the specified container from the DUT. Args: dut (SonicHost): The target device. 
container_name (str): The name of the container to delete. """ - dut.docker_container(name=container_name, state="absent") + dut.command("docker rm {}".format(container_name)) def download_image(dut, registry, image_name, image_version="latest"): """ - download_image attempts to download the specified image from the registry. + Attempts to download the specified image from the registry. Args: dut (SonicHost): The target device. @@ -85,15 +99,12 @@ def download_image(dut, registry, image_name, image_version="latest"): image_version (str): The version of the image to download. """ - dut.docker_login(registry_url=registry.host, - username=registry.username, - password=registry.password) - dut.docker_image(source="pull", - name="{}/{}:{}".format(registry.host, image_name, image_version)) + dut.command("docker login {} -u {} -p {}".format(registry.host, registry.username, registry.password)) + dut.command("docker pull {}/{}:{}".format(registry.host, image_name, image_version)) def tag_image(dut, tag, image_name, image_version="latest"): """ - tag_image applies the specified tag to a Docker image on the DUT. + Applies the specified tag to a Docker image on the DUT. Args: dut (SonicHost): The target device. @@ -102,19 +113,16 @@ def tag_image(dut, tag, image_name, image_version="latest"): image_version (str): The version of the image to tag. """ - dut.docker_image(source="local", - name="{}:{}".format(image_name, image_version), - tag=tag) + dut.command("docker tag {}:{} {}".format(image_name, image_version, tag)) -def swap_syncd(dut, registry_file=SONIC_DOCKER_REGISTRY): +def swap_syncd(dut): """ - swap_syncd replaces the default syncd container on the DUT with an RPC version of it. + Replaces the running syncd container with the RPC version of it. - This command will download a new Docker image to the DUT and restart the swss service. + This will download a new Docker image to the DUT and restart the swss service. Args: dut (SonicHost): The target device. - registry_file (str): The registry file describing where to download the RPC image. """ if is_broadcom_device(dut): @@ -139,14 +147,55 @@ def swap_syncd(dut, registry_file=SONIC_DOCKER_REGISTRY): output = dut.command("sonic-cfggen -y /etc/sonic/sonic_version.yml -v build_version") sonic_version = output["stdout_lines"][0].strip() - registry = parse_registry_file(registry_file) + registry = load_docker_registry_info(dut) download_image(dut, registry, docker_rpc_image, sonic_version) tag_image(dut, - "{}/{}".format(registry.host, docker_syncd_name), - docker_rpc_image, + "{}:latest".format(docker_syncd_name), + "{}/{}".format(registry.host, docker_rpc_image), sonic_version) + dut.command("systemctl reset-failed swss") dut.command("systemctl start swss") + + _LOGGER.info("swss has been restarted, waiting 60 seconds to initialize...") + time.sleep(60) + +def restore_default_syncd(dut): + """ + Replaces the running syncd with the default syncd that comes with the image. + + This will restart the swss service. + + Args: + dut (SonicHost): The target device. 
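+
+    A usage sketch (illustrative): call docker.swap_syncd(duthost) before the
+    test and docker.restore_default_syncd(duthost) during teardown, as the
+    COPP tests below do.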
+ """ + + if is_broadcom_device(dut): + vendor_id = "brcm" + elif is_mellanox_device(dut): + vendor_id = "mlnx" + else: + error_message = "\"{}\" is not currently supported".format(dut.get_asic_type()) + _LOGGER.error(error_message) + raise ValueError(error_message) + + docker_syncd_name = "docker-syncd-{}".format(vendor_id) + + dut.command("systemctl stop swss") + delete_container(dut, "syncd") + + # TODO: Getting the base image version should be a common utility + output = dut.command("sonic-cfggen -y /etc/sonic/sonic_version.yml -v build_version") + sonic_version = output["stdout_lines"][0].strip() + + tag_image(dut, + "{}:latest".format(docker_syncd_name), + docker_syncd_name, + sonic_version) + + dut.command("systemctl reset-failed swss") + dut.command("systemctl start swss") + _LOGGER.info("swss has been restarted, waiting 60 seconds to initialize...") time.sleep(60) diff --git a/tests/common/utilities.py b/tests/common/utilities.py index 4e17e3014f..ecf1a86ee5 100644 --- a/tests/common/utilities.py +++ b/tests/common/utilities.py @@ -36,7 +36,7 @@ def wait_until(timeout, interval, condition, *args, **kwargs): try: check_result = condition(*args, **kwargs) except Exception as e: - logging.debug("Exception caught while checking %s: %s" % (condition.__name__, repr(e))) + logging.error("Exception caught while checking %s: %s" % (condition.__name__, repr(e))) check_result = False if check_result: diff --git a/tests/conftest.py b/tests/conftest.py index eb9af0a335..8ac11fc26f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,15 +5,17 @@ import tarfile import logging import time +import string +import re import pytest import csv import yaml import ipaddr as ipaddress -from ansible_host import AnsibleHost from collections import defaultdict -from common.devices import SonicHost, Localhost, PTFHost, EosHost +from common.fixtures.conn_graph_facts import conn_graph_facts +from common.devices import SonicHost, Localhost, PTFHost, EosHost, FanoutHost logger = logging.getLogger(__name__) @@ -41,7 +43,7 @@ def __init__(self, testbed_file): CSV_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'server', 'vm_base', 'dut', 'comment') with open(self.testbed_filename) as f: - topo = csv.DictReader(f, fieldnames=CSV_FIELDS) + topo = csv.DictReader(f, fieldnames=CSV_FIELDS, delimiter=',') # Validate all field are in the same order and are present header = next(topo) @@ -57,6 +59,9 @@ def __init__(self, testbed_file): line['ptf_ip'] = str(ptfaddress.ip) line['ptf_netmask'] = str(ptfaddress.netmask) + line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';') + del line['dut'] + topo = line['topo'] del line['topo'] line['topo'] = defaultdict() @@ -84,15 +89,37 @@ def pytest_addoption(parser): ############################ # test_techsupport options # ############################ - + parser.addoption("--loop_num", action="store", default=10, type=int, help="Change default loop range for show techsupport command") parser.addoption("--loop_delay", action="store", default=10, type=int, help="Change default loops delay") - parser.addoption("--logs_since", action="store", type=int, + parser.addoption("--logs_since", action="store", type=int, help="number of minutes for show techsupport command") +@pytest.fixture(scope="session", autouse=True) +def enhance_inventory(request): + """ + This fixture is to enhance the capability of parsing the value of pytest cli argument '--inventory'. 
+    The pytest-ansible plugin always assumes that the value of cli argument '--inventory' is a single
+    inventory file. With this enhancement, we can pass in multiple inventory files using the cli argument
+    '--inventory'. The multiple inventory files can be separated by comma ','.
+
+    For example:
+        pytest --inventory "inventory1, inventory2"
+        pytest --inventory inventory1,inventory2
+
+    This fixture is automatically applied, you don't need to declare it in your test script.
+    """
+    inv_opt = request.config.getoption("ansible_inventory")
+    inv_files = [inv_file.strip() for inv_file in inv_opt.split(",")]
+    try:
+        setattr(request.config.option, "ansible_inventory", inv_files)
+    except AttributeError:
+        logger.error("Failed to set enhanced 'ansible_inventory' to request.config.option")
+
+
 @pytest.fixture(scope="session")
 def testbed(request):
     """
@@ -107,32 +134,6 @@
     return tbinfo.testbed_topo[tbname]
 
 
-@pytest.fixture(scope="module")
-def testbed_devices(ansible_adhoc, testbed):
-    """
-    @summary: Fixture for creating dut, localhost and other necessary objects for testing. These objects provide
-        interfaces for interacting with the devices used in testing.
-    @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is
-        mandatory argument for the class constructors.
-    @param testbed: Fixture for parsing testbed configuration file.
-    @return: Return the created device objects in a dictionary
-    """
-
-    devices = {
-        "localhost": Localhost(ansible_adhoc),
-        "dut": SonicHost(ansible_adhoc, testbed["dut"], gather_facts=True)}
-
-    if "ptf" in testbed:
-        devices["ptf"] = PTFHost(ansible_adhoc, testbed["ptf"])
-    else:
-        # when no ptf defined in testbed.csv
-        # try to parse it from inventory
-        dut = devices["dut"]
-        ptf_host = dut.host.options["inventory_manager"].get_host(dut.hostname).get_vars()["ptf_host"]
-        devices["ptf"] = PTFHost(ansible_adhoc, ptf_host)
-
-    return devices
-
 def disable_ssh_timout(dut):
     '''
     @summary disable ssh session timeout on target dut
@@ -160,16 +161,21 @@ def enable_ssh_timout(dut):
 
 @pytest.fixture(scope="module")
-def duthost(testbed_devices, request):
+def duthost(ansible_adhoc, testbed, request):
     '''
     @summary: Shortcut fixture for getting DUT host. For a lengthy test case, the test case module can pass a request
        to disable the ssh timeout mechanism on the dut in order to avoid ssh timeout. After the test case completes,
        the fixture will restore the ssh timeout.
-    @param testbed_devices: Ansible framework testbed devices
+    @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is
+        a mandatory argument for the class constructors.
+ @param testbed: Ansible framework testbed information + @param request: request parameters for duthost test fixture ''' stop_ssh_timeout = getattr(request.module, "pause_ssh_timeout", None) + dut_index = getattr(request.module, "dut_index", 0) + assert dut_index < len(testbed["duts"]), "DUT index '{0}' is out of bound '{1}'".format(dut_index, len(testbed["duts"])) - duthost = testbed_devices["dut"] + duthost = SonicHost(ansible_adhoc, testbed["duts"][dut_index], gather_facts=True) if stop_ssh_timeout is not None: disable_ssh_timout(duthost) @@ -180,25 +186,60 @@ def duthost(testbed_devices, request): @pytest.fixture(scope="module") -def ptfhost(testbed_devices): - """ - Shortcut fixture for getting PTF host - """ +def localhost(ansible_adhoc): + return Localhost(ansible_adhoc) + + +@pytest.fixture(scope="module") +def ptfhost(ansible_adhoc, testbed, duthost): + if "ptf" in testbed: + return PTFHost(ansible_adhoc, testbed["ptf"]) + else: + # when no ptf defined in testbed.csv + # try to parse it from inventory + ptf_host = duthost.host.options["inventory_manager"].get_host(duthost.hostname).get_vars()["ptf_host"] + return PTFHost(ansible_adhoc, ptf_host) - return testbed_devices["ptf"] @pytest.fixture(scope="module") def nbrhosts(ansible_adhoc, testbed, creds): """ - Shortcut fixture for getting PTF host + Shortcut fixture for getting VM host """ vm_base = int(testbed['vm_base'][2:]) devices = {} for k, v in testbed['topo']['properties']['topology']['VMs'].items(): - devices[k] = EosHost(ansible_adhoc, "VM%04d" % (vm_base + v['vm_offset']), creds['eos_login'], creds['eos_password']) + devices[k] = {'host': EosHost(ansible_adhoc, \ + "VM%04d" % (vm_base + v['vm_offset']), \ + creds['eos_login'], \ + creds['eos_password']), + 'conf': testbed['topo']['properties']['configuration'][k]} return devices +@pytest.fixture(scope="module") +def fanouthosts(ansible_adhoc, conn_graph_facts, creds): + """ + Shortcut fixture for getting Fanout hosts + """ + + dev_conn = conn_graph_facts['device_conn'] if 'device_conn' in conn_graph_facts else {} + fanout_hosts = {} + for dut_port in dev_conn.keys(): + fanout_rec = dev_conn[dut_port] + fanout_host = fanout_rec['peerdevice'] + fanout_port = fanout_rec['peerport'] + if fanout_host in fanout_hosts.keys(): + fanout = fanout_hosts[fanout_host] + else: + host_vars = ansible_adhoc().options['inventory_manager'].get_host(fanout_host).vars + os_type = 'eos' if 'os' not in host_vars else host_vars['os'] + fanout = FanoutHost(ansible_adhoc, os_type, fanout_host, 'FanoutLeaf', creds['fanout_admin_user'], creds['fanout_admin_password']) + fanout_hosts[fanout_host] = fanout + fanout.add_port_map(dut_port, fanout_port) + + return fanout_hosts + @pytest.fixture(scope='session') def eos(): """ read and yield eos configuration """ @@ -207,11 +248,14 @@ def eos(): return eos -@pytest.fixture(scope="session") -def creds(): - """ read and yield lab configuration """ - files = glob.glob("../ansible/group_vars/lab/*.yml") - files += glob.glob("../ansible/group_vars/all/*.yml") +@pytest.fixture(scope="module") +def creds(duthost): + """ read credential information according to the dut inventory """ + groups = duthost.host.options['inventory_manager'].get_host(duthost.hostname).get_vars()['group_names'] + logger.info("dut {} belongs to groups {}".format(duthost.hostname, groups)) + files = glob.glob("../ansible/group_vars/all/*.yml") + for group in groups: + files += glob.glob("../ansible/group_vars/{}/*.yml".format(group)) creds = {} for f in files: with open(f) as stream: diff 
--git a/tests/fdb/conftest.py b/tests/copp/__init__.py similarity index 100% rename from tests/fdb/conftest.py rename to tests/copp/__init__.py diff --git a/tests/copp/conftest.py b/tests/copp/conftest.py new file mode 100644 index 0000000000..57a746bdc1 --- /dev/null +++ b/tests/copp/conftest.py @@ -0,0 +1,31 @@ +""" + Pytest configuration used by the COPP tests. +""" + +def pytest_addoption(parser): + """ + Adds options to pytest that are used by the COPP tests. + """ + + parser.addoption( + "--nn_target_port", + action="store", + type=int, + default=3, + help="Which port to send traffic to", + ) + + parser.addoption( + "--pkt_tx_count", + action="store", + type=int, + default=100000, + help="How many packets to send to the DUT" + ) + + parser.addoption( + "--swap_syncd", + action="store_true", + default=False, + help="Install syncd RPC image for this test case" + ) diff --git a/tests/copp/copp_utils.py b/tests/copp/copp_utils.py new file mode 100644 index 0000000000..a0723f081f --- /dev/null +++ b/tests/copp/copp_utils.py @@ -0,0 +1,118 @@ +""" + Helpful utilities for writing tests for the COPP feature. + + Todo: + Refactor ptfadapter so it can be leveraged in these test cases. +""" + +DEFAULT_NN_TARGET_PORT = 3 + +_REMOVE_IP_SCRIPT = "scripts/remove_ip.sh" +_ADD_IP_SCRIPT = "scripts/add_ip.sh" +_UPDATE_COPP_SCRIPT = "copp/scripts/update_copp_config.py" + +_BASE_COPP_CONFIG = "/tmp/00-copp.config.json" +_SWSS_COPP_CONFIG = "swss:/etc/swss/config.d/00-copp.config.json" +_TEMP_COPP_CONFIG = "/tmp/copp_config.json" + +_PTF_NN_TEMPLATE = "templates/ptf_nn_agent.conf.ptf.j2" +_PTF_NN_DEST = "/etc/supervisor/conf.d/ptf_nn_agent.conf" + +_SYNCD_NN_TEMPLATE = "templates/ptf_nn_agent.conf.dut.j2" +_SYNCD_NN_DEST = "/tmp/ptf_nn_agent.conf" + +def limit_policer(dut, pps_limit): + """ + Updates the COPP configuration in the SWSS container to respect a given rate limit. + + Note: + The SWSS container must be restarted for the config change to take effect. + + Args: + dut (SonicHost): The target device. + pps_limit (int): The rate limit for COPP to enforce on ALL trap groups. + """ + + dut.command("docker cp {} {}".format(_SWSS_COPP_CONFIG, _BASE_COPP_CONFIG)) + dut.script(cmd="{} {} {} {}".format(_UPDATE_COPP_SCRIPT, pps_limit, + _BASE_COPP_CONFIG, _TEMP_COPP_CONFIG)) + dut.command("docker cp {} {}".format(_TEMP_COPP_CONFIG, _SWSS_COPP_CONFIG)) + +def restore_policer(dut): + """ + Reloads the default COPP configuration in the SWSS container. + + Notes: + This method should only be used after limit_policer. + + The SWSS container must be restarted for the config change to take effect. + """ + dut.command("docker cp {} {}".format(_BASE_COPP_CONFIG, _SWSS_COPP_CONFIG)) + +def configure_ptf(ptf, nn_target_port): + """ + Configures the PTF to run the NN agent on the specified port. + + Args: + ptf (PTFHost): The target PTF. + nn_target_port (int): The port to run NN agent on. + """ + + ptf.script(cmd=_REMOVE_IP_SCRIPT) + ptf.script(cmd=_ADD_IP_SCRIPT) + + facts = {"nn_target_port": nn_target_port} + ptf.host.options['variable_manager'].extra_vars.update(facts) + ptf.template(src=_PTF_NN_TEMPLATE, dest=_PTF_NN_DEST) + + ptf.supervisorctl(name="ptf_nn_agent", state="restarted") + + ptf.copy(src="ptftests", dest="/root") + +def restore_ptf(ptf): + """ + Restores the PTF and the NN agent to default settings. + + Args: + ptf (PTFHost): The target PTF. 
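+
+    Mirrors configure_ptf; intended to run during teardown (e.g. from
+    _teardown_testbed in test_copp.py; illustrative cross-reference).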
+ """ + + ptf.script(cmd=_REMOVE_IP_SCRIPT) + + facts = {"nn_target_port": DEFAULT_NN_TARGET_PORT} + ptf.host.options['variable_manager'].extra_vars.update(facts) + + ptf.template(src=_PTF_NN_TEMPLATE, dest=_PTF_NN_DEST) + + ptf.supervisorctl(name="ptf_nn_agent", state="restarted") + +def configure_syncd(dut, nn_target_port): + """ + Configures syncd to run the NN agent on the specified port. + + Note: + The DUT must be running an RPC syncd image in order for the + NN agent to be available. + + Args: + dut (SonicHost): The target device. + nn_target_port (int): The port to run NN agent on. + """ + + facts = {"nn_target_port": nn_target_port, + "nn_target_interface": _map_port_number_to_interface(dut, nn_target_port)} + dut.host.options['variable_manager'].extra_vars.update(facts) + + dut.template(src=_SYNCD_NN_TEMPLATE, dest=_SYNCD_NN_DEST) + dut.command("docker cp {} syncd:/etc/supervisor/conf.d/".format(_SYNCD_NN_DEST)) + + dut.command("docker exec syncd supervisorctl reread") + dut.command("docker exec syncd supervisorctl update") + +def _map_port_number_to_interface(dut, nn_target_port): + """ + Retrieves the correct interface for a given port number. + """ + + interfaces = dut.command("portstat")["stdout_lines"][2:] + return interfaces[nn_target_port].split()[0] diff --git a/tests/copp/scripts/update_copp_config.py b/tests/copp/scripts/update_copp_config.py new file mode 100644 index 0000000000..007ac05198 --- /dev/null +++ b/tests/copp/scripts/update_copp_config.py @@ -0,0 +1,53 @@ +#!/usr/bin/python +""" + Script to modify a COPP configuration to follow a specified rate limit. + + This is used by the COPP tests to reduce the rate limit below that of the + PTF host's sending rate. + + Example:: + + $ python update_copp_config.py + $ python update_copp_config.py 600 /tmp/copp_config.json /tmp/new_copp_config.json +""" +import json +import sys + +def generate_limited_pps_config(pps_limit, input_config_file, output_config_file): + """ + Modifies a COPP config to use the specified rate limit. + + Notes: + This only affects COPP policies that enforce a rate limit. Other + policies are left alone. + + Args: + pps_limit (int): The rate limit to enforce expressed in PPS (packets-per-second) + input_config_file (str): The name of the file containing the initial config + output_config_file (str): The name of the file to output the modified config + """ + + with open(input_config_file) as input_stream: + copp_config = json.load(input_stream) + + for trap_group in copp_config: + for _, group_config in trap_group.items(): + # Notes: + # CIR (committed information rate) - bandwidth limit set by the policer + # CBS (committed burst size) - largest burst of packets allowed by the policer + # + # Setting these two values to pps_limit restricts the policer to allowing exactly + # that number of packets per second, which is what we want for our tests. + + if "cir" in group_config: + group_config["cir"] = pps_limit + + if "cbs" in group_config: + group_config["cbs"] = pps_limit + + with open(output_config_file, "w+") as output_stream: + json.dump(copp_config, output_stream) + +if __name__ == "__main__": + ARGS = sys.argv[1:] + generate_limited_pps_config(ARGS[0], ARGS[1], ARGS[2]) diff --git a/tests/copp/test_copp.py b/tests/copp/test_copp.py new file mode 100644 index 0000000000..186201f690 --- /dev/null +++ b/tests/copp/test_copp.py @@ -0,0 +1,201 @@ +""" + Tests the COPP feature in SONiC. + + Notes: + These test cases require that a special RPC syncd image is installed on the + DUT. 
You can either pre-install this image and run the test normally, or
+        specify the `--swap_syncd` flag from the command line to have the test fetch
+        the RPC image and install it before the test runs.
+
+        These test cases limit the PPS of all trap groups to 600. This is done to ensure
+        that the PTF can send traffic fast enough to trigger the policer. In order to validate
+        higher rate limits, a physical traffic generator is needed, which is beyond the scope
+        of these test cases.
+
+    Parameters:
+        --nn_target_port (int): Which port you want the test to send traffic
+            to. Default is 3.
+
+            Note that this is not the same as the interface name. For example, Ethernet12
+            may not be the 12th port in your system depending on the HWSKU under test.
+
+        --pkt_tx_count (int): How many packets to send during each individual test case.
+            Default is 100000.
+
+        --swap_syncd: Used to install the RPC syncd image before running the tests. Default
+            is disabled.
+"""
+
+import time
+from collections import namedtuple
+
+import pytest
+from copp import copp_utils
+from ptf_runner import ptf_runner
+from common.system_utils import docker
+from common.broadcom_data import is_broadcom_device
+
+_COPPTestParameters = namedtuple("_COPPTestParameters",
+                                 ["nn_target_port",
+                                  "pkt_tx_count",
+                                  "swap_syncd",
+                                  "topo"])
+_SUPPORTED_TOPOS = ["ptf32", "ptf64", "t1", "t1-lag"]
+_TEST_RATE_LIMIT = 600
+
+class TestCOPP(object):
+    """
+    Tests basic COPP functionality in SONiC.
+    """
+
+    @pytest.mark.parametrize("protocol", ["ARP",
+                                          "IP2ME",
+                                          "SNMP",
+                                          "SSH"])
+    def test_policer(self, protocol, duthost, ptfhost, _copp_testbed):
+        """
+        Validates that rate-limited COPP groups work as expected.
+
+        Checks that the policer enforces the rate limit for protocols
+        that have a set rate limit.
+        """
+
+        if protocol == "ARP" \
+                and is_broadcom_device(duthost) \
+                and "201811" not in duthost.get_version():
+            pytest.xfail("ARP policy disabled on BRCM devices due to SAI bug")
+
+        if protocol in ["IP2ME", "SNMP", "SSH"] and _copp_testbed.topo == "t1-lag":
+            pytest.xfail("Packets not received due to faulty DIP, see #1171")
+
+        _copp_runner(duthost,
+                     ptfhost,
+                     protocol,
+                     _copp_testbed)
+
+    @pytest.mark.parametrize("protocol", ["BGP",
+                                          "DHCP",
+                                          "LACP",
+                                          "LLDP",
+                                          "UDLD"])
+    def test_no_policer(self, protocol, duthost, ptfhost, _copp_testbed):
+        """
+        Validates that non-rate-limited COPP groups work as expected.
+
+        Checks that the policer does not enforce a rate limit for protocols
+        that do not have any set rate limit.
+        """
+
+        if protocol == "BGP" and _copp_testbed.topo == "t1-lag":
+            pytest.xfail("Packets not received due to faulty DIP, see #1171")
+
+        _copp_runner(duthost,
+                     ptfhost,
+                     protocol,
+                     _copp_testbed)
+
+@pytest.fixture(scope="class")
+def _copp_testbed(duthost, ptfhost, testbed, request):
+    """
+    Pytest fixture to handle setup and cleanup for the COPP tests.
+    """
+
+    test_params = _gather_test_params(testbed, request)
+
+    if test_params.topo not in _SUPPORTED_TOPOS:
+        pytest.skip("Topology not supported by COPP tests")
+
+    _setup_testbed(duthost, ptfhost, test_params)
+    yield test_params
+    _teardown_testbed(duthost, ptfhost, test_params)
+
+def _copp_runner(dut, ptf, protocol, test_params):
+    """
+    Configures and runs the PTF test cases.
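+
+    The nn device socket specs built below follow the
+    "<device>-<port>@tcp://<host>:<port>" convention, e.g.
+    "0-3@tcp://127.0.0.1:10900" for the default target port 3 (illustrative).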
+ """ + + params = {"verbose": False, + "pkt_tx_count": test_params.pkt_tx_count, + "target_port": test_params.nn_target_port} + + dut_ip = dut.setup()["ansible_facts"]["ansible_eth0"]["ipv4"]["address"] + device_sockets = ["0-{}@tcp://127.0.0.1:10900".format(test_params.nn_target_port), + "1-{}@tcp://{}:10900".format(test_params.nn_target_port, dut_ip)] + + # NOTE: debug_level can actually slow the PTF down enough to fail the test cases + # that are not rate limited. Until this is addressed, do not use this flag as part of + # nightly test runs. + ptf_runner(host=ptf, + testdir="ptftests", + testname="copp_tests.{}Test".format(protocol), + platform="nn", + qlen=100000, + params=params, + relax=None, + debug_level=None, + device_sockets=device_sockets) + +def _gather_test_params(testbed, request): + """ + Fetches the test parameters from pytest. + """ + + nn_target_port = request.config.getoption("--nn_target_port") + pkt_tx_count = request.config.getoption("--pkt_tx_count") + swap_syncd = request.config.getoption("--swap_syncd") + topo = testbed["topo"]["name"] + + return _COPPTestParameters(nn_target_port=nn_target_port, + pkt_tx_count=pkt_tx_count, + swap_syncd=swap_syncd, + topo=topo) + +def _setup_testbed(dut, ptf, test_params): + """ + Sets up the testbed to run the COPP tests. + """ + + # We don't want LLDP to throw off our test results, so we disable it first. + dut.command("docker exec lldp supervisorctl stop lldp-syncd") + dut.command("docker exec lldp supervisorctl stop lldpd") + + copp_utils.configure_ptf(ptf, test_params.nn_target_port) + + copp_utils.limit_policer(dut, _TEST_RATE_LIMIT) + + if test_params.swap_syncd: + docker.swap_syncd(dut) + else: + # NOTE: Even if the rpc syncd image is already installed, we need to restart + # SWSS for the COPP changes to take effect. + _restart_swss(dut) + + copp_utils.configure_syncd(dut, test_params.nn_target_port) + +def _teardown_testbed(dut, ptf, test_params): + """ + Tears down the testbed, returning it to its initial state. + """ + + copp_utils.restore_ptf(ptf) + + copp_utils.restore_policer(dut) + + if test_params.swap_syncd: + docker.restore_default_syncd(dut) + else: + _restart_swss(dut) + + dut.command("docker exec lldp supervisorctl start lldpd") + dut.command("docker exec lldp supervisorctl start lldp-syncd") + +def _restart_swss(dut): + """ + Restarts SWSS and waits for the system to stabilize. + """ + + # The failure counter may be incremented by other test cases, so we clear it + # first to avoid crashing the testbed. + dut.command("systemctl reset-failed swss") + dut.command("systemctl restart swss") + time.sleep(60) diff --git a/tests/test_dhcp_relay.py b/tests/dhcp_relay/test_dhcp_relay.py similarity index 100% rename from tests/test_dhcp_relay.py rename to tests/dhcp_relay/test_dhcp_relay.py diff --git a/tests/docs/pytest.org.md b/tests/docs/pytest.org.md new file mode 100644 index 0000000000..0b13d7e8d0 --- /dev/null +++ b/tests/docs/pytest.org.md @@ -0,0 +1,128 @@ +# Pytest organization proposal + +This proposal intends to achieve the following + - Have a standard way of categorizing tests + - Have some guidelines around test file organization + - Have a master wrapper for test execution + - Follow common documentation style + - Test result collection + +## Test categorization +Leverage pytest custom markers to group tests based on topology, asic, features, device type and connection type. +Every testcase needs to have a topology marker. Feature markers are recommended for any feature test that are getting added. 
+The 'device_type' marker is optional but needs to be specified if there is a specific requirement that the test needs a physical DUT as opposed to a VS. The same criterion applies to 'connection_type'.
+
+```
+pytest.ini
+[pytest]
+markers:
+    topology(topo_name): The topologies this particular testcase can run against. topo_name can be an individual topology name like 't0', 't1', 'ptf', 'any', or a comma separated list like ('t0', 't1') if the test is supported on multiple topologies
+    asic(vendor_name): used for ASIC specific tests (broadcom, mellanox, etc.)
+    feature(feature_name): the feature this test is written for, e.g. acl, nat
+    connection_type(name): name can be 'fabric' (which indicates the presence of a fanout switch) or 'direct' if a testcase uses directly connected links
+    device_type(name): name can be 'physical' (if this test requires a physical DUT) or 'vs' (if this test can be run on a virtual switch)
+
+```
+conftest.py
+
+```
+def pytest_addoption(parser):
+    parser.addoption("--topology", action="store", metavar="NAME",
+                     help="only run tests matching the topology NAME")
+
+def pytest_runtest_setup(item):
+    toponames = [mark.args for mark in item.iter_markers(name="topology")]
+    if toponames:
+        cfg_topos = item.config.getoption("--topology").split(',')
+        if all(topo not in toponames[0] for topo in cfg_topos):
+            pytest.skip("test requires topology in {!r}".format(toponames))
+    else:
+        if item.config.getoption("--topology"):
+            pytest.skip("test does not match topology")
+
+```
+
+Sample test file: test_topo.py
+
+```
+@pytest.mark.topology('t0', 't1')
+def test_all():
+    assert 1 == 1
+
+@pytest.mark.topology('t0')
+def test_t0():
+    assert 1 == 1
+
+
+@pytest.mark.topology('any')
+def test_any():
+    assert 1 == 1
+
+```
+
+Sample test file: test_notopo.py
+
+```
+def test_notopo():
+    assert 1 == 1
+
+```
+
+Test run
+
+```
+py.test --inventory inv --host-pattern dut1 --module-path ../ansible/library/ --testbed tb --testbed_file tb.csv --topology t1 test_topo.py test_notopo.py -rA
+
+platform linux2 -- Python 2.7.12, pytest-4.6.9, py-1.8.1, pluggy-0.13.1
+ansible: 2.8.7
+rootdir: /var/nejo/Networking-acs-sonic-mgmt/tests, inifile: pytest.ini
+plugins: ansible-2.2.2
+collected 4 items
+
+test_topo.py::test_all PASSED [ 25%]
+test_topo.py::test_t0 PASSED [ 50%]
+test_topo.py::test_any SKIPPED [ 75%]
+test_notopo.py::test_notopo SKIPPED [100%]
+
+....
+
+....
+PASSED test_topo.py::test_all
+SKIPPED [1] /var/nejo/Networking-acs-sonic-mgmt/tests/conftest.py:293: test requires topology in [('t0',)]
+SKIPPED [1] /var/nejo/Networking-acs-sonic-mgmt/tests/conftest.py:293: test requires topology in [('any',)]
+SKIPPED [1] /var/nejo/Networking-acs-sonic-mgmt/tests/conftest.py:295: test does not match topology
+
+```
+
+## Test file organization
+- Have two broad categories (platform and feature). Feature specific tests and their helpers go into the specific feature folder.
+
+```
+tests
+    |_ common
+    |_ platform
+    |_ ptftests
+    |_ nat
+       |_ test_nat_bindings.py
+       |_ files
+          |_ all helpers for the nat feature
+    |_ acl
+
+```
+
+- Any reusable code needs to go under tests/common
+
+- File naming convention
+  The objective here is to provide meaningful names for helper and testcase files so that the user gets a general idea of the file contents.
+
+
+## Master wrapper
+Make it easier to run a nightly test against a feature/platform/topology from the command line.
Have something similar to the 'ansible/testbed-cli.sh' script which can be invoked with just the basic parameters (testbed name, what flavor of test to run) + + +## Documentation style +Follow a common style of documentation for test methods which can be used by some tool to generate html content + + +## Test result collection +Use the --junitxml attribute to collect test results. Can leverage the existing format used in sonic-utilities/sonic-swss repo for reporting test results. diff --git a/tests/drop_counters/combined_drop_counters.yml b/tests/drop_counters/combined_drop_counters.yml index 0d33bc1033..7688675da8 100755 --- a/tests/drop_counters/combined_drop_counters.yml +++ b/tests/drop_counters/combined_drop_counters.yml @@ -20,6 +20,10 @@ # - "x86_64-mlnx" # - "x86_64-dell.*" l2_l3: + - "x86_64-dell.*" + - "x86_64-arista.*" acl_l2: - - "x86_64-mlnx" \ No newline at end of file + - "x86_64-mlnx" + - "x86_64-dell.*" + - "x86_64-arista.*" diff --git a/tests/drop_counters/fanout/mellanox/mellanox_fanout.py b/tests/drop_counters/fanout/mellanox/mellanox_fanout.py index 8e389f4061..0488ffbc28 100755 --- a/tests/drop_counters/fanout/mellanox/mellanox_fanout.py +++ b/tests/drop_counters/fanout/mellanox/mellanox_fanout.py @@ -1,3 +1,4 @@ +import os import pytest from ..fanout_base import BaseFanoutHandler @@ -13,13 +14,13 @@ class FanoutHandler(BaseFanoutHandler): - def __init__(self, testbed_devices): + def __init__(self, duthost, localhost): self.initialized = False self.rule_id = MAX_OPENFLOW_RULE_ID # Ansible localhost fixture which calls ansible playbook on the local host - self.ansible_localhost = testbed_devices["localhost"] + self.ansible_localhost = localhost - dut_facts = self.ansible_localhost.conn_graph_facts(host=testbed_devices["dut"].hostname, filename=LAB_CONNECTION_GRAPH)["ansible_facts"] + dut_facts = self.ansible_localhost.conn_graph_facts(host=duthost.hostname, filename=LAB_CONNECTION_GRAPH)["ansible_facts"] self.fanout_host = dut_facts["device_conn"]["Ethernet0"]["peerdevice"] fanout_facts = self.ansible_localhost.conn_graph_facts(host=self.fanout_host, filename=LAB_CONNECTION_GRAPH)["ansible_facts"] diff --git a/tests/drop_counters/test_drop_counters.py b/tests/drop_counters/test_drop_counters.py index 39ebaedbad..2054dc57d4 100755 --- a/tests/drop_counters/test_drop_counters.py +++ b/tests/drop_counters/test_drop_counters.py @@ -7,13 +7,13 @@ import pprint import random import time -import scapy import yaml import re import os import json import netaddr +from common.utilities import wait_until logger = logging.getLogger(__name__) @@ -269,7 +269,7 @@ def rif_port_down(duthost, setup, loganalyzer): @pytest.fixture -def fanouthost(request, testbed_devices): +def fanouthost(request, duthost, localhost): """ Fixture that allows to update Fanout configuration if there is a need to send incorrect packets. Added possibility to create vendor specific logic to handle fanout configuration. @@ -278,15 +278,14 @@ def fanouthost(request, testbed_devices): By default 'fanouthost' fixture will not instantiate any instance so it will return None, and in such case 'fanouthost' instance should not be used in test case logic. 
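 
    A sketch of the current contract (only 'mellanox' is handled so far): a module
    at fanout/<vendor>/<vendor>_fanout.py exposing a FanoutHandler class is imported
    when the DUT's asic_type matches the vendor name, e.g.
    fanout.mellanox.mellanox_fanout.FanoutHandler below.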
""" - dut = testbed_devices["dut"] fanout = None # Check that class to handle fanout config is implemented - if "mellanox" == dut.facts["asic_type"]: + if "mellanox" == duthost.facts["asic_type"]: for file_name in os.listdir(os.path.join(os.path.dirname(__file__), "fanout")): # Import fanout configuration handler based on vendor name if "mellanox" in file_name: module = importlib.import_module("fanout.{0}.{0}_fanout".format(file_name.strip(".py"))) - fanout = module.FanoutHandler(testbed_devices) + fanout = module.FanoutHandler(duthost, localhost) break yield fanout @@ -415,38 +414,44 @@ def str_to_int(value): def verify_drop_counters(duthost, dut_iface, get_cnt_cli_cmd, column_key): """ Verify drop counter incremented on specific interface """ - drops = get_pkt_drops(duthost, get_cnt_cli_cmd)[dut_iface][column_key] - drops = str_to_int(drops) - - if drops != PKT_NUMBER: + get_drops = lambda: int(get_pkt_drops(duthost, get_cnt_cli_cmd)[dut_iface][column_key].replace(",", "")) + check_drops_on_dut = lambda: PKT_NUMBER == get_drops() + if not wait_until(5, 1, check_drops_on_dut): fail_msg = "'{}' drop counter was not incremented on iface {}. DUT {} == {}; Sent == {}".format( - column_key, dut_iface, column_key, drops, PKT_NUMBER + column_key, dut_iface, column_key, get_drops(), PKT_NUMBER ) pytest.fail(fail_msg) -def base_verification(discard_group, pkt, ptfadapter, duthost, ptf_tx_port_id, dut_iface, l2_col_key=RX_DRP, l3_col_key=RX_ERR): +def base_verification(discard_group, pkt, ptfadapter, duthost, ports_info, tx_dut_ports=None, + l2_col_key=RX_DRP, l3_col_key=RX_ERR): """ Base test function for verification of L2 or L3 packet drops. Verification type depends on 'discard_group' value. Supported 'discard_group' values: 'L2', 'L3', 'ACL' """ - send_packets(pkt, duthost, ptfadapter, ptf_tx_port_id) + send_packets(pkt, duthost, ptfadapter, ports_info["ptf_tx_port_id"]) if discard_group == "L2": - verify_drop_counters(duthost, dut_iface, GET_L2_COUNTERS, l2_col_key) + verify_drop_counters(duthost, ports_info["dut_iface"], GET_L2_COUNTERS, l2_col_key) ensure_no_l3_drops(duthost) elif discard_group == "L3": if COMBINED_L2L3_DROP_COUNTER: - verify_drop_counters(duthost, dut_iface, GET_L2_COUNTERS, l2_col_key) + verify_drop_counters(duthost, ports_info["dut_iface"], GET_L2_COUNTERS, l2_col_key) ensure_no_l3_drops(duthost) else: - verify_drop_counters(duthost, dut_iface, GET_L3_COUNTERS, l3_col_key) + if not tx_dut_ports: + pytest.fail("No L3 interface specified") + + verify_drop_counters(duthost, tx_dut_ports[ports_info["dut_iface"]], GET_L3_COUNTERS, l3_col_key) ensure_no_l2_drops(duthost) elif discard_group == "ACL": + if not tx_dut_ports: + pytest.fail("No L3 interface specified") + time.sleep(ACL_COUNTERS_UPDATE_INTERVAL) acl_drops = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"]["DATAACL"]["rules"]["RULE_1"]["packets_count"] if acl_drops != PKT_NUMBER: fail_msg = "ACL drop counter was not incremented on iface {}. DUT ACL counter == {}; Sent pkts == {}".format( - dut_iface, acl_drops, PKT_NUMBER + tx_dut_ports[ports_info["dut_iface"]], acl_drops, PKT_NUMBER ) pytest.fail(fail_msg) if not COMBINED_ACL_DROP_COUNTER: @@ -456,19 +461,18 @@ def base_verification(discard_group, pkt, ptfadapter, duthost, ptf_tx_port_id, d pytest.fail("Incorrect 'discard_group' specified. 
Supported values: 'L2' or 'L3'") - -def do_test(discard_group, pkt, ptfadapter, duthost, ptf_tx_port_id, dut_iface, sniff_ports, l2_col_key=RX_DRP, l3_col_key=RX_ERR): +def do_test(discard_group, pkt, ptfadapter, duthost, ports_info, sniff_ports, tx_dut_ports=None, + l2_col_key=RX_DRP, l3_col_key=RX_ERR): """ Execute test - send packet, check that expected discard counters were incremented and packet was dropped @param discard_group: Supported 'discard_group' values: 'L2', 'L3', 'ACL' @param pkt: PTF composed packet, sent by test case @param ptfadapter: fixture @param duthost: fixture - @param ptf_tx_port_id: TX PTF port ID @param dut_iface: DUT interface name expected to receive packets from PTF @param sniff_ports: DUT ports to check that packets were not egressed from """ - base_verification(discard_group, pkt, ptfadapter, duthost, ptf_tx_port_id, dut_iface, l2_col_key, l3_col_key) + base_verification(discard_group, pkt, ptfadapter, duthost, ports_info, tx_dut_ports, l2_col_key, l3_col_key) # Verify packets were not egresed the DUT exp_pkt = expected_packet_mask(pkt) @@ -496,7 +500,7 @@ def test_equal_smac_dmac_drop(ptfadapter, duthost, setup, fanouthost, pkt_fields tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"]) - do_test("L2", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], ports_info["dut_iface"], setup["neighbor_sniff_ports"]) + do_test("L2", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"]) def test_multicast_smac_drop(ptfadapter, duthost, setup, fanouthost, pkt_fields, ports_info): @@ -522,7 +526,7 @@ def test_multicast_smac_drop(ptfadapter, duthost, setup, fanouthost, pkt_fields, tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"]) - do_test("L2", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], ports_info["dut_iface"], setup["neighbor_sniff_ports"]) + do_test("L2", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"]) def test_reserved_dmac_drop(ptfadapter, duthost, setup, fanouthost, pkt_fields, ports_info): @@ -551,7 +555,7 @@ def test_reserved_dmac_drop(ptfadapter, duthost, setup, fanouthost, pkt_fields, tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"]) - do_test("L2", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], ports_info["dut_iface"], setup["neighbor_sniff_ports"]) + do_test("L2", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"]) def test_not_expected_vlan_tag_drop(ptfadapter, duthost, setup, pkt_fields, ports_info): @@ -559,15 +563,17 @@ def test_not_expected_vlan_tag_drop(ptfadapter, duthost, setup, pkt_fields, port @summary: Verify that VLAN tagged packet which VLAN ID does not match ingress port VLAN ID is dropped and L2 drop counter incremented """ + start_vlan_id = 2 log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], pkt_fields["ipv4_src"]) max_vlan_id = 1000 upper_bound = max(setup["vlans"]) if setup["vlans"] else max_vlan_id - for interim in range(1, upper_bound): + for interim in range(start_vlan_id, upper_bound): if interim not in setup["vlans"]: vlan_id = interim break else: - pytest.fail("Unable to generate unique not yet existed VLAN ID. Already configured VLANs range {}-{}".format(1, upper_bound)) + pytest.fail("Unable to generate unique not yet existed VLAN ID. 
Already configured VLANs range {}-{}".format(start_vlan_id, + upper_bound)) pkt = testutils.simple_tcp_packet( eth_dst=ports_info["dst_mac"], # DUT port @@ -580,7 +586,7 @@ def test_not_expected_vlan_tag_drop(ptfadapter, duthost, setup, pkt_fields, port vlan_vid=vlan_id, ) - do_test("L2", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], ports_info["dut_iface"], setup["neighbor_sniff_ports"]) + do_test("L2", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"]) def test_dst_ip_is_loopback_addr(ptfadapter, duthost, setup, pkt_fields, tx_dut_ports, ports_info): @@ -599,7 +605,7 @@ def test_dst_ip_is_loopback_addr(ptfadapter, duthost, setup, pkt_fields, tx_dut_ tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"]) - do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]], setup["neighbor_sniff_ports"]) + do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_src_ip_is_loopback_addr(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, ports_info): @@ -618,7 +624,7 @@ def test_src_ip_is_loopback_addr(ptfadapter, duthost, setup, tx_dut_ports, pkt_f tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"]) - do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]], setup["neighbor_sniff_ports"]) + do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_dst_ip_absent(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, ports_info): @@ -634,7 +640,8 @@ def test_dst_ip_absent(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, por ip_dst="", # VM source tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"]) - do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]], setup["neighbor_sniff_ports"]) + + do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("ip_addr", ["ipv4", "ipv6"]) @@ -668,8 +675,8 @@ def test_src_ip_is_multicast_addr(ptfadapter, duthost, setup, tx_dut_ports, pkt_ pytest.fail("Incorrect value specified for 'ip_addr' test parameter. Supported parameters: 'ipv4' and 'ipv6'") log_pkt_params(ports_info["dut_iface"], ports_info["dst_mac"], ports_info["src_mac"], pkt_fields["ipv4_dst"], ip_src) - do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]], - setup["neighbor_sniff_ports"]) + + do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_src_ip_is_class_e(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, ports_info): @@ -689,8 +696,8 @@ def test_src_ip_is_class_e(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, ip_dst=pkt_fields["ipv4_dst"], # VM source tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"]) - do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]], - setup["neighbor_sniff_ports"]) + + do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("addr_type, addr_direction", [("ipv4", "src"), ("ipv6", "src"), ("ipv4", "dst"), @@ -735,8 +742,8 @@ def test_ip_is_zero_addr(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, a pytest.fail("Incorrect value specified for 'addr_type' test parameter. 
Supported parameters: 'ipv4' or 'ipv6'")
 
     logger.info(pkt_params)
-    do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]],
-            setup["dut_to_ptf_port_map"].values())
+
+    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["dut_to_ptf_port_map"].values(), tx_dut_ports)
 
 
 @pytest.mark.parametrize("addr_direction", ["src", "dst"])
@@ -764,10 +771,15 @@ def test_ip_link_local(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, add
     pkt = testutils.simple_tcp_packet(**pkt_params)
 
     logger.info(pkt_params)
-    do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]],
-            setup["neighbor_sniff_ports"])
+    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
 
 
+# This test case is skipped because SONiC does not expose a control to adjust loop-back filter settings.
+# The default SONiC behaviour is to forward the traffic, so the loop-back filter does not trigger for IP packets.
+# All router interfaces have the attribute "sx_interface_attributes_t.loopback_enable" enabled.
+# To enable loop-back filter drops, that attribute must be disabled when the RIF is created.
+# This could be done via the SAI attribute SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION, which is not exposed to SONiC.
+@pytest.mark.skip(reason="SONiC can't enable loop-back filter feature")
 def test_loopback_filter(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, ports_info):
     """
     @summary: Verify that packet drops by loopback-filter. Loop-back filter means that route to the host
@@ -793,8 +805,7 @@ def test_loopback_filter(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, p
         tcp_sport=pkt_fields["tcp_sport"],
         tcp_dport=pkt_fields["tcp_dport"])
 
-    do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]],
-            setup["neighbor_sniff_ports"])
+    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
 
 
 def test_ip_pkt_with_exceeded_mtu(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, mtu_config, ports_info):
@@ -821,8 +832,7 @@ def test_ip_pkt_with_exceeded_mtu(ptfadapter, duthost, setup, tx_dut_ports, pkt_
         tcp_dport=pkt_fields["tcp_dport"]
     )
 
-    do_test("L2", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], ports_info["dut_iface"], setup["neighbor_sniff_ports"],
-            l2_col_key=RX_ERR)
+    do_test("L2", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], l2_col_key=RX_ERR)
 
 
 def test_ip_pkt_with_expired_ttl(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, ports_info):
@@ -841,8 +851,7 @@ def test_ip_pkt_with_expired_ttl(ptfadapter, duthost, setup, tx_dut_ports, pkt_f
         tcp_dport=pkt_fields["tcp_dport"],
         ip_ttl=0)
 
-    do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]],
-            setup["neighbor_sniff_ports"])
+    do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports)
 
 
 @pytest.mark.parametrize("igmp_version,msg_type", [("v1", "membership_query"), ("v3", "membership_query"), ("v1", "membership_report"),
@@ -902,8 +911,8 @@ def test_non_routable_igmp_pkts(ptfadapter, duthost, setup, tx_dut_ports, pkt_fi
     del pkt[testutils.scapy.scapy.all.Raw]
     pkt = pkt / igmp_types[igmp_version][msg_type]
 
-    do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]],
-            setup["dut_to_ptf_port_map"].values())
+
+    do_test("L3", pkt, ptfadapter, duthost, ports_info,
setup["dut_to_ptf_port_map"].values(), tx_dut_ports) def test_absent_ip_header(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, ports_info): @@ -926,8 +935,7 @@ def test_absent_ip_header(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, pkt.type = 0x800 pkt = pkt/tcp - do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]], - setup["neighbor_sniff_ports"]) + do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("pkt_field, value", [("version", 1), ("chksum", 10), ("ihl", 1)]) @@ -946,8 +954,8 @@ def test_broken_ip_header(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, tcp_dport=pkt_fields["tcp_dport"] ) setattr(pkt[testutils.scapy.scapy.all.IP], pkt_field, value) - do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]], - setup["neighbor_sniff_ports"]) + + do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("eth_dst", ["01:00:5e:00:01:02", "ff:ff:ff:ff:ff:ff"]) @@ -968,8 +976,8 @@ def test_unicast_ip_incorrect_eth_dst(ptfadapter, duthost, setup, tx_dut_ports, tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"] ) - do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]], - setup["neighbor_sniff_ports"]) + + do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_acl_drop(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, acl_setup, ports_info): @@ -991,7 +999,7 @@ def test_acl_drop(ptfadapter, duthost, setup, tx_dut_ports, pkt_fields, acl_setu tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"] ) - base_verification("ACL", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]]) + base_verification("ACL", pkt, ptfadapter, duthost, ports_info, tx_dut_ports) # Verify packets were not egresed the DUT exp_pkt = expected_packet_mask(pkt) @@ -1014,5 +1022,5 @@ def test_egress_drop_on_down_link(ptfadapter, duthost, setup, tx_dut_ports, pkt_ tcp_sport=pkt_fields["tcp_sport"], tcp_dport=pkt_fields["tcp_dport"] ) - do_test("L3", pkt, ptfadapter, duthost, ports_info["ptf_tx_port_id"], tx_dut_ports[ports_info["dut_iface"]], - setup["neighbor_sniff_ports"]) + + do_test("L3", pkt, ptfadapter, duthost, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) diff --git a/tests/test_everflow_testbed.py b/tests/everflow/test_everflow_testbed.py similarity index 97% rename from tests/test_everflow_testbed.py rename to tests/everflow/test_everflow_testbed.py index 4ea9170f85..0eae8bcb8e 100644 --- a/tests/test_everflow_testbed.py +++ b/tests/everflow/test_everflow_testbed.py @@ -1,9 +1,8 @@ import os import time -from ansible_host import AnsibleHost import pytest from ptf_runner import ptf_runner -from abc import ABCMeta, abstractmethod +from abc import abstractmethod import ipaddr as ipaddress BASE_DIR = os.path.dirname(os.path.realpath(__file__)) @@ -168,10 +167,8 @@ def add_route(duthost, prefix, nexthop): duthost: fixture that have duthost information prefix: Ip prefix nexthop: nexthop - return:pass """ duthost.shell("vtysh -c 'configure terminal' -c 'ip route {} {}'".format(prefix, nexthop)) - pass def remove_route(duthost, prefix, nexthop): """ @@ -179,10 +176,8 @@ def remove_route(duthost, prefix, nexthop): duthost: fixture that have duthost information prefix: Ip prefix 
nexthop: nexthop - return:pass """ duthost.shell("vtysh -c 'configure terminal' -c 'no ip route {} {}'".format(prefix, nexthop)) - pass def get_neighbor_info(duthost, dest_port, resolved = True): @@ -242,7 +237,7 @@ def setup_mirror_session(self, duthost): @abstractmethod - def setup_acl_table(): + def setup_acl_table(self, duthost, setup_info, setup_mirror_session): """ setup the acl table return:pass @@ -251,7 +246,7 @@ def setup_acl_table(): @abstractmethod - def mirror_type(): + def mirror_type(self): """ used to parametrized test cases on mirror type :param request: pytest request object @@ -260,7 +255,7 @@ def mirror_type(): pass @abstractmethod - def acl_stage(): + def acl_stage(self): """ get the acl stage return:pass @@ -271,7 +266,6 @@ def acl_stage(): def test_everflow_case1(self, duthost, setup_info, setup_mirror_session, dest_port_type, partial_ptf_runner): """ Test on Resolved route, unresolved route, best prefix match route creation and removal flows """ - rx_port = setup_info[dest_port_type]['src_port'] rx_port_ptf_id = setup_info[dest_port_type] ['src_port_ptf_id'] tx_port = setup_info[dest_port_type]['dest_port'][0] tx_port_ptf_id = setup_info[dest_port_type]['dest_port_ptf_id'][0] @@ -325,7 +319,6 @@ def test_everflow_case2(self, duthost, setup_info, setup_mirror_session, dest_po """Test case 2 - Change neighbor MAC address. Verify that session destination MAC address is changed after neighbor MAC address update.""" - rx_port = setup_info[dest_port_type]['src_port'] rx_port_ptf_id = setup_info[dest_port_type] ['src_port_ptf_id'] tx_port = setup_info[dest_port_type]['dest_port'][0] tx_port_ptf_id = setup_info[dest_port_type]['dest_port_ptf_id'][0] @@ -365,10 +358,8 @@ def test_everflow_case3(self, duthost, setup_info, setup_mirror_session, dest_po """Test case 3 - ECMP route change (remove next hop not used by session). Verify that after removal of next hop that was used by session from ECMP route session state is active.""" - rx_port = setup_info[dest_port_type]['src_port'] rx_port_ptf_id = setup_info[dest_port_type] ['src_port_ptf_id'] tx_port = setup_info[dest_port_type]['dest_port'][0] - tx_port_ptf_id = setup_info[dest_port_type]['dest_port_ptf_id'][0] peer_ip, peer_mac = get_neighbor_info(duthost, tx_port) peer_ip0 = peer_ip @@ -419,7 +410,6 @@ def test_everflow_case4(self, duthost, setup_info, setup_mirror_session, dest_po """Test case 4 - ECMP route change (remove next hop used by session). 
Verify that removal of next hop that is not used by session doesn't cause DST port and MAC change.""" - rx_port = setup_info[dest_port_type]['src_port'] rx_port_ptf_id = setup_info[dest_port_type] ['src_port_ptf_id'] tx_port = setup_info[dest_port_type]['dest_port'][0] tx_port_ptf_id = setup_info[dest_port_type]['dest_port_ptf_id'][0] @@ -435,19 +425,16 @@ def test_everflow_case4(self, duthost, setup_info, setup_mirror_session, dest_po dst_ports = tx_port_ptf_id) tx_port = setup_info[dest_port_type]['dest_port'][1] - tx_port_ptf_id = setup_info[dest_port_type]['dest_port_ptf_id'][1] peer_ip, peer_mac = get_neighbor_info(duthost, tx_port) peer_ip1 = peer_ip add_route(duthost, setup_mirror_session['session_prefixes'][0], peer_ip) tx_port = setup_info[dest_port_type]['dest_port'][2] - tx_port_ptf_id = setup_info[dest_port_type]['dest_port_ptf_id'][2] peer_ip, peer_mac = get_neighbor_info(duthost, tx_port) peer_ip2 = peer_ip add_route(duthost, setup_mirror_session['session_prefixes'][0], peer_ip) - time.sleep(3) @@ -479,7 +466,6 @@ def test_everflow_case5(self, duthost, setup_info, setup_mirror_session, dest_po """Test case 5 - Policer enforced DSCP value/mask test""" - rx_port = setup_info[dest_port_type]['src_port'] rx_port_ptf_id = setup_info[dest_port_type] ['src_port_ptf_id'] tx_port = setup_info[dest_port_type]['dest_port'][0] tx_port_ptf_id = setup_info[dest_port_type]['dest_port_ptf_id'][0] @@ -523,7 +509,6 @@ def test_everflow_case6(self, duthost, setup_info, setup_mirror_session, dest_po """ Test Case 6 - ARP/ND packet mirroring""" - rx_port = setup_info[dest_port_type]['src_port'] rx_port_ptf_id = setup_info[dest_port_type] ['src_port_ptf_id'] tx_port = setup_info[dest_port_type]['dest_port'][0] tx_port_ptf_id = setup_info[dest_port_type]['dest_port_ptf_id'][0] diff --git a/tests/fdb/test_fdb.py b/tests/fdb/test_fdb.py index fa65288a79..33885a6748 100644 --- a/tests/fdb/test_fdb.py +++ b/tests/fdb/test_fdb.py @@ -1,4 +1,3 @@ -from ansible_host import AnsibleHost import pytest import ptf.testutils as testutils @@ -146,9 +145,8 @@ def setup_fdb(ptfadapter, vlan_table, router_mac, pkt_type): @pytest.fixture -def fdb_cleanup(ansible_adhoc, testbed): +def fdb_cleanup(duthost): """ cleanup FDB before and after test run """ - duthost = AnsibleHost(ansible_adhoc, testbed['dut']) try: duthost.command('sonic-clear fdb all') yield @@ -169,7 +167,7 @@ def test_fdb(ansible_adhoc, testbed, ptfadapter, duthost, ptfhost, pkt_type): host_facts = duthost.setup()['ansible_facts'] conf_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts'] - # remove existing IPs from PTF host + # remove existing IPs from PTF host ptfhost.script('scripts/remove_ip.sh') # set unique MACs to PTF interfaces ptfhost.script('scripts/change_mac.sh') diff --git a/tests/fib/test_fib.py b/tests/fib/test_fib.py index 3450b8f98c..5834da97e0 100644 --- a/tests/fib/test_fib.py +++ b/tests/fib/test_fib.py @@ -1,19 +1,21 @@ import pytest -from netaddr import * import time import json import logging -import requests from ptf_runner import ptf_runner from datetime import datetime logger = logging.getLogger(__name__) -HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port'] +# Usually src-mac, dst-mac, vlan-id are optional hash keys. Not all the platform supports these optional hash keys. Not enable these three by default. 
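+# On a platform known to support the optional keys, they could be enabled by
+# switching to the full list below, or selectively, e.g. (hypothetical):
+#     HASH_KEYS += ['vlan-id']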
+# HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'src-mac', 'dst-mac', 'ip-proto', 'vlan-id'] +HASH_KEYS = ['src-ip', 'dst-ip', 'src-port', 'dst-port', 'ingress-port', 'ip-proto'] SRC_IP_RANGE = ['8.0.0.0', '8.255.255.255'] DST_IP_RANGE = ['9.0.0.0', '9.255.255.255'] SRC_IPV6_RANGE = ['20D0:A800:0:00::', '20D0:A800:0:00::FFFF'] DST_IPV6_RANGE = ['20D0:A800:0:01::', '20D0:A800:0:01::FFFF'] +VLANIDS = range(1032, 1279) +VLANIP = '192.168.{}.1/24' g_vars = {} @@ -59,6 +61,40 @@ def build_fib(duthost, config_facts, fibfile, t): else: ofp.write("{} []\n".format(prefix)) +def get_vlan_untag_ports(config_facts): + """ + get all untag vlan ports + """ + vlan_untag_ports = [] + vlans = config_facts.get('VLAN_INTERFACE', {}).keys() + for vlan in vlans: + vlan_member_info = config_facts.get('VLAN_MEMBER', {}).get(vlan, {}) + if vlan_member_info: + for port_name, tag_mode in vlan_member_info.items(): + if tag_mode['tagging_mode'] == 'untagged': + vlan_untag_ports.append(port_name) + + return vlan_untag_ports + +def get_router_interface_ports(config_facts, testbed): + """ + get all physical ports associated with router interface (physical router interface, port channel router interface and vlan router interface) + """ + + ports = config_facts.get('INTERFACE', {}).keys() + portchannels_member_ports = [] + vlan_untag_ports = [] + portchannels_name = config_facts.get('PORTCHANNEL_INTERFACE', {}).keys() + if portchannels_name: + for po_name in portchannels_name: + for port_name in config_facts.get('PORTCHANNEL', {})[po_name]['members']: + portchannels_member_ports.append(port_name) + if 't0' in testbed['topo']['name']: + vlan_untag_ports = get_vlan_untag_ports(config_facts) + + router_interface_ports = ports + portchannels_member_ports + vlan_untag_ports + + return router_interface_ports @pytest.mark.parametrize("ipv4, ipv6, mtu", [pytest.param(True, True, 1514)]) def test_fib(testbed, duthost, ptfhost, ipv4, ipv6, mtu): @@ -120,6 +156,10 @@ def setup_hash(self, testbed, duthost, ptfhost): ptfhost.copy(src="ptftests", dest="/root") logging.info("run ptf test") + # TODO + if 'dst-mac' in self.hash_keys: + self.hash_keys.remove('dst-mac') + # do not test load balancing on L4 port on vs platform as kernel 4.9 # can only do load balance base on L3 meta = config_facts.get('DEVICE_METADATA') @@ -127,12 +167,33 @@ def setup_hash(self, testbed, duthost, ptfhost): self.hash_keys.remove('src-port') self.hash_keys.remove('dst-port') - # TODO - self.hash_keys.remove('ingress-port') - g_vars['testbed_type'] = testbed['topo']['name'] g_vars['router_mac'] = duthost.shell('sonic-cfggen -d -v \'DEVICE_METADATA.localhost.mac\'')["stdout_lines"][0].decode("utf-8") - + + vlan_untag_ports = get_vlan_untag_ports(config_facts) + in_ports_name = get_router_interface_ports(config_facts, testbed) + g_vars['in_ports'] = [config_facts.get('port_index_map', {})[p] for p in in_ports_name] + + # add some vlan for hash_key vlan-id test + if 't0' in g_vars['testbed_type'] and 'vlan-id' in self.hash_keys: + for vlan in VLANIDS: + duthost.shell('config vlan add {}'.format(vlan)) + for port in vlan_untag_ports: + duthost.shell('config vlan member add {} {}'.format(vlan, port)) + duthost.shell('config interface ip add Vlan{} '.format(vlan) + VLANIP.format(vlan%256)) + time.sleep(5) + + yield + + # remove added vlan + if 't0' in g_vars['testbed_type'] and 'vlan-id' in self.hash_keys: + for vlan in VLANIDS: + duthost.shell('config interface ip remove Vlan{} '.format(vlan) + VLANIP.format(vlan%256)) + for port in 
vlan_untag_ports: + duthost.shell('config vlan member del {} {}'.format(vlan, port)) + duthost.shell('config vlan del {}'.format(vlan)) + time.sleep(5) + def test_hash_ipv4(self, ptfhost): log_file = "/tmp/hash_test.HashTest.ipv4.{}.log".format(self.t) logging.info("PTF log file: %s" % log_file) @@ -148,6 +209,8 @@ def test_hash_ipv4(self, ptfhost): "fib_info": "/root/fib_info.txt", "src_ip_range": ",".join(src_ip_range), "dst_ip_range": ",".join(dst_ip_range), + "in_ports": g_vars['in_ports'], + "vlan_ids": VLANIDS, "hash_keys": self.hash_keys }, log_file=log_file, socket_recv_size=16384) @@ -167,6 +230,8 @@ def test_hash_ipv6(self, ptfhost): "fib_info": "/root/fib_info.txt", "src_ip_range": ",".join(src_ip_range), "dst_ip_range": ",".join(dst_ip_range), + "in_ports": g_vars['in_ports'], + "vlan_ids": VLANIDS, "hash_keys": self.hash_keys }, log_file=log_file, socket_recv_size=16384) diff --git a/tests/ipfwd/test_dip_sip.py b/tests/ipfwd/test_dip_sip.py new file mode 100644 index 0000000000..1c3a0f8636 --- /dev/null +++ b/tests/ipfwd/test_dip_sip.py @@ -0,0 +1,166 @@ +import pytest +import ptf.testutils as testutils +from ipaddress import ip_address +import logging + +TOPO_LIST = {'t0', 't1', 't1-lag'} +PORTS_TOPO = {'t1'} +LAG_TOPO = {'t0', 't1-lag'} +DEFAULT_HLIM_TTL = 64 +WAIT_EXPECTED_PACKET_TIMEOUT = 5 + +logger = logging.getLogger(__name__) + + +@pytest.fixture(scope='function', autouse=True) +def prepare_ptf(ptfhost): + # remove existing IPs from ptf host + ptfhost.script('scripts/remove_ip.sh') + # set unique MACs to ptf interfaces + ptfhost.script('scripts/change_mac.sh') + + +def lag_facts(dut, mg_facts): + facts = {} + + if not mg_facts['minigraph_portchannels']: + pytest.fail("minigraph_portchannels is not defined") + host_facts = dut.setup()['ansible_facts'] + # minigraph facts + src_lag = mg_facts['minigraph_portchannel_interfaces'][2]['attachto'] + dst_lag = mg_facts['minigraph_portchannel_interfaces'][0]['attachto'] + facts['src_port'] = src_lag + facts['dst_port'] = dst_lag + logger.info("src_lag is {}, dst_lag is {}".format(src_lag, dst_lag)) + # lldp facts + lldp_facts = dut.lldp()['ansible_facts']['lldp'] + facts['dst_host_mac'] = lldp_facts[mg_facts['minigraph_portchannels'][dst_lag]['members'][0]]['chassis']['mac'] + facts['src_host_mac'] = lldp_facts[mg_facts['minigraph_portchannels'][src_lag]['members'][0]]['chassis']['mac'] + facts['dst_router_mac'] = host_facts['ansible_' + dst_lag]['macaddress'] + facts['src_router_mac'] = host_facts['ansible_' + src_lag]['macaddress'] + facts['dst_router_ipv4'] = host_facts['ansible_' + dst_lag]['ipv4']['address'] + dst_ipv6 = host_facts['ansible_' + dst_lag]['ipv6'] + facts['dst_router_ipv6'] = [(item['address']) for item in dst_ipv6 if item['scope'] == 'global'][0] + facts['dst_port_ids'] = [mg_facts['minigraph_port_indices'][mg_facts['minigraph_portchannels'][dst_lag]['members'][0]]] + facts['src_port_ids'] = [mg_facts['minigraph_port_indices'][mg_facts['minigraph_portchannels'][src_lag]['members'][0]]] + + return facts + + +def port_facts(dut, mg_facts): + facts = {} + + if not mg_facts['minigraph_interfaces']: + pytest.fail("minigraph_interfaces is not defined.") + host_facts = dut.setup()['ansible_facts'] + # minigraph facts + src_port = mg_facts['minigraph_interfaces'][2]['attachto'] + dst_port = mg_facts['minigraph_interfaces'][0]['attachto'] + facts['src_port'] = src_port + facts['dst_port'] = dst_port + logger.info("src_port is {}, dst_port is {}".format(src_port, dst_port)) + # lldp facts + lldp_facts = 
dut.lldp()['ansible_facts']['lldp'] + facts['dst_host_mac'] = lldp_facts[dst_port]['chassis']['mac'] + facts['src_host_mac'] = lldp_facts[src_port]['chassis']['mac'] + facts['dst_router_mac'] = host_facts['ansible_' + dst_port]['macaddress'] + facts['src_router_mac'] = host_facts['ansible_' + src_port]['macaddress'] + facts['dst_router_ipv4'] = host_facts['ansible_' + dst_port]['ipv4']['address'] + dst_ipv6 = host_facts['ansible_' + dst_port]['ipv6'] + facts['dst_router_ipv6'] = [(item['address']) for item in dst_ipv6 if item['scope'] == 'global'][0] + facts['dst_port_ids'] = [mg_facts['minigraph_port_indices'][dst_port]] + facts['src_port_ids'] = [mg_facts['minigraph_port_indices'][src_port]] + + return facts + + +@pytest.fixture(scope='function') +def gather_facts(testbed, duthost): + facts = {} + topo = testbed['topo']['name'] + if topo not in TOPO_LIST: + pytest.skip("Unsupported topology") + logger.info("Gathering facts on DUT ...") + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + + # if minigraph_portchannel_interfaces is not empty - topology with lag + if mg_facts['minigraph_portchannel_interfaces']: + facts = lag_facts(duthost, mg_facts) + else: + facts = port_facts(duthost, mg_facts) + + logger.info("Facts gathered successfully") + + yield facts + + +def run_test_ipv6(ptfadapter, gather_facts): + logger.info("Running test with ipv6 packets") + dst_host_ipv6 = str(ip_address(unicode(gather_facts['dst_router_ipv6']))+1) + + pkt = testutils.simple_udpv6_packet( + eth_dst=gather_facts['src_router_mac'], + eth_src=gather_facts['src_host_mac'], + ipv6_src=dst_host_ipv6, + ipv6_dst=dst_host_ipv6, + ipv6_hlim=DEFAULT_HLIM_TTL + + ) + logger.info("\nSend Packet:\neth_dst: {}, eth_src: {}, ipv6 ip: {}".format( + gather_facts['src_router_mac'], gather_facts['src_host_mac'], dst_host_ipv6) + ) + + testutils.send(ptfadapter, int(gather_facts['src_port_ids'][0]), pkt) + + pkt = testutils.simple_udpv6_packet( + eth_dst=gather_facts['dst_host_mac'], + eth_src=gather_facts['dst_router_mac'], + ipv6_src=dst_host_ipv6, + ipv6_dst=dst_host_ipv6, + ipv6_hlim=DEFAULT_HLIM_TTL-1 + ) + logger.info("\nExpect Packet:\neth_dst: {}, eth_src: {}, ipv6 ip: {}".format( + gather_facts['dst_host_mac'], gather_facts['dst_router_mac'], dst_host_ipv6) + ) + + port_list = [int(port) for port in gather_facts['dst_port_ids']] + testutils.verify_packet_any_port(ptfadapter, pkt, port_list, timeout=WAIT_EXPECTED_PACKET_TIMEOUT) + + +def run_test_ipv4(ptfadapter, gather_facts): + logger.info("Running test with ipv4 packets") + dst_host_ipv4 = str(ip_address(unicode(gather_facts['dst_router_ipv4'])) + 1) + pkt = testutils.simple_udp_packet( + eth_dst=gather_facts['src_router_mac'], + eth_src=gather_facts['src_host_mac'], + ip_src=dst_host_ipv4, + ip_dst=dst_host_ipv4, + ip_ttl=DEFAULT_HLIM_TTL + ) + logger.info("\nSend Packet:\neth_dst: {}, eth_src: {}, ipv6 ip: {}".format( + gather_facts['src_router_mac'], gather_facts['src_host_mac'], dst_host_ipv4) + ) + + testutils.send(ptfadapter, int(gather_facts['src_port_ids'][0]), pkt) + + pkt = testutils.simple_udp_packet( + eth_dst=gather_facts['dst_host_mac'], + eth_src=gather_facts['dst_router_mac'], + ip_src=dst_host_ipv4, + ip_dst=dst_host_ipv4, + ip_ttl=DEFAULT_HLIM_TTL-1 + ) + logger.info("\nExpect Packet:\neth_dst: {}, eth_src: {}, ipv6 ip: {}".format( + gather_facts['dst_host_mac'], gather_facts['dst_router_mac'], dst_host_ipv4) + ) + + port_list = [int(port) for port in gather_facts['dst_port_ids']] + 
testutils.verify_packet_any_port(ptfadapter, pkt, port_list, timeout=WAIT_EXPECTED_PACKET_TIMEOUT) + + +def test_dip_sip(request, gather_facts): + ptfadapter = request.getfixturevalue('ptfadapter') + ptfadapter.reinit() + + run_test_ipv4(ptfadapter, gather_facts) + run_test_ipv6(ptfadapter, gather_facts) diff --git a/tests/ipfwd/test_dir_bcast.py b/tests/ipfwd/test_dir_bcast.py new file mode 100644 index 0000000000..8b58fa5f84 --- /dev/null +++ b/tests/ipfwd/test_dir_bcast.py @@ -0,0 +1,39 @@ +import pytest + +from ptf_runner import ptf_runner +from datetime import datetime + +def test_dir_bcast(duthost, ptfhost, testbed, fib): + support_testbed_types = frozenset(['t0', 't0-16', 't0-56', 't0-64', 't0-64-32', 't0-116']) + testbed_type = testbed['topo']['name'] + if testbed_type not in support_testbed_types: + pytest.skip("Not support given test bed type %s" % testbed_type) + + # Copy PTF test into PTF-docker + ptfhost.copy(src="ptftests", dest="/root") + + # Copy VLAN information file to PTF-docker + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + extra_vars = { + 'minigraph_vlan_interfaces': mg_facts['minigraph_vlan_interfaces'], + 'minigraph_vlans': mg_facts['minigraph_vlans'], + 'minigraph_port_indices': mg_facts['minigraph_port_indices'] + } + ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars) + ptfhost.template(src="../ansible/roles/test/templates/fdb.j2", dest="/root/vlan_info.txt") + + # Start PTF runner + host_facts = duthost.setup()['ansible_facts'] + params = { + 'testbed_type': testbed_type, + 'router_mac': host_facts['ansible_Ethernet0']['macaddress'], + 'vlan_info': '/root/vlan_info.txt' + } + log_file = "/tmp/dir_bcast.BcastTest.{}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S")) + ptf_runner( + ptfhost, + 'ptftests', + 'dir_bcast_test.BcastTest', + '/root/ptftests', + params=params, + log_file=log_file) \ No newline at end of file diff --git a/tests/test_mtu.py b/tests/ipfwd/test_mtu.py similarity index 100% rename from tests/test_mtu.py rename to tests/ipfwd/test_mtu.py diff --git a/tests/test_lldp.py b/tests/lldp/test_lldp.py similarity index 71% rename from tests/test_lldp.py rename to tests/lldp/test_lldp.py index bcbc797603..6907b56626 100644 --- a/tests/test_lldp.py +++ b/tests/lldp/test_lldp.py @@ -1,22 +1,18 @@ -from ansible_host import AnsibleHost import logging import pytest +logger = logging.getLogger(__name__) + @pytest.fixture(scope="module", autouse=True) def setup_check_topo(testbed): if testbed['topo']['type'] == 'ptf': pytest.skip('Unsupported topology') -logger = logging.getLogger(__name__) - -def test_lldp(localhost, ansible_adhoc, testbed, collect_techsupport): +def test_lldp(duthost, localhost, collect_techsupport): """ verify the LLDP message on DUT """ - hostname = testbed['dut'] - ans_host = AnsibleHost(ansible_adhoc, hostname) - - mg_facts = ans_host.minigraph_facts(host=hostname)['ansible_facts'] - lldp_facts = ans_host.lldp()['ansible_facts'] + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + lldp_facts = duthost.lldp()['ansible_facts'] minigraph_lldp_nei = {} for k, v in mg_facts['minigraph_neighbors'].items(): @@ -35,19 +31,16 @@ def test_lldp(localhost, ansible_adhoc, testbed, collect_techsupport): assert v['port']['ifname'] == mg_facts['minigraph_neighbors'][k]['port'] -def test_lldp_neighbor(localhost, ansible_adhoc, testbed, eos, collect_techsupport): +def test_lldp_neighbor(duthost, localhost, eos, collect_techsupport): """ verify LLDP information on 
neighbors """ - hostname = testbed['dut'] - ans_host = AnsibleHost(ansible_adhoc, hostname) - mg_facts = ans_host.minigraph_facts(host=hostname)['ansible_facts'] - res = ans_host.shell("docker exec -i lldp lldpcli show chassis | grep \"SysDescr:\" | sed -e 's/^\\s*SysDescr:\\s*//g'") + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + res = duthost.shell("docker exec -i lldp lldpcli show chassis | grep \"SysDescr:\" | sed -e 's/^\\s*SysDescr:\\s*//g'") dut_system_description = res['stdout'] - lldp_facts = ans_host.lldp()['ansible_facts'] - host_facts = ans_host.setup()['ansible_facts'] - lhost = AnsibleHost(ansible_adhoc, 'localhost', True) + lldp_facts = duthost.lldp()['ansible_facts'] + host_facts = duthost.setup()['ansible_facts'] - config_facts = ans_host.config_facts(host=hostname, source="running")['ansible_facts'] + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] nei_meta = config_facts.get('DEVICE_NEIGHBOR_METADATA', {}) for k, v in lldp_facts['lldp'].items(): @@ -61,11 +54,11 @@ def test_lldp_neighbor(localhost, ansible_adhoc, testbed, eos, collect_techsuppo logger.info("Neighbor device {} does not sent management IP via lldp".format(v['chassis']['name'])) hostip = nei_meta[v['chassis']['name']]['mgmt_addr'] - nei_lldp_facts = lhost.lldp_facts(host=hostip, version='v2c', community=eos['snmp_rocommunity'])['ansible_facts'] + nei_lldp_facts = localhost.lldp_facts(host=hostip, version='v2c', community=eos['snmp_rocommunity'])['ansible_facts'] print nei_lldp_facts neighbor_interface = v['port']['ifname'] # Verify the published DUT system name field is correct - assert nei_lldp_facts['ansible_lldp_facts'][neighbor_interface]['neighbor_sys_name'] == hostname + assert nei_lldp_facts['ansible_lldp_facts'][neighbor_interface]['neighbor_sys_name'] == duthost.hostname # Verify the published DUT chassis id field is not empty assert nei_lldp_facts['ansible_lldp_facts'][neighbor_interface]['neighbor_chassis_id'] == \ "0x%s" % (host_facts['ansible_eth0']['macaddress'].replace(':', '')) diff --git a/tests/ntp/test_ntp.py b/tests/ntp/test_ntp.py index 2d4261eae7..80d094f687 100644 --- a/tests/ntp/test_ntp.py +++ b/tests/ntp/test_ntp.py @@ -10,8 +10,11 @@ ] @pytest.fixture(scope="module") -def setup_ntp(ptfhost, duthost): +def setup_ntp(ptfhost, duthost, creds): """setup ntp client and server""" + if creds.get('proxy_env'): + # If testbed is behaind proxy then force ntpd inside ptf use local time + ptfhost.lineinfile(path="/etc/ntp.conf", line="server 127.127.1.0 prefer") # enable ntp server ptfhost.service(name="ntp", state="started") @@ -20,10 +23,10 @@ def setup_ntp(ptfhost, duthost): config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] ntp_servers = config_facts.get('NTP_SERVER', {}) for ntp_server in ntp_servers: - duthost.command("sudo config ntp del %s" % ntp_server) + duthost.command("config ntp del %s" % ntp_server) ptfip = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host'] - duthost.command("sudo config ntp add %s" % ptfip) + duthost.command("config ntp add %s" % ptfip) wait_until(120, 5, check_ntp_status, ptfhost) @@ -31,11 +34,10 @@ def setup_ntp(ptfhost, duthost): # stop ntp server ptfhost.service(name="ntp", state="stopped") - # reset ntp client configuration - duthost.command("sudo config ntp del %s" % ptfip) + duthost.command("config ntp del %s" % ptfip) for ntp_server in ntp_servers: - duthost.command("sudo config ntp add %s" % 
ntp_server) + duthost.command("config ntp add %s" % ntp_server) def check_ntp_status(host): res = host.command("ntpstat") @@ -43,7 +45,7 @@ def check_ntp_status(host): return False return True -def test_ntp(testbed_devices, duthost, setup_ntp): +def test_ntp(duthost, setup_ntp): """ verify the LLDP message on DUT """ duthost.service(name='ntp', state='stopped') diff --git a/tests/pc/test_lag_2.py b/tests/pc/test_lag_2.py new file mode 100644 index 0000000000..aac3b2600a --- /dev/null +++ b/tests/pc/test_lag_2.py @@ -0,0 +1,261 @@ +import pytest + +import json +import time +import logging +import os + +from ptf_runner import ptf_runner +from common.devices import AnsibleHostBase +from common.fixtures.conn_graph_facts import conn_graph_facts +from common.utilities import wait_until + +@pytest.fixture(scope="module") +def common_setup_teardown(duthost, ptfhost, testbed): + logging.info("########### Setup for lag testing ###########") + + lag_facts = duthost.lag_facts(host = duthost.hostname)['ansible_facts']['lag_facts'] + if lag_facts['names'] == []: + pytest.skip("No lag configuration found in %s" % duthost.hostname) + + # Copy PTF test into PTF-docker for test LACP DU + test_files = ['lag_test.py', 'acs_base_test.py', 'router_utils.py'] + for test_file in test_files: + src = "../ansible/roles/test/files/acstests/%s" % test_file + dst = "/tmp/%s" % test_file + ptfhost.copy(src=src, dest=dst) + + # Copy tests to the PTF-docker + ptfhost.copy(src="ptftests", dest="/root") + + # Inlucde testbed topology configuration + testbed_type = testbed['topo']['name'] + + support_testbed_types = frozenset(['t1-lag', 't0', 't0-116']) + if testbed_type not in support_testbed_types: + pytest.skip("Not support given test bed type %s" % testbed_type) + + yield duthost, ptfhost, lag_facts + +class LagTest: + def __init__(self, duthost, ptfhost, nbrhosts, fanouthosts, conn_graph_facts): + self.duthost = duthost + self.ptfhost = ptfhost + self.nbrhosts = nbrhosts + self.fanouthosts = fanouthosts + self.mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + self.conn_graph_facts = conn_graph_facts + self.vm_neighbors = self.mg_facts['minigraph_neighbors'] + self.fanout_neighbors = self.conn_graph_facts['device_conn'] if 'device_conn' in self.conn_graph_facts else {} + + def __get_lag_facts(self): + return self.duthost.lag_facts(host = self.duthost.hostname)['ansible_facts']['lag_facts'] + + def __get_lag_intf_info(self, lag_facts, lag_name): + # Figure out interface informations + po_interfaces = lag_facts['lags'][lag_name]['po_config']['ports'] + intf = lag_facts['lags'][lag_name]['po_config']['ports'].keys()[0] + return intf, po_interfaces + + def __check_flap(self, lag_facts, lag_name): + po_intf_num = len(lag_facts['lags'][lag_name]['po_config']['ports']) + po_min_links = lag_facts['lags'][lag_name]['po_config']['runner']['min_ports'] + return ((po_intf_num - 1) * 100 / po_min_links) < 75 + + def __check_shell_output(self, host, command): + out = host.shell(command) + return out['stdout'] == 'True' + + def __check_intf_state(self, vm_host, intf, expect): + return vm_host.check_intf_link_state(vm_host, intf) == expect + + def __verify_lag_lacp_timing(self, lacp_timer, exp_iface): + if exp_iface is None: + return + + # Check LACP timing + params = { + 'exp_iface': exp_iface, + 'timeout': 35, + 'packet_timing': lacp_timer, + 'ether_type': 0x8809, + 'interval_count': 3 + } + ptf_runner(self.ptfhost, '/tmp', "lag_test.LacpTimingTest", '/root/ptftests', params=params) + + def 
__verify_lag_minlink( + self, + host, + lag_name, + intf, + neighbor_intf, po_interfaces, po_flap, deselect_time, wait_timeout = 30): + delay = 5 + + try: + host.shutdown(neighbor_intf) + + # Let PortalChannel react to neighbor interface shutdown + time.sleep(deselect_time) + + # Verify PortChannel interfaces are up correctly + for po_intf in po_interfaces.keys(): + if po_intf != intf: + command = 'bash -c "teamdctl %s state dump" | python -c "import sys, json; print json.load(sys.stdin)[\'ports\'][\'%s\'][\'runner\'][\'selected\']"' % (lag_name, po_intf) + wait_until(wait_timeout, delay, self.__check_shell_output, self.duthost, command) + + # Refresh lag facts + lag_facts = self.__get_lag_facts() + + # Verify lag member is marked deselected for the shutdown port and all other lag member interfaces are marked selected + for po_intf in po_interfaces.keys(): + if po_intf != intf: + assert lag_facts['lags'][lag_name]['po_stats']['ports'][po_intf]['runner']['selected'] + else: + assert not lag_facts['lags'][lag_name]['po_stats']['ports'][po_intf]['runner']['selected'] + + # Verify PortChannel's interface are marked down/up correctly if it should down/up + if po_flap == True: + assert lag_facts['lags'][lag_name]['po_intf_stat'] == 'Down' + else: + assert lag_facts['lags'][lag_name]['po_intf_stat'] == 'Up' + finally: + # Bring back port in case test error and left testbed in unknow stage + # Bring up neighbor interface + host.no_shutdown(neighbor_intf) + + # Verify PortChannel interfaces are up correctly + for po_intf in po_interfaces.keys(): + if po_intf != intf: + command = 'bash -c "teamdctl %s state dump" | python -c "import sys, json; print json.load(sys.stdin)[\'ports\'][\'%s\'][\'link\'][\'up\']"' % (lag_name, po_intf) + wait_until(wait_timeout, delay, self.__check_shell_output, self.duthost, command) + + def run_single_lag_lacp_rate_test(self, lag_name): + logging.info("Start checking single lag lacp rate for: %s" % lag_name) + + lag_facts = self.__get_lag_facts() + intf, po_interfaces = self.__get_lag_intf_info(lag_facts, lag_name) + peer_device = self.vm_neighbors[intf]['name'] + + # Prepare for the remote VM interfaces that using PTF docker to check if the LACP DU packet rate is correct + iface_behind_lag_member = [] + for neighbor_intf in self.vm_neighbors.keys(): + if peer_device == self.vm_neighbors[neighbor_intf]['name']: + iface_behind_lag_member.append(self.mg_facts['minigraph_port_indices'][neighbor_intf]) + + neighbor_lag_intfs = [] + for po_intf in po_interfaces: + neighbor_lag_intfs.append(self.vm_neighbors[po_intf]['port']) + + try: + lag_rate_current_setting = None + + # Get the vm host(veos) by it host name + vm_host = self.nbrhosts[peer_device]['host'] + + # Make sure all lag members on VM are set to fast + logging.info("Changing lacp rate to fast for %s in %s" % (neighbor_lag_intfs[0], peer_device)) + vm_host.set_interface_lacp_rate_mode(neighbor_lag_intfs[0], 'fast') + lag_rate_current_setting = 'fast' + time.sleep(5) + for iface_behind_lag in iface_behind_lag_member: + self.__verify_lag_lacp_timing(1, iface_behind_lag) + + # Make sure all lag members on VM are set to slow + vm_host.set_interface_lacp_rate_mode(neighbor_lag_intfs[0], 'normal') + lag_rate_current_setting = 'slow' + time.sleep(5) + for iface_behind_lag in iface_behind_lag_member: + self.__verify_lag_lacp_timing(30, iface_behind_lag) + finally: + # Restore lag rate setting on VM in case of failure + if lag_rate_current_setting == 'fast': + vm_host.set_interface_lacp_rate_mode(neighbor_lag_intfs[0], 
'normal') + + def run_single_lag_test(self, lag_name): + logging.info("Start checking single lag for: %s" % lag_name) + + lag_facts = self.__get_lag_facts() + intf, po_interfaces = self.__get_lag_intf_info(lag_facts, lag_name) + po_flap = self.__check_flap(lag_facts, lag_name) + + # Figure out fanout switches info if exists for the lag member and run minlink test + if intf in self.fanout_neighbors.keys(): + peer_device = self.fanout_neighbors[intf]['peerdevice'] + neighbor_intf = self.fanout_neighbors[intf]['peerport'] + self.__verify_lag_minlink(self.fanouthosts[peer_device], lag_name, intf, neighbor_intf, po_interfaces, po_flap, deselect_time=5) + + # Figure out remote VM and interface info for the lag member and run minlink test + peer_device = self.vm_neighbors[intf]['name'] + neighbor_intf = self.vm_neighbors[intf]['port'] + self.__verify_lag_minlink(self.nbrhosts[peer_device]['host'], lag_name, intf, neighbor_intf, po_interfaces, po_flap, deselect_time=95) + + def run_lag_fallback_test(self, lag_name): + logging.info("Start checking lag fall back for: %s" % lag_name) + + lag_facts = self.__get_lag_facts() + intf, po_interfaces = self.__get_lag_intf_info(lag_facts, lag_name) + po_fallback = lag_facts['lags'][lag_name]['po_config']['runner']['fallback'] + + # Figure out remote VM and interface info for the lag member and run lag fallback test + peer_device = self.vm_neighbors[intf]['name'] + neighbor_intf = self.vm_neighbors[intf]['port'] + vm_host = self.nbrhosts[peer_device]['host'] + + wait_timeout = 120 + delay = 5 + try: + # Shut down neighbor interface + vm_host.shutdown(neighbor_intf) + wait_until(wait_timeout, delay, self.__check_intf_state, vm_host, neighbor_intf, False) + + # Refresh lag facts + lag_facts = self.__get_lag_facts() + + # Get teamshow result + teamshow_result = self.duthost.shell('teamshow') + logging.debug("Teamshow result: %s" % teamshow_result) + + # Verify lag members + # 1. All other lag should keep selected state + # 2. Shutdown port should keep selected state if fallback enabled + # 3. 
Shutdown port should be marked as deselected if fallback is disabled
+            #    (only the shutdown port may be deselected; all other lag member interfaces must stay selected)
+            for po_intf in po_interfaces.keys():
+                if po_intf != intf or po_fallback:
+                    assert lag_facts['lags'][lag_name]['po_stats']['ports'][po_intf]['runner']['selected']
+                else:
+                    assert not lag_facts['lags'][lag_name]['po_stats']['ports'][po_intf]['runner']['selected']
+
+            # The port channel should be marked Up/Down according to the po fallback setting
+            if po_fallback:
+                assert lag_facts['lags'][lag_name]['po_intf_stat'] == 'Up'
+            else:
+                assert lag_facts['lags'][lag_name]['po_intf_stat'] == 'Down'
+
+        finally:
+            # Bring up neighbor interface
+            vm_host.no_shutdown(neighbor_intf)
+            wait_until(wait_timeout, delay, self.__check_intf_state, vm_host, neighbor_intf, True)
+
+def test_lag(common_setup_teardown, nbrhosts, fanouthosts, conn_graph_facts):
+    duthost, ptfhost, lag_facts = common_setup_teardown
+    test_instance = LagTest(duthost, ptfhost, nbrhosts, fanouthosts, conn_graph_facts)
+
+    # Test each lag
+    for lag_name in lag_facts['names']:
+        try:
+            lag_facts['lags'][lag_name]['po_config']['runner']['min_ports']
+        except KeyError:
+            logging.info("Skip [check_single_lag_lacp_rate] for lag (%s) because min_ports is not set" % lag_name)
+            logging.info("Skip [check_single_lag] for lag (%s) because min_ports is not set" % lag_name)
+            continue
+        else:
+            test_instance.run_single_lag_lacp_rate_test(lag_name)
+            test_instance.run_single_lag_test(lag_name)
+
+        try:
+            lag_facts['lags'][lag_name]['po_config']['runner']['fallback']
+        except KeyError:
+            logging.info("Skip [check_lag_fallback] for lag (%s) because fallback is not set for it" % lag_name)
+        else:
+            test_instance.run_lag_fallback_test(lag_name)
diff --git a/tests/pc/test_po_update.py b/tests/pc/test_po_update.py
index 4938c0b7eb..cf0ec6415b 100644
--- a/tests/pc/test_po_update.py
+++ b/tests/pc/test_po_update.py
@@ -1,19 +1,13 @@
-from ansible_host import AnsibleHost
 import time
 
 import pytest
 import logging
 
-def test_po_update(ansible_adhoc, testbed):
+def test_po_update(duthost):
     """
     test port channel add/deletion as well ip address configuration
     """
-
-    hostname = testbed['dut']
-    ans_host = AnsibleHost(ansible_adhoc, hostname)
-
-    mg_facts = ans_host.minigraph_facts(host=hostname)['ansible_facts']
-    int_facts = ans_host.interface_facts()['ansible_facts']
-    bgp_facts = ans_host.bgp_facts()['ansible_facts']
+    mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
+    int_facts = duthost.interface_facts()['ansible_facts']
 
     # Initialize portchannel
     if len(mg_facts['minigraph_portchannels'].keys()) == 0:
@@ -41,58 +35,58 @@
 
     # Step 1: Remove portchannel members from portchannel
     for member in portchannel_members:
-        ans_host.shell("config portchannel member del %s %s" % (portchannel, member))
+        duthost.shell("config portchannel member del %s %s" % (portchannel, member))
     remove_portchannel_members = True
 
     # Step 2: Remove portchannel ip from portchannel
-    ans_host.shell("config interface ip remove %s %s/31" % (portchannel, portchannel_ip))
+    duthost.shell("config interface ip remove %s %s/31" % (portchannel, portchannel_ip))
     remove_portchannel_ip = True
 
     time.sleep(30)
-    int_facts = ans_host.interface_facts()['ansible_facts']
+    int_facts = duthost.interface_facts()['ansible_facts']
     assert not int_facts['ansible_interface_facts'][portchannel]['link']
-    bgp_facts =
duthost.bgp_facts()['ansible_facts'] assert bgp_facts['bgp_statistics']['ipv4_idle'] == 1 # Step 3: Create tmp portchannel - ans_host.shell("config portchannel add %s" % tmp_portchannel) + duthost.shell("config portchannel add %s" % tmp_portchannel) create_tmp_portchannel = True # Step 4: Add portchannel member to tmp portchannel for member in portchannel_members: - ans_host.shell("config portchannel member add %s %s" % (tmp_portchannel, member)) + duthost.shell("config portchannel member add %s %s" % (tmp_portchannel, member)) add_tmp_portchannel_members = True # Step 5: Add portchannel ip to tmp portchannel - ans_host.shell("config interface ip add %s %s/31" % (tmp_portchannel, portchannel_ip)) - int_facts = ans_host.interface_facts()['ansible_facts'] + duthost.shell("config interface ip add %s %s/31" % (tmp_portchannel, portchannel_ip)) + int_facts = duthost.interface_facts()['ansible_facts'] assert int_facts['ansible_interface_facts'][tmp_portchannel]['ipv4']['address'] == portchannel_ip add_tmp_portchannel_ip = True time.sleep(30) - int_facts = ans_host.interface_facts()['ansible_facts'] + int_facts = duthost.interface_facts()['ansible_facts'] assert int_facts['ansible_interface_facts'][tmp_portchannel]['link'] - bgp_facts = ans_host.bgp_facts()['ansible_facts'] + bgp_facts = duthost.bgp_facts()['ansible_facts'] assert bgp_facts['bgp_statistics']['ipv4_idle'] == 0 finally: # Recover all states if add_tmp_portchannel_ip: - ans_host.shell("config interface ip remove %s %s/31" % (tmp_portchannel, portchannel_ip)) + duthost.shell("config interface ip remove %s %s/31" % (tmp_portchannel, portchannel_ip)) time.sleep(5) if add_tmp_portchannel_members: for member in portchannel_members: - ans_host.shell("config portchannel member del %s %s" % (tmp_portchannel, member)) + duthost.shell("config portchannel member del %s %s" % (tmp_portchannel, member)) time.sleep(5) if create_tmp_portchannel: - ans_host.shell("config portchannel del %s" % tmp_portchannel) + duthost.shell("config portchannel del %s" % tmp_portchannel) if remove_portchannel_ip: - ans_host.shell("config interface ip add %s %s/31" % (portchannel, portchannel_ip)) + duthost.shell("config interface ip add %s %s/31" % (portchannel, portchannel_ip)) if remove_portchannel_members: for member in portchannel_members: - ans_host.shell("config portchannel member add %s %s" % (portchannel, member)) + duthost.shell("config portchannel member add %s %s" % (portchannel, member)) time.sleep(30) - bgp_facts = ans_host.bgp_facts()['ansible_facts'] + bgp_facts = duthost.bgp_facts()['ansible_facts'] assert bgp_facts['bgp_statistics']['ipv4_idle'] == 0 diff --git a/tests/pfcwd/conftest.py b/tests/pfcwd/conftest.py new file mode 100644 index 0000000000..3521913f29 --- /dev/null +++ b/tests/pfcwd/conftest.py @@ -0,0 +1,75 @@ +import logging +import pytest +from common.fixtures.conn_graph_facts import conn_graph_facts +from files.pfcwd_helper import TrafficPorts, set_pfc_timers, select_test_ports + +logger = logging.getLogger(__name__) + +def pytest_addoption(parser): + """ + Command line args specific for the pfcwd test + + Args: + parser: pytest parser object + + Returns: + None + + """ + parser.addoption('--warm-reboot', action='store', type=bool, default=False, + help='Whether warm reboot should be enabled') + +@pytest.fixture(scope="module") +def setup_pfc_test(duthost, ptfhost, conn_graph_facts): + """ + Sets up all the parameters needed for the PFC Watchdog tests + + Args: + duthost: AnsibleHost instance for DUT + ptfhost: AnsibleHost instance 
for PTF + conn_graph_facts: fixture that contains the parsed topology info + + Yields: + setup_info: dictionary containing pfc timers, generated test ports and selected test ports + """ + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + port_list = mg_facts['minigraph_ports'].keys() + ports = (' ').join(port_list) + neighbors = conn_graph_facts['device_conn'] + dut_facts = duthost.setup()['ansible_facts'] + dut_eth0_ip = dut_facts['ansible_eth0']['ipv4']['address'] + dut_eth0_mac = dut_facts['ansible_eth0']['macaddress'] + vlan_nw = None + + if mg_facts['minigraph_vlans']: + # gather all vlan specific info + vlan_addr = mg_facts['minigraph_vlan_interfaces'][0]['addr'] + vlan_prefix = mg_facts['minigraph_vlan_interfaces'][0]['prefixlen'] + vlan_dev = mg_facts['minigraph_vlan_interfaces'][0]['attachto'] + vlan_ips = duthost.get_ip_in_range(num=1, prefix="{}/{}".format(vlan_addr, vlan_prefix), exclude_ips=[vlan_addr])['ansible_facts']['generated_ips'] + vlan_nw = vlan_ips[0].split('/')[0] + + # set unique MACS to PTF interfaces + ptfhost.script("./scripts/change_mac.sh") + + duthost.shell("ip route flush {}/32".format(vlan_nw)) + duthost.shell("ip route add {}/32 dev {}".format(vlan_nw, vlan_dev)) + + # build the port list for the test + tp_handle = TrafficPorts(mg_facts, neighbors, vlan_nw) + test_ports = tp_handle.build_port_list() + # select a subset of ports from the generated port list + selected_ports = select_test_ports(test_ports) + + setup_info = { 'test_ports': test_ports, + 'selected_test_ports': selected_ports, + 'pfc_timers' : set_pfc_timers() + } + + # set poll interval + duthost.command("pfcwd interval {}".format(setup_info['pfc_timers']['pfc_wd_poll_time'])) + + yield setup_info + + logger.info("--- Starting Pfcwd ---") + duthost.command("pfcwd start_default") diff --git a/tests/platform/broadcom/conftest.py b/tests/pfcwd/files/__init__.py similarity index 100% rename from tests/platform/broadcom/conftest.py rename to tests/pfcwd/files/__init__.py diff --git a/tests/pfcwd/files/pfcwd_helper.py b/tests/pfcwd/files/pfcwd_helper.py new file mode 100644 index 0000000000..c2fa58e928 --- /dev/null +++ b/tests/pfcwd/files/pfcwd_helper.py @@ -0,0 +1,237 @@ +import datetime +import ipaddress + +class TrafficPorts(object): + """ Generate a list of ports needed for the PFC Watchdog test""" + def __init__(self, mg_facts, neighbors, vlan_nw): + """ + Args: + mg_facts (dict): parsed minigraph info + neighbors (list): 'device_conn' info from connection graph facts + vlan_nw (string): ip in the vlan range specified in the DUT + + """ + self.mg_facts = mg_facts + self.bgp_info = self.mg_facts['minigraph_bgp'] + self.port_idx_info = self.mg_facts['minigraph_port_indices'] + self.pc_info = self.mg_facts['minigraph_portchannels'] + self.vlan_info = self.mg_facts['minigraph_vlans'] + self.neighbors = neighbors + self.vlan_nw = vlan_nw + self.test_ports = dict() + self.pfc_wd_rx_port = None + self.pfc_wd_rx_port_addr = None + self.pfc_wd_rx_neighbor_addr = None + self.pfc_wd_rx_port_id = None + + def build_port_list(self): + """ + Generate a list of ports to be used for the test + + For T0 topology, the port list is built parsing the portchannel and vlan info and for T1, + port list is constructed from the interface info + """ + if self.mg_facts['minigraph_interfaces']: + self.parse_intf_list() + elif self.mg_facts['minigraph_portchannels']: + self.parse_pc_list() + if self.mg_facts['minigraph_vlans']: + self.test_ports.update(self.parse_vlan_list()) + return 
self.test_ports + + def parse_intf_list(self): + """ + Build the port info from the ports in 'minigraph_interfaces' + + The constructed port info is a dict with a port as the key (transmit port) and value contains + all the info associated with this port (its fanout neighbor, receive port, receive ptf id, + transmit ptf id, neighbor addr etc). The first port in the list is assumed to be the Rx port. + The rest of the ports will use this port as the Rx port while populating their dict + info. The selected Rx port when used as a transmit port will use the next port in + the list as its associated Rx port + """ + pfc_wd_test_port = None + first_pair = False + for intf in self.mg_facts['minigraph_interfaces']: + if ipaddress.ip_address(unicode(intf['addr'])).version != 4: + continue + # first port + if not self.pfc_wd_rx_port: + self.pfc_wd_rx_port = intf['attachto'] + self.pfc_wd_rx_port_addr = intf['addr'] + self.pfc_wd_rx_port_id = self.port_idx_info[self.pfc_wd_rx_port] + elif not pfc_wd_test_port: + # second port + first_pair = True + + # populate info for all ports except the first one + if first_pair or pfc_wd_test_port: + pfc_wd_test_port = intf['attachto'] + pfc_wd_test_port_addr = intf['addr'] + pfc_wd_test_port_id = self.port_idx_info[pfc_wd_test_port] + pfc_wd_test_neighbor_addr = None + + for item in self.bgp_info: + if ipaddress.ip_address(unicode(item['addr'])).version != 4: + continue + if not self.pfc_wd_rx_neighbor_addr and item['peer_addr'] == self.pfc_wd_rx_port_addr: + self.pfc_wd_rx_neighbor_addr = item['addr'] + if item['peer_addr'] == pfc_wd_test_port_addr: + pfc_wd_test_neighbor_addr = item['addr'] + + self.test_ports[pfc_wd_test_port] = {'test_neighbor_addr': pfc_wd_test_neighbor_addr, + 'rx_port': [self.pfc_wd_rx_port], + 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, + 'peer_device': self.neighbors[pfc_wd_test_port]['peerdevice'], + 'test_port_id': pfc_wd_test_port_id, + 'rx_port_id': [self.pfc_wd_rx_port_id], + 'test_port_type': 'interface' + } + # populate info for the first port + if first_pair: + self.test_ports[self.pfc_wd_rx_port] = {'test_neighbor_addr': self.pfc_wd_rx_neighbor_addr, + 'rx_port': [pfc_wd_test_port], + 'rx_neighbor_addr': pfc_wd_test_neighbor_addr, + 'peer_device': self.neighbors[self.pfc_wd_rx_port]['peerdevice'], + 'test_port_id': self.pfc_wd_rx_port_id, + 'rx_port_id': [pfc_wd_test_port_id], + 'test_port_type': 'interface' + } + + first_pair = False + + def parse_pc_list(self): + """ + Build the port info from the ports in portchannel + + The constructed port info is a dict with a port as the key (transmit port) and value contains + all the info associated with this port (its fanout neighbor, receive ports, receive + ptf ids, transmit ptf ids, neighbor portchannel addr, its own portchannel addr etc). + The first port in the list is assumed to be the Rx port. The rest + of the ports will use this port as the Rx port while populating their dict + info. 
The selected Rx port when used as a transmit port will use the next port in + the list as its associated Rx port + """ + pfc_wd_test_port = None + first_pair = False + for item in self.mg_facts['minigraph_portchannel_interfaces']: + if ipaddress.ip_address(unicode(item['addr'])).version != 4: + continue + pc = item['attachto'] + # first port + if not self.pfc_wd_rx_port: + self.pfc_wd_rx_portchannel = pc + self.pfc_wd_rx_port = self.pc_info[pc]['members'] + self.pfc_wd_rx_port_addr = item['addr'] + self.pfc_wd_rx_port_id = [self.port_idx_info[port] for port in self.pfc_wd_rx_port] + elif not pfc_wd_test_port: + # second port + first_pair = True + + # populate info for all ports except the first one + if first_pair or pfc_wd_test_port: + pfc_wd_test_portchannel = pc + pfc_wd_test_port = self.pc_info[pc]['members'] + pfc_wd_test_port_addr = item['addr'] + pfc_wd_test_port_id = [self.port_idx_info[port] for port in pfc_wd_test_port] + pfc_wd_test_neighbor_addr = None + + for bgp_item in self.bgp_info: + if ipaddress.ip_address(unicode(bgp_item['addr'])).version != 4: + continue + if not self.pfc_wd_rx_neighbor_addr and bgp_item['peer_addr'] == self.pfc_wd_rx_port_addr: + self.pfc_wd_rx_neighbor_addr = bgp_item['addr'] + if bgp_item['peer_addr'] == pfc_wd_test_port_addr: + pfc_wd_test_neighbor_addr = bgp_item['addr'] + + for port in pfc_wd_test_port: + self.test_ports[port] = {'test_neighbor_addr': pfc_wd_test_neighbor_addr, + 'rx_port': self.pfc_wd_rx_port, + 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, + 'peer_device': self.neighbors[port]['peerdevice'], + 'test_port_id': self.port_idx_info[port], + 'rx_port_id': self.pfc_wd_rx_port_id, + 'test_portchannel_members': pfc_wd_test_port_id, + 'test_port_type': 'portchannel' + } + # populate info for the first port + if first_pair: + for port in self.pfc_wd_rx_port: + self.test_ports[port] = {'test_neighbor_addr': self.pfc_wd_rx_neighbor_addr, + 'rx_port': pfc_wd_test_port, + 'rx_neighbor_addr': pfc_wd_test_neighbor_addr, + 'peer_device': self.neighbors[port]['peerdevice'], + 'test_port_id': self.port_idx_info[port], + 'rx_port_id': pfc_wd_test_port_id, + 'test_portchannel_members': self.pfc_wd_rx_port_id, + 'test_port_type': 'portchannel' + } + + first_pair = False + + def parse_vlan_list(self): + """ + Add vlan specific port info to the already populated port info dict. + + Each vlan interface will be the key and value contains all the info associated with this port + (receive fanout neighbor, receive port receive ptf id, transmit ptf id, neighbor addr etc). 
+ + Args: + None + + Returns: + temp_ports (dict): port info constructed from the vlan interfaces + """ + temp_ports = dict() + vlan_members = self.vlan_info[self.vlan_info.keys()[0]]['members'] + for item in vlan_members: + temp_ports[item] = {'test_neighbor_addr': self.vlan_nw, + 'rx_port': self.pfc_wd_rx_port, + 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, + 'peer_device': self.neighbors[item]['peerdevice'], + 'test_port_id': self.port_idx_info[item], + 'rx_port_id': self.pfc_wd_rx_port_id, + 'test_port_type': 'vlan' + } + + return temp_ports + +def set_pfc_timers(): + """ + Set PFC timers + + Args: + None + + Returns: + pfc_timers (dict) + """ + pfc_timers = {'pfc_wd_detect_time': 400, + 'pfc_wd_restore_time': 400, + 'pfc_wd_restore_time_large': 3000, + 'pfc_wd_poll_time': 400 + } + return pfc_timers + + +def select_test_ports(test_ports): + """ + Select a subset of ports from the generated port info + + Args: + test_ports (dict): Constructed port info + + Returns: + selected_ports (dict): random port info or set of ports matching seed + """ + selected_ports = dict() + seed = int(datetime.datetime.today().day) + for key, value in test_ports.items(): + if (int(value['test_port_id']) % 15) == (seed % 15): + selected_ports.update({key:value}) + + if not selected_ports: + random_port = test_ports.keys()[0] + selected_ports[random_port] = test_ports[random_port] + + return selected_ports diff --git a/tests/pfcwd/templates/config_test_ignore_messages b/tests/pfcwd/templates/config_test_ignore_messages new file mode 100644 index 0000000000..b93ffa1530 --- /dev/null +++ b/tests/pfcwd/templates/config_test_ignore_messages @@ -0,0 +1,8 @@ +r, ".* Port counter .* not implemented" +r, ".* Port counter .* not supported" +r, ".* Invalid port counter .*" +r, ".* Unknown.*" +r, ".* SAI_STATUS_ATTR_NOT_SUPPORT.*" +r, ".* snmp.*" +r, ".* Trying to remove nonexisting queue from flex counter .*" +r, ".* ERR ntpd.*routing socket reports: No buffer space available.*" diff --git a/tests/pfcwd/templates/pfc_config_params.json b/tests/pfcwd/templates/pfc_config_params.json new file mode 100644 index 0000000000..000d6029b8 --- /dev/null +++ b/tests/pfcwd/templates/pfc_config_params.json @@ -0,0 +1,42 @@ +{ + "pfc_wd_fwd_action": { + "pfc_wd_action": "forward", + "pfc_wd_detection_time": 4000, + "pfc_wd_restoration_time": 5000 + }, + "pfc_wd_invalid_action": { + "pfc_wd_action": "invalid", + "pfc_wd_detection_time": 4000, + "pfc_wd_restoration_time": 5000 + }, + "pfc_wd_invalid_detect_time": { + "pfc_wd_action": "forward", + "pfc_wd_detection_time": "400a", + "pfc_wd_restoration_time": 5000 + }, + "pfc_wd_low_detect_time": { + "pfc_wd_action": "forward", + "pfc_wd_detection_time": 40, + "pfc_wd_restoration_time": 5000 + }, + "pfc_wd_high_detect_time": { + "pfc_wd_action": "forward", + "pfc_wd_detection_time": 4000000, + "pfc_wd_restoration_time": 5000 + }, + "pfc_wd_invalid_restore_time": { + "pfc_wd_action": "forward", + "pfc_wd_detection_time": 4000, + "pfc_wd_restoration_time": "500c" + }, + "pfc_wd_low_restore_time": { + "pfc_wd_action": "forward", + "pfc_wd_detection_time": 4000, + "pfc_wd_restoration_time": 50 + }, + "pfc_wd_high_restore_time": { + "pfc_wd_action": "forward", + "pfc_wd_detection_time": 4000, + "pfc_wd_restoration_time": 50000000 + } +} diff --git a/tests/pfcwd/test_pfc_config.py b/tests/pfcwd/test_pfc_config.py new file mode 100644 index 0000000000..7c7d765648 --- /dev/null +++ b/tests/pfcwd/test_pfc_config.py @@ -0,0 +1,257 @@ +import json +import os +import pytest +import logging + 
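As an aside on select_test_ports (defined in pfcwd_helper.py earlier in this diff): it picks a deterministic subset of the generated ports, seeded by the day of the month, and falls back to the first port when no test_port_id lands in the seed's modulo-15 bucket. A minimal standalone sketch of that selection logic; the names only mirror the helper for illustration:

    import datetime

    def select_test_ports(test_ports):
        # Day-of-month seed: the subset is stable within a day but
        # rotates over the course of the month.
        selected_ports = dict()
        seed = int(datetime.datetime.today().day)
        for port, info in test_ports.items():
            if int(info['test_port_id']) % 15 == seed % 15:
                selected_ports[port] = info
        if not selected_ports:
            # Guarantee that at least one port is always selected.
            first = list(test_ports.keys())[0]
            selected_ports[first] = test_ports[first]
        return selected_ports

    # e.g. on day 17 (17 % 15 == 2), a port with test_port_id 2 is kept
    print(select_test_ports({'Ethernet0': {'test_port_id': 2},
                             'Ethernet4': {'test_port_id': 3}}))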
+from common.helpers.assertions import pytest_assert +from common.plugins.loganalyzer.loganalyzer import LogAnalyzer + +logger = logging.getLogger(__name__) + +DUT_RUN_DIR = "/home/admin/pfc_wd_tests" +TEMPLATES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates") +TMP_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testrun") +CONFIG_TEST_EXPECT_INVALID_ACTION_RE = ".* Invalid PFC Watchdog action .*" +CONFIG_TEST_EXPECT_INVALID_DETECT_TIME_RE = ".* Failed to parse PFC Watchdog .* detection_time .*" +CONFIG_TEST_EXPECT_INVALID_RESTORE_TIME_RE = ".* Failed to parse PFC Watchdog .* restoration_time .*" + +pytestmark = [pytest.mark.disable_loganalyzer] # disable automatic fixture and invoke within each test + +def create_run_dir(): + """ + Creates a temp run dir 'testrun' within the pfcwd folder + """ + try: + os.mkdir(TMP_DIR) + except OSError as err: + pytest.fail("Failed to create a temp run dir: {}".format(str(err))) + +def generate_cfg_templates(test_port): + """ + Build all the config templates that will be used for the config validation test + + Args: + test_port (string): a random port selected from the test port list + + Returns: + cfg_params (dict): all config templates + """ + create_run_dir() + with open(os.path.join(TEMPLATES_DIR, "pfc_config_params.json"), "r") as read_file: + cfg_params = json.load(read_file) + + for key in cfg_params: + write_file = key + write_params = dict() + write_params["PFC_WD"] = { test_port: { "action": cfg_params[key]["pfc_wd_action"], + "detection_time": cfg_params[key]["pfc_wd_detection_time"], + "restoration_time": cfg_params[key]["pfc_wd_restoration_time"] + } + } + # create individual template files for each test + with open(os.path.join(TMP_DIR, "{}.json".format(write_file)), "w") as wfile: + json.dump(write_params, wfile) + + return cfg_params + +def copy_templates_to_dut(duthost, cfg_params): + """ + Copy all the templates created to the DUT + + Args: + duthost (AnsibleHost): instance + cfg_params (dict): all config templates + + Returns: + None + """ + duthost.shell("mkdir -p {}".format(DUT_RUN_DIR)) + for key in cfg_params: + src_file = os.path.join(TMP_DIR, "{}.json".format(key)) + duthost.copy(src=src_file, dest="{}/{}.json".format(DUT_RUN_DIR, key)) + +def cfg_teardown(duthost): + """ + Cleans up the DUT temp dir and temp dir on the host after the module run + + Args: + duthost (AnsibleHost): instance + + Returns: + None + """ + if os.path.exists(TMP_DIR): + os.system("rm -rf {}".format(TMP_DIR)) + duthost.shell("rm -rf {}".format(DUT_RUN_DIR)) + +@pytest.fixture(scope='class', autouse=True) +def cfg_setup(setup_pfc_test, duthost): + """ + Class level automatic fixture. Prior to the test run, create all the templates + needed for each individual test and copy them on the DUT. 
+ After all the test cases are done, clean up the temp dir on the DUT and the host + + Args: + setup_pfc_test: module fixture defined in module conftest.py + duthost: instance of AnsibleHost class + """ + setup_info = setup_pfc_test + pfc_wd_test_port = setup_info['test_ports'].keys()[0] + logger.info("Creating json templates for all config tests") + cfg_params = generate_cfg_templates(pfc_wd_test_port) + logger.info("Copying templates over to the DUT") + copy_templates_to_dut(duthost, cfg_params) + + yield + logger.info("--- Start running config tests ---") + + logger.info("--- Clean up config dir from DUT ---") + cfg_teardown(duthost) + + +@pytest.fixture(scope='function', autouse=True) +def stop_pfcwd(duthost): + """ + Fixture that stops PFC Watchdog before each test run + + Args: + duthost: instance of AnsibleHost class + + Returns: + None + """ + logger.info("--- Stop Pfcwd ---") + duthost.command("pfcwd stop") + + +@pytest.mark.usefixtures('cfg_setup') +class TestPfcConfig(object): + """ + Test case definition and helper function class + """ + def execute_test(self, duthost, syslog_marker, ignore_regex=None, expect_regex=None, expect_errors=False): + """ + Helper function that loads each template on the DUT and verifies the expected behavior + + Args: + duthost (AnsibleHost): instance + syslog_marker (string): marker prefix name to be inserted in the syslog + ignore_regex (string): file containing regexes to be ignored by loganalyzer + expect_regex (string): regex pattern that is expected to be present in the syslog + expect_errors (bool): whether the test expects an error msg in the syslog. Default: False + + Returns: + None + """ + loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix=syslog_marker) + + if ignore_regex: + ignore_file = os.path.join(TEMPLATES_DIR, ignore_regex) + reg_exp = loganalyzer.parse_regexp_file(src=ignore_file) + loganalyzer.ignore_regex.extend(reg_exp) + + if expect_regex: + loganalyzer.expect_regex = [] + loganalyzer.expect_regex.extend(expect_regex) + + loganalyzer.match_regex = [] + with loganalyzer(fail=not expect_errors): + cmd = "sonic-cfggen -j {}/{}.json --write-to-db".format(DUT_RUN_DIR, syslog_marker) + out = duthost.command(cmd) + pytest_assert(out["rc"] == 0, "Failed to execute cmd {}: Error: {}".format(cmd, out["stderr"])) + + def test_forward_action_cfg(self, duthost): + """ + Tests if the config gets loaded properly for a valid cfg template + + Args: + duthost(AnsibleHost): instance + + Returns: + None + """ + self.execute_test(duthost, "pfc_wd_fwd_action", "config_test_ignore_messages") + + def test_invalid_action_cfg(self, duthost): + """ + Tests for syslog error when invalid action is configured + + Args: + duthost(AnsibleHost): instance + + Returns: + None + """ + self.execute_test(duthost, "pfc_wd_invalid_action", None, [CONFIG_TEST_EXPECT_INVALID_ACTION_RE], True) + + def test_invalid_detect_time_cfg(self, duthost): + """ + Tests for syslog error when invalid detect time is configured + + Args: + duthost(AnsibleHost): instance + + Returns: + None + """ + self.execute_test(duthost, "pfc_wd_invalid_detect_time", None, [CONFIG_TEST_EXPECT_INVALID_DETECT_TIME_RE], True) + + def test_low_detect_time_cfg(self, duthost): + """ + Tests for syslog error when detect time < lower bound is configured + + Args: + duthost(AnsibleHost): instance + + Returns: + None + """ + self.execute_test(duthost, "pfc_wd_low_detect_time", None, [CONFIG_TEST_EXPECT_INVALID_DETECT_TIME_RE], True) + + def test_high_detect_time_cfg(self, duthost): + """ + Tests for 
syslog error when detect time > higher bound is configured + + Args: + duthost(AnsibleHost): instance + + Returns: + None + """ + self.execute_test(duthost, "pfc_wd_high_detect_time", None, [CONFIG_TEST_EXPECT_INVALID_DETECT_TIME_RE], True) + + def test_invalid_restore_time_cfg(self, duthost): + """ + Tests for syslog error when invalid restore time is configured + + Args: + duthost(AnsibleHost): instance + + Returns: + None + """ + self.execute_test(duthost, "pfc_wd_invalid_restore_time", None, [CONFIG_TEST_EXPECT_INVALID_RESTORE_TIME_RE], True) + + def test_low_restore_time_cfg(self, duthost): + """ + Tests for syslog error when restore time < lower bound is configured + + Args: + duthost(AnsibleHost): instance + + Returns: + None + """ + self.execute_test(duthost, "pfc_wd_low_restore_time", None, [CONFIG_TEST_EXPECT_INVALID_RESTORE_TIME_RE], True) + + def test_high_restore_time_cfg(self, duthost): + """ + Tests for syslog error when restore time > higher bound is configured + + Args: + duthost(AnsibleHost): instance + + Returns: + None + """ + self.execute_test(duthost, "pfc_wd_high_restore_time", None, [CONFIG_TEST_EXPECT_INVALID_RESTORE_TIME_RE], True) diff --git a/tests/platform/check_daemon_status.py b/tests/platform/check_daemon_status.py deleted file mode 100644 index fdefbed0c3..0000000000 --- a/tests/platform/check_daemon_status.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -Helper script for checking status of platform daemon status - -This script contains re-usable functions for checking status of platform daemon status. -""" -import logging - - -def check_pmon_daemon_status(dut): - """ - @summary: check daemon running status inside pmon docker. - - This function use command "supervisorctl status" inside the container and check the status from the command output. - If the daemon status is "RUNNING" then return True, if daemon not exist or status is not "RUNNING", return false. 
- """ - daemon_list = dut.get_pmon_daemon_list() - daemon_status = {} - try: - for daemon in daemon_list: - output = dut.shell('docker exec pmon supervisorctl status | grep %s' % daemon, module_ignore_errors=True) - if bool(output["stdout_lines"]): - expected_line = output["stdout_lines"][0] - expected_line_list = expected_line.split() - daemon_status[daemon] = (daemon in expected_line_list and 'RUNNING' in expected_line_list) - logging.debug("Daemon %s status is %s" % (daemon, str(daemon_status[daemon]))) - else: - logging.debug("Daemon %s does not exist" % daemon) - return False - return all(daemon_status.values()) - except: - return False diff --git a/tests/platform/test_advanced_reboot.py b/tests/platform/test_advanced_reboot.py deleted file mode 100644 index fbff39f8d5..0000000000 --- a/tests/platform/test_advanced_reboot.py +++ /dev/null @@ -1,17 +0,0 @@ -import pytest - -@pytest.mark.usefixtures('get_advanced_reboot') -def test_fast_reboot(request, get_advanced_reboot): - ''' - Fast reboot test case is run using advacned reboot test fixture - ''' - advancedReboot = get_advanced_reboot(rebootType='fast-reboot') - advancedReboot.runRebootTestcase() - -@pytest.mark.usefixtures('get_advanced_reboot') -def test_warm_reboot(request, get_advanced_reboot): - ''' - Warm reboot test case is run using advacned reboot test fixture - ''' - advancedReboot = get_advanced_reboot(rebootType='warm-reboot') - advancedReboot.runRebootTestcase() diff --git a/tests/platform/api/conftest.py b/tests/platform_tests/api/conftest.py similarity index 96% rename from tests/platform/api/conftest.py rename to tests/platform_tests/api/conftest.py index bfb9b6cc7e..9213a8bf4a 100644 --- a/tests/platform/api/conftest.py +++ b/tests/platform_tests/api/conftest.py @@ -7,8 +7,7 @@ SERVER_PORT = 8000 @pytest.fixture(scope='function') -def start_platform_api_service(duthost, testbed_devices): - localhost = testbed_devices['localhost'] +def start_platform_api_service(duthost, localhost): res = localhost.wait_for(host=duthost.hostname, port=SERVER_PORT, state='started', delay=1, timeout=5) if 'exception' in res: supervisor_conf = [ diff --git a/tests/platform/api/test_watchdog.py b/tests/platform_tests/api/test_watchdog.py similarity index 95% rename from tests/platform/api/test_watchdog.py rename to tests/platform_tests/api/test_watchdog.py index 17e392abd3..afa5841922 100644 --- a/tests/platform/api/test_watchdog.py +++ b/tests/platform_tests/api/test_watchdog.py @@ -53,14 +53,10 @@ def conf(self, request, duthost): return config - def test_arm_disarm_states(self, testbed_devices, platform_api_conn, conf): + def test_arm_disarm_states(self, duthost, localhost, platform_api_conn, conf): ''' arm watchdog with a valid timeout value, verify it is in armed state, disarm watchdog and verify it is in disarmed state ''' - - duthost = testbed_devices['dut'] - localhost = testbed_devices['localhost'] - watchdog_timeout = conf['valid_timeout'] actual_timeout = watchdog.arm(platform_api_conn, watchdog_timeout) @@ -161,12 +157,9 @@ def test_arm_negative_timeout(self, duthost, platform_api_conn): assert actual_timeout == -1 @pytest.mark.disable_loganalyzer - def test_reboot(self, testbed_devices, platform_api_conn, conf): + def test_reboot(self, duthost, localhost, platform_api_conn, conf): ''' arm the watchdog and verify it did its job after timeout expiration ''' - duthost = testbed_devices['dut'] - localhost = testbed_devices['localhost'] - watchdog_timeout = conf['valid_timeout'] actual_timeout = watchdog.arm(platform_api_conn, 
watchdog_timeout) diff --git a/tests/platform/api/watchdog.yml b/tests/platform_tests/api/watchdog.yml similarity index 100% rename from tests/platform/api/watchdog.yml rename to tests/platform_tests/api/watchdog.yml diff --git a/tests/platform/mellanox/conftest.py b/tests/platform_tests/broadcom/conftest.py similarity index 100% rename from tests/platform/mellanox/conftest.py rename to tests/platform_tests/broadcom/conftest.py diff --git a/tests/platform/broadcom/files/ser_injector.py b/tests/platform_tests/broadcom/files/ser_injector.py similarity index 100% rename from tests/platform/broadcom/files/ser_injector.py rename to tests/platform_tests/broadcom/files/ser_injector.py diff --git a/tests/platform/broadcom/test_ser.py b/tests/platform_tests/broadcom/test_ser.py similarity index 100% rename from tests/platform/broadcom/test_ser.py rename to tests/platform_tests/broadcom/test_ser.py diff --git a/tests/platform/check_all_interface_info.py b/tests/platform_tests/check_all_interface_info.py similarity index 100% rename from tests/platform/check_all_interface_info.py rename to tests/platform_tests/check_all_interface_info.py diff --git a/tests/platform/check_critical_services.py b/tests/platform_tests/check_critical_services.py similarity index 100% rename from tests/platform/check_critical_services.py rename to tests/platform_tests/check_critical_services.py diff --git a/tests/platform/check_interface_status.py b/tests/platform_tests/check_interface_status.py similarity index 100% rename from tests/platform/check_interface_status.py rename to tests/platform_tests/check_interface_status.py diff --git a/tests/platform/check_transceiver_status.py b/tests/platform_tests/check_transceiver_status.py similarity index 100% rename from tests/platform/check_transceiver_status.py rename to tests/platform_tests/check_transceiver_status.py diff --git a/tests/platform/conftest.py b/tests/platform_tests/conftest.py similarity index 67% rename from tests/platform/conftest.py rename to tests/platform_tests/conftest.py index 5be0e389ec..04039b9983 100644 --- a/tests/platform/conftest.py +++ b/tests/platform_tests/conftest.py @@ -1,10 +1,10 @@ import pytest from common.fixtures.advanced_reboot import get_advanced_reboot -from args.advanced_reboot_args import add_advanced_reboot_args +from platform_args.advanced_reboot_args import add_advanced_reboot_args @pytest.fixture(autouse=True, scope="module") -def skip_on_simx(testbed_devices): - platform = testbed_devices["dut"].facts["platform"] +def skip_on_simx(duthost): + platform = duthost.facts["platform"] if "simx" in platform: pytest.skip('skipped on this platform: {}'.format(platform)) diff --git a/tests/platform/files/getportmap.py b/tests/platform_tests/files/getportmap.py similarity index 100% rename from tests/platform/files/getportmap.py rename to tests/platform_tests/files/getportmap.py diff --git a/tests/platform/files/invalid_format_policy.json b/tests/platform_tests/files/invalid_format_policy.json similarity index 100% rename from tests/platform/files/invalid_format_policy.json rename to tests/platform_tests/files/invalid_format_policy.json diff --git a/tests/platform/files/invalid_value_policy.json b/tests/platform_tests/files/invalid_value_policy.json similarity index 89% rename from tests/platform/files/invalid_value_policy.json rename to tests/platform_tests/files/invalid_value_policy.json index edf35114a5..95cee699f2 100644 --- a/tests/platform/files/invalid_value_policy.json +++ b/tests/platform_tests/files/invalid_value_policy.json @@ 
-5,6 +5,9 @@ }, { "type": "psu_info" + }, + { + "type": "chassis_info" } ], "policies": [ diff --git a/tests/platform/files/valid_policy.json b/tests/platform_tests/files/valid_policy.json similarity index 96% rename from tests/platform/files/valid_policy.json rename to tests/platform_tests/files/valid_policy.json index 036bae8c1e..9343746d37 100644 --- a/tests/platform/files/valid_policy.json +++ b/tests/platform_tests/files/valid_policy.json @@ -9,6 +9,9 @@ }, { "type": "psu_info" + }, + { + "type": "chassis_info" } ], "policies": [ diff --git a/tests/platform/mellanox/check_hw_mgmt_service.py b/tests/platform_tests/mellanox/check_hw_mgmt_service.py similarity index 100% rename from tests/platform/mellanox/check_hw_mgmt_service.py rename to tests/platform_tests/mellanox/check_hw_mgmt_service.py diff --git a/tests/platform/mellanox/check_sysfs.py b/tests/platform_tests/mellanox/check_sysfs.py similarity index 94% rename from tests/platform/mellanox/check_sysfs.py rename to tests/platform_tests/mellanox/check_sysfs.py index 017863fcb9..ef663d8ae3 100644 --- a/tests/platform/mellanox/check_sysfs.py +++ b/tests/platform_tests/mellanox/check_sysfs.py @@ -83,6 +83,9 @@ def check_sysfs(dut): assert min_tolerance_speed < fan_speed < max_tolerance_speed, "Speed out of tolerance speed range (%d, %d)" \ % (min_tolerance_speed, max_tolerance_speed) + cpu_temp_high_counter = 0 + cpu_temp_list = [] + cpu_crit_temp_list = [] cpu_pack_count = SWITCH_MODELS[dut_hwsku]["cpu_pack"]["number"] if cpu_pack_count != 0: cpu_pack_temp_file = "/var/run/hw-management/thermal/cpu_pack" @@ -99,7 +102,10 @@ def check_sysfs(dut): assert cpu_pack_max_temp <= cpu_pack_crit_temp, "Bad CPU pack max temp or critical temp, %s, %s " \ % (str(cpu_pack_max_temp), str(cpu_pack_crit_temp)) - assert cpu_pack_temp < cpu_pack_max_temp, "CPU pack overheated, temp: %s" % (str(cpu_pack_temp)) + if cpu_pack_temp >= cpu_pack_crit_temp: + cpu_temp_high_counter += 1 + cpu_temp_list.append(cpu_pack_temp) + cpu_crit_temp_list.append(cpu_pack_crit_temp) cpu_core_count = SWITCH_MODELS[dut_hwsku]["cpu_cores"]["number"] for core_id in range(0, cpu_core_count): @@ -117,7 +123,15 @@ def check_sysfs(dut): assert cpu_core_max_temp <= cpu_core_crit_temp, "Bad CPU core%d max temp or critical temp, %s, %s " \ % (core_id, str(cpu_core_max_temp), str(cpu_core_crit_temp)) - assert cpu_core_temp < cpu_core_max_temp, "CPU core%d overheated, temp: %s" % (core_id, str(cpu_core_temp)) + if cpu_core_temp >= cpu_core_crit_temp: + cpu_temp_high_counter += 1 + cpu_temp_list.append(cpu_core_temp) + cpu_crit_temp_list.append(cpu_core_crit_temp) + + if cpu_temp_high_counter > 0: + logging.info("CPU temperatures {}".format(cpu_temp_list)) + logging.info("CPU critical temperatures {}".format(cpu_crit_temp_list)) + assert False, "At least {} of the CPU cores or pack is overheated".format(cpu_temp_high_counter) psu_count = SWITCH_MODELS[dut_hwsku]["psus"]["number"] for psu_id in range(1, psu_count + 1): diff --git a/tests/platform_tests/mellanox/conftest.py b/tests/platform_tests/mellanox/conftest.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/platform/mellanox/mellanox_thermal_control_test_helper.py b/tests/platform_tests/mellanox/mellanox_thermal_control_test_helper.py similarity index 88% rename from tests/platform/mellanox/mellanox_thermal_control_test_helper.py rename to tests/platform_tests/mellanox/mellanox_thermal_control_test_helper.py index 65816a0af9..212453f3dc 100644 --- 
a/tests/platform/mellanox/mellanox_thermal_control_test_helper.py +++ b/tests/platform_tests/mellanox/mellanox_thermal_control_test_helper.py @@ -3,6 +3,7 @@ import logging from thermal_control_test_helper import * from common.mellanox_data import SWITCH_MODELS +from minimum_table import MINIMUM_TABLE NOT_AVAILABLE = 'N/A' @@ -255,6 +256,12 @@ def __init__(self, mock_helper, naming_rule, index): """ self.index = index self.helper = mock_helper + dut_hwsku = self.helper.dut.facts["hwsku"] + if SWITCH_MODELS[dut_hwsku]['fans']['hot_swappable']: + self.name = 'drawer{}'.format(index) + else: + self.name = 'N/A' + self.fan_data_list = [] self.mocked_presence = None self.mocked_direction = None if 'presence' in naming_rule: @@ -336,7 +343,15 @@ def get_status_led(self): else: assert 0, 'Invalid FAN led color for FAN: {}, green={}, red={}'.format(self.name, green_led_value, red_led_value) + def get_expect_led_color(self): + if self.mocked_presence == 'Not Present': + return 'red' + for fan_data in self.fan_data_list: + if fan_data.get_expect_led_color() == 'red': + return 'red' + + return 'green' class FanData: """ @@ -346,6 +361,9 @@ class FanData: # MAX PWM value. PWM_MAX = 255 + # Speed tolerance + SPEED_TOLERANCE = 0.2 + def __init__(self, mock_helper, naming_rule, index): """ Constructor of FAN data. @@ -436,7 +454,25 @@ def get_target_speed(self): target_speed = int(round(pwm * 100.0 / FanData.PWM_MAX)) return target_speed + def get_expect_led_color(self): + """ + Get expect LED color. + :return: Return the LED color that this FAN expect to have. + """ + if self.mocked_status == 'Not OK': + return 'red' + + target_speed = self.get_target_speed() + mocked_speed = int(self.mocked_speed) + if mocked_speed > target_speed * (1 + FanData.SPEED_TOLERANCE): + return 'red' + + if mocked_speed < target_speed * (1 - FanData.SPEED_TOLERANCE): + return 'red' + return 'green' + + class TemperatureData: """ Data mocker of a thermal. 
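A short illustration of the fan LED expectation added in the hunk above: FanData.get_expect_led_color treats a fan as healthy (green LED) only while its mocked speed stays within the 20% SPEED_TOLERANCE band around the target speed; a bad status or an out-of-band speed maps to red. A standalone sketch mirroring that logic, with assumed example values:

    SPEED_TOLERANCE = 0.2

    def expect_led_color(mocked_speed, target_speed, status_ok=True):
        # A bad fan status always maps to a red LED.
        if not status_ok:
            return 'red'
        # A speed outside +/-20% of the target also maps to red.
        if mocked_speed > target_speed * (1 + SPEED_TOLERANCE):
            return 'red'
        if mocked_speed < target_speed * (1 - SPEED_TOLERANCE):
            return 'red'
        return 'green'

    assert expect_led_color(110, 100) == 'green'   # within tolerance
    assert expect_led_color(125, 100) == 'red'     # 25% over target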
@@ -531,6 +567,7 @@ def __init__(self, dut): """ FanStatusMocker.__init__(self, dut) self.mock_helper = MockerHelper(dut) + self.drawer_list = [] self.expected_data = {} def deinit(self): @@ -555,6 +592,7 @@ def mock_data(self): try: if (fan_index - 1) % MockerHelper.FAN_NUM_PER_DRAWER == 0: drawer_data = FanDrawerData(self.mock_helper, naming_rule, drawer_index) + self.drawer_list.append(drawer_data) drawer_index += 1 presence = random.randint(0, 1) drawer_data.mock_presence(presence) @@ -563,11 +601,14 @@ def mock_data(self): presence = 1 fan_data = FanData(self.mock_helper, naming_rule, fan_index) + drawer_data.fan_data_list.append(fan_data) fan_index += 1 if presence == 1: fan_data.mock_status(random.randint(0, 1)) fan_data.mock_speed(random.randint(0, 100)) self.expected_data[fan_data.name] = [ + drawer_data.name, + 'N/A', # update this value later fan_data.name, '{}%'.format(fan_data.mocked_speed), drawer_data.mocked_direction, @@ -576,6 +617,8 @@ def mock_data(self): ] else: self.expected_data[fan_data.name] = [ + drawer_data.name, + 'red', fan_data.name, 'N/A', 'N/A', @@ -586,16 +629,26 @@ def mock_data(self): logging.info('Failed to mock fan data: {}'.format(e)) continue + # update led color here + for drawer_data in self.drawer_list: + for fan_data in drawer_data.fan_data_list: + if drawer_data.mocked_presence == 'Present': + expected_data = self.expected_data[fan_data.name] + expected_data[1] = drawer_data.get_expect_led_color() + dut_hwsku = self.mock_helper.dut.facts["hwsku"] psu_count = SWITCH_MODELS[dut_hwsku]["psus"]["number"] naming_rule = FAN_NAMING_RULE['psu_fan'] for index in range(1, psu_count + 1): try: fan_data = FanData(self.mock_helper, naming_rule, index) - speed = random.randint(0, RandomFanStatusMocker.PSU_FAN_MAX_SPEED) + # PSU fan speed display PWM not percentage, it should not be less than 100 + speed = random.randint(101, RandomFanStatusMocker.PSU_FAN_MAX_SPEED) fan_data.mock_speed(speed) self.expected_data[fan_data.name] = [ + 'N/A', + '', fan_data.name, '{}RPM'.format(fan_data.mocked_speed), NOT_AVAILABLE, @@ -609,7 +662,7 @@ def mock_data(self): def check_result(self, actual_data): """ Check actual data with mocked data. - :param actual_data: A dictionary contains actual command line data. Key of the dictionary is FAN name. Value + :param actual_data: A dictionary contains actual command line data. Key of the dictionary is FAN name. Value of the dictionary is a list of field values for a line of FAN data. :return: True if match else False. """ @@ -617,6 +670,8 @@ def check_result(self, actual_data): if name in actual_data: actual_fields = actual_data[name] for i, expected_field in enumerate(fields): + if name.find('psu') != -1 and i ==1: + continue # skip led status check for PSU because we don't mock it if expected_field != actual_fields[i]: logging.error('Check fan status for {} failed, ' \ 'expected: {}, actual: {}'.format(name, expected_field, actual_fields[i])) @@ -843,6 +898,7 @@ def mock_all_normal(self): for fan_data in self.fan_data_list: try: + fan_data.mock_status(0) fan_data.mock_speed(AbnormalFanMocker.TARGET_SPEED_VALUE) fan_data.mock_target_speed(AbnormalFanMocker.TARGET_SPEED_VALUE) except SysfsNotExistError as e: @@ -853,6 +909,7 @@ def mock_normal(self): Change the mocked FAN status to 'Present' and normal speed. 
:return: """ + self.mock_status(0) self.mock_presence() self.mock_normal_speed() @@ -872,6 +929,15 @@ def mock_presence(self): self.fan_drawer_data.mock_presence(1) self.expect_led_color = 'green' + def mock_status(self, status): + """ + Change the mocked FAN status to good or bad + :param status: bool value indicate the target status of the FAN. + :return: + """ + self.fan_data.mock_status(0 if status else 1) + self.expect_led_color = 'green' if status else 'red' + def mock_over_speed(self): """ Change the mocked FAN speed to faster than target speed and exceed speed tolerance. @@ -898,3 +964,49 @@ def mock_normal_speed(self): self.fan_data.mock_speed(AbnormalFanMocker.TARGET_SPEED_VALUE) self.fan_data.mock_target_speed(AbnormalFanMocker.TARGET_SPEED_VALUE) self.expect_led_color = 'green' + + +@mocker('MinTableMocker') +class MinTableMocker(object): + FAN_AMB_PATH = 'fan_amb' + PORT_AMB_PATH = 'port_amb' + TRUST_PATH = 'module1_temp_fault' + def __init__(self, dut): + self.mock_helper = MockerHelper(dut) + + def get_expect_cooling_level(self, air_flow_dir, temperature, trust_state): + hwsku = self.mock_helper.dut.facts["hwsku"] + minimum_table = MINIMUM_TABLE[hwsku] + row = minimum_table['{}_{}'.format(air_flow_dir, 'trust' if trust_state else 'untrust')] + temperature = temperature / 1000 + for range_str, cooling_level in row.items(): + range_str_list = range_str.split(':') + min_temp = int(range_str_list[0]) + max_temp = int(range_str_list[1]) + if min_temp <= temperature <= max_temp: + return cooling_level - 10 + + return None + + def mock_min_table(self, air_flow_dir, temperature, trust_state): + trust_value = '0' if trust_state else '1' + if air_flow_dir == 'p2c': + fan_temp = temperature + port_temp = temperature - 100 + elif air_flow_dir == 'c2p': + fan_temp = temperature - 100 + port_temp = temperature + else: + fan_temp = temperature + port_temp = temperature + + self.mock_helper.mock_thermal_value(self.FAN_AMB_PATH, str(fan_temp)) + self.mock_helper.mock_thermal_value(self.PORT_AMB_PATH, str(port_temp)) + self.mock_helper.mock_thermal_value(self.TRUST_PATH, str(trust_value)) + + def deinit(self): + """ + Destructor of MinTableMocker. 
+ :return: + """ + self.mock_helper.deinit() diff --git a/tests/platform_tests/mellanox/minimum_table.py b/tests/platform_tests/mellanox/minimum_table.py new file mode 100644 index 0000000000..78bf900aaf --- /dev/null +++ b/tests/platform_tests/mellanox/minimum_table.py @@ -0,0 +1,90 @@ +MINIMUM_TABLE= { + 'ACS-MSN2700': { + "p2c_trust": {"-127:40":13, "41:120":15}, + "p2c_untrust": {"-127:25":13, "26:30":14 , "31:35":15, "36:120":16}, + "c2p_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "c2p_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16} + }, + 'LS-SN2700': { + "p2c_trust": {"-127:40":13, "41:120":15}, + "p2c_untrust": {"-127:25":13, "26:30":14 , "31:35":15, "36:120":16}, + "c2p_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "c2p_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16} + }, + 'ACS-MSN2740': { + "p2c_trust": {"-127:120":13}, + "p2c_untrust": {"-127:35":13, "36:40":14 , "41:120":15}, + "c2p_trust": {"-127:120":13}, + "c2p_untrust": {"-127:15":13, "16:30":14 , "31:35":15, "36:120":17}, + "unk_trust": {"-127:120":13}, + "unk_untrust": {"-127:15":13, "16:30":14 , "31:35":15, "36:120":17}, + }, + 'ACS-MSN2410': { + "p2c_trust": {"-127:40":13, "41:120":15}, + "p2c_untrust": {"-127:25":13, "26:30":14 , "31:35":15, "36:120":16}, + "c2p_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "c2p_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16} + }, + 'Mellanox-SN2700': { + "p2c_trust": {"-127:40":13, "41:120":15}, + "p2c_untrust": {"-127:25":13, "26:30":14 , "31:35":15, "36:120":16}, + "c2p_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "c2p_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16} + }, + 'Mellanox-SN2700-D48C8': { + "p2c_trust": {"-127:40":13, "41:120":15}, + "p2c_untrust": {"-127:25":13, "26:30":14 , "31:35":15, "36:120":16}, + "c2p_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "c2p_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_trust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16}, + "unk_untrust": {"-127:20":13, "21:25":14 , "26:30":15, "31:120":16} + }, + 'ACS-MSN2100': { + "p2c_trust": {"-127:120":12}, + "p2c_untrust": {"-127:15":12, "16:25":13, "26:30":14, "31:35":15, "36:120":16}, + "c2p_trust": {"-127:40":12, "41:120":13}, + "c2p_untrust": {"-127:40":12, "41:120":13}, + "unk_trust": {"-127:40":12, "41:120":13}, + "unk_untrust": {"-127:15":12, "16:25":13, "26:30":14, "31:35":15, "36:120":16} + }, + 'ACS-MSN2010': { + "p2c_trust": {"-127:120":12}, + "p2c_untrust": {"-127:15":12, "16:20":13, "21:30":14, "31:35":15, "36:120":16}, + "c2p_trust": {"-127:120":12}, + "c2p_untrust": {"-127:20":12, "21:25":13 , "26:30":14, "31:35":15, "36:120":16}, + "unk_trust": {"-127:120":12}, + "unk_untrust": {"-127:15":12, "16:20":13 , "21:30":14, "31:35":15, "36:120":16} + }, + 'ACS-MSN3700': { + "p2c_trust": {"-127:25":12, "26:40":13 , "41:120":14}, + "p2c_untrust": 
{"-127:15":12, "16:30":13 , "31:35":14, "36:40":15, "41:120":16}, + "c2p_trust": {"-127:25":12, "26:40":13 , "41:120":14}, + "c2p_untrust": {"-127:25":12, "26:40":13 , "41:120":14}, + "unk_trust": {"-127:25":12, "26:40":13 , "41:120":14}, + "unk_untrust": {"-127:15":12, "16:30":13 , "31:35":14, "36:40":15, "41:120":16}, + }, + 'ACS-MSN3800': { + "p2c_trust": {"-127:35":12, "36:120":13}, + "p2c_untrust": {"-127:0":12, "1:10":13 , "11:15":14, "16:20":15, "21:35":16, "36:120":17}, + "c2p_trust": {"-127:30":12, "31:40":13 , "41:120":14}, + "c2p_untrust": {"-127:20":12, "21:30":13 , "31:35":14, "36:40":15, "41:120":16}, + "unk_trust": {"-127:30":12, "31:40":13 , "41:120":14}, + "unk_untrust": {"-127:0":12, "1:10":13 , "11:15":14, "16:20":15, "21:35":16, "36:120":17}, + }, + 'Mellanox-SN3800-D112C8': { + "p2c_trust": {"-127:35":12, "36:120":13}, + "p2c_untrust": {"-127:0":12, "1:10":13 , "11:15":14, "16:20":15, "21:35":16, "36:120":17}, + "c2p_trust": {"-127:30":12, "31:40":13 , "41:120":14}, + "c2p_untrust": {"-127:20":12, "21:30":13 , "31:35":14, "36:40":15, "41:120":16}, + "unk_trust": {"-127:30":12, "31:40":13 , "41:120":14}, + "unk_untrust": {"-127:0":12, "1:10":13 , "11:15":14, "16:20":15, "21:35":16, "36:120":17}, + }, +} diff --git a/tests/platform/mellanox/test_check_sfp_presence.py b/tests/platform_tests/mellanox/test_check_sfp_presence.py similarity index 76% rename from tests/platform/mellanox/test_check_sfp_presence.py rename to tests/platform_tests/mellanox/test_check_sfp_presence.py index d0dd52862b..0461b6d049 100644 --- a/tests/platform/mellanox/test_check_sfp_presence.py +++ b/tests/platform_tests/mellanox/test_check_sfp_presence.py @@ -5,14 +5,13 @@ import os import json -from platform_fixtures import conn_graph_facts +from common.fixtures.conn_graph_facts import conn_graph_facts -def test_check_sfp_presence(testbed_devices, conn_graph_facts): +def test_check_sfp_presence(duthost, conn_graph_facts): """This test case is to check SFP presence status with CLI and sysfs. 
""" - ans_host = testbed_devices["dut"] - ports_config = json.loads(ans_host.command("sudo sonic-cfggen -d --var-json PORT")["stdout"]) - sysfs_path = get_sfp_sysfs_path(ans_host) + ports_config = json.loads(duthost.command("sudo sonic-cfggen -d --var-json PORT")["stdout"]) + sysfs_path = get_sfp_sysfs_path(duthost) check_qsfp_sysfs_command = 'cat {}'.format(sysfs_path) check_intf_presence_command = 'show interface transceiver presence {}' @@ -21,7 +20,7 @@ def test_check_sfp_presence(testbed_devices, conn_graph_facts): intf_lanes = ports_config[intf]["lanes"] sfp_id = int(intf_lanes.split(",")[0])/4 + 1 - check_presence_output = ans_host.command(check_intf_presence_command.format(intf)) + check_presence_output = duthost.command(check_intf_presence_command.format(intf)) assert check_presence_output["rc"] == 0, "Failed to read interface %s transceiver presence" % intf logging.info(str(check_presence_output["stdout_lines"][2])) presence_list = check_presence_output["stdout_lines"][2].split() @@ -29,7 +28,7 @@ def test_check_sfp_presence(testbed_devices, conn_graph_facts): assert intf in presence_list, "Wrong interface name in the output %s" % str(presence_list) assert 'Present' in presence_list, "Status is not expected, output %s" % str(presence_list) - check_sysfs_output = ans_host.command(check_qsfp_sysfs_command.format(str(sfp_id))) + check_sysfs_output = duthost.command(check_qsfp_sysfs_command.format(str(sfp_id))) logging.info('output of check sysfs %s' % (str(check_sysfs_output))) assert check_sysfs_output["rc"] == 0, "Failed to read sysfs of sfp%s." % str(sfp_id) assert check_sysfs_output["stdout"] == '1', "Content of sysfs of sfp%s is not correct" % str(sfp_id) diff --git a/tests/platform/mellanox/test_check_sfp_using_ethtool.py b/tests/platform_tests/mellanox/test_check_sfp_using_ethtool.py similarity index 69% rename from tests/platform/mellanox/test_check_sfp_using_ethtool.py rename to tests/platform_tests/mellanox/test_check_sfp_using_ethtool.py index 5f1e1dc268..50004a9d4e 100644 --- a/tests/platform/mellanox/test_check_sfp_using_ethtool.py +++ b/tests/platform_tests/mellanox/test_check_sfp_using_ethtool.py @@ -8,22 +8,21 @@ import os import json -from platform_fixtures import conn_graph_facts +from common.fixtures.conn_graph_facts import conn_graph_facts from check_hw_mgmt_service import check_hw_management_service -def test_check_sfp_using_ethtool(testbed_devices, conn_graph_facts): +def test_check_sfp_using_ethtool(duthost, conn_graph_facts): """This test case is to check SFP using the ethtool. """ - ans_host = testbed_devices["dut"] - ports_config = json.loads(ans_host.command("sudo sonic-cfggen -d --var-json PORT")["stdout"]) + ports_config = json.loads(duthost.command("sudo sonic-cfggen -d --var-json PORT")["stdout"]) logging.info("Use the ethtool to check SFP information") for intf in conn_graph_facts["device_conn"]: intf_lanes = ports_config[intf]["lanes"] sfp_id = int(intf_lanes.split(",")[0])/4 + 1 - ethtool_sfp_output = ans_host.command("sudo ethtool -m sfp%s" % str(sfp_id)) + ethtool_sfp_output = duthost.command("sudo ethtool -m sfp%s" % str(sfp_id)) assert ethtool_sfp_output["rc"] == 0, "Failed to read eeprom of sfp%s using ethtool" % str(sfp_id) assert len(ethtool_sfp_output["stdout_lines"]) >= 5, \ "Does the ethtool output look normal? 
" + str(ethtool_sfp_output["stdout_lines"]) @@ -32,9 +31,9 @@ def test_check_sfp_using_ethtool(testbed_devices, conn_graph_facts): "Unexpected line %s in %s" % (line, str(ethtool_sfp_output["stdout_lines"])) logging.info("Check interface status") - mg_facts = ans_host.minigraph_facts(host=ans_host.hostname)["ansible_facts"] - intf_facts = ans_host.interface_facts(up_ports=mg_facts["minigraph_ports"])["ansible_facts"] + mg_facts = duthost.minigraph_facts(host=duthost.hostname)["ansible_facts"] + intf_facts = duthost.interface_facts(up_ports=mg_facts["minigraph_ports"])["ansible_facts"] assert len(intf_facts["ansible_interface_link_down_ports"]) == 0, \ "Some interfaces are down: %s" % str(intf_facts["ansible_interface_link_down_ports"]) - check_hw_management_service(ans_host) + check_hw_management_service(duthost) diff --git a/tests/platform/mellanox/test_check_sysfs.py b/tests/platform_tests/mellanox/test_check_sysfs.py similarity index 61% rename from tests/platform/mellanox/test_check_sysfs.py rename to tests/platform_tests/mellanox/test_check_sysfs.py index a6973a492e..c4fc8109d4 100644 --- a/tests/platform/mellanox/test_check_sysfs.py +++ b/tests/platform_tests/mellanox/test_check_sysfs.py @@ -9,19 +9,16 @@ from check_sysfs import check_sysfs -def test_check_hw_mgmt_sysfs(testbed_devices): +def test_check_hw_mgmt_sysfs(duthost): """This test case is to check the symbolic links under /var/run/hw-management """ - ans_host = testbed_devices["dut"] - check_sysfs(ans_host) + check_sysfs(duthost) -def test_hw_mgmt_sysfs_mapped_to_pmon(testbed_devices): +def test_hw_mgmt_sysfs_mapped_to_pmon(duthost): """This test case is to verify that the /var/run/hw-management folder is mapped to pmon container """ - ans_host = testbed_devices["dut"] - logging.info("Verify that the /var/run/hw-management folder is mapped to the pmon container") - files_under_dut = set(ans_host.command("find /var/run/hw-management")["stdout_lines"]) - files_under_pmon = set(ans_host.command("docker exec pmon find /var/run/hw-management")["stdout_lines"]) + files_under_dut = set(duthost.command("find /var/run/hw-management")["stdout_lines"]) + files_under_pmon = set(duthost.command("docker exec pmon find /var/run/hw-management")["stdout_lines"]) assert files_under_dut == files_under_pmon, "Folder /var/run/hw-management is not mapped to pmon" diff --git a/tests/platform/mellanox/test_hw_management_service.py b/tests/platform_tests/mellanox/test_hw_management_service.py similarity index 75% rename from tests/platform/mellanox/test_hw_management_service.py rename to tests/platform_tests/mellanox/test_hw_management_service.py index f9f91ca4fb..c56b8840f1 100644 --- a/tests/platform/mellanox/test_hw_management_service.py +++ b/tests/platform_tests/mellanox/test_hw_management_service.py @@ -8,8 +8,7 @@ from check_hw_mgmt_service import check_hw_management_service -def test_hw_management_service_status(testbed_devices): +def test_hw_management_service_status(duthost): """This test case is to verify that the hw-management service is running properly """ - ans_host = testbed_devices["dut"] - check_hw_management_service(ans_host) + check_hw_management_service(duthost) diff --git a/tests/platform_tests/mellanox/test_thermal_control.py b/tests/platform_tests/mellanox/test_thermal_control.py new file mode 100644 index 0000000000..4eabad130b --- /dev/null +++ b/tests/platform_tests/mellanox/test_thermal_control.py @@ -0,0 +1,137 @@ +import logging +import operator +import pytest +import random +import time +from common.mellanox_data 
import SWITCH_MODELS +from common.plugins.loganalyzer.loganalyzer import LogAnalyzer +from common.utilities import wait_until +from thermal_control_test_helper import * +from mellanox_thermal_control_test_helper import MockerHelper, AbnormalFanMocker + +THERMAL_CONTROL_TEST_WAIT_TIME = 65 +THERMAL_CONTROL_TEST_CHECK_INTERVAL = 5 + +COOLING_CUR_STATE_PATH = '/run/hw-management/thermal/cooling_cur_state' +COOLING_CUR_STATE_THRESHOLD = 7 +PSU_PRESENCE_PATH = '/run/hw-management/thermal/psu{}_status' +PSU_SPEED_PATH = '/run/hw-management/thermal/psu{}_fan1_speed_get' +PSU_SPEED_TOLERANCE = 0.25 + +LOG_EXPECT_CHANGE_MIN_COOLING_LEVEL_RE = '.*Changed minimum cooling level to {}.*' + + +@pytest.mark.disable_loganalyzer +def test_dynamic_minimum_table(duthost, mocker_factory): + air_flow_dirs = ['p2c', 'c2p', 'unk'] + max_temperature = 45000 # 45 C + cooling_cur_state = get_cooling_cur_state(duthost) + if cooling_cur_state >= COOLING_CUR_STATE_THRESHOLD: + pytest.skip('The cooling level {} is higher than threshold {}.'.format(cooling_cur_state, COOLING_CUR_STATE_THRESHOLD)) + + mocker = mocker_factory(duthost, 'MinTableMocker') + loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='thermal_control') + loganalyzer.load_common_config() + + for index in range(len(air_flow_dirs)): + air_flow_index = random.randint(0, len(air_flow_dirs) - 1) + air_flow_dir = air_flow_dirs[air_flow_index] + air_flow_dirs.remove(air_flow_dir) + temperature = random.randint(0, max_temperature) + trust_state = True if random.randint(0, 1) else False + logging.info('Testing with air_flow_dir={}, temperature={}, trust_state={}'.format(air_flow_dir, temperature, trust_state)) + expect_minimum_cooling_level = mocker.get_expect_cooling_level(air_flow_dir, temperature, trust_state) + loganalyzer.expect_regex = [LOG_EXPECT_CHANGE_MIN_COOLING_LEVEL_RE.format(expect_minimum_cooling_level)] + with loganalyzer: + mocker.mock_min_table(air_flow_dir, temperature, trust_state) + time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME) + + temperature = random.randint(0, max_temperature) + logging.info('Testing with air_flow_dir={}, temperature={}, trust_state={}'.format(air_flow_dir, temperature, not trust_state)) + expect_minimum_cooling_level = mocker.get_expect_cooling_level(air_flow_dir, temperature, not trust_state) + loganalyzer.expect_regex = [LOG_EXPECT_CHANGE_MIN_COOLING_LEVEL_RE.format(expect_minimum_cooling_level)] + with loganalyzer: + mocker.mock_min_table(air_flow_dir, temperature, not trust_state) + time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME) + + +@pytest.mark.disable_loganalyzer +def test_set_psu_fan_speed(duthost, mocker_factory): + hwsku = duthost.facts["hwsku"] + psu_num = SWITCH_MODELS[hwsku]['psus']['number'] + hot_swappable = SWITCH_MODELS[hwsku]['psus']['hot_swappable'] + if not hot_swappable: + pytest.skip('The SKU {} does not support this test case.'.format(hwsku)) + + logging.info('Create mocker, it may take a few seconds...') + single_fan_mocker = mocker_factory(duthost, 'SingleFanMocker') + logging.info('Mock FAN absence...') + single_fan_mocker.mock_absence() + assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, check_cooling_cur_state, duthost, 10, operator.eq), \ + 'Current cooling state is {}'.format(get_cooling_cur_state(duthost)) + + logging.info('Wait {} seconds for the policy to take effect...'.format(THERMAL_CONTROL_TEST_CHECK_INTERVAL)) + time.sleep(THERMAL_CONTROL_TEST_CHECK_INTERVAL) + full_speeds = [] + for index in range(psu_num): + speed = get_psu_speed(duthost, 
index) + full_speeds.append(speed) + + logging.info('Full speed={}'.format(full_speeds)) + logging.info('Mock FAN presence...') + single_fan_mocker.mock_presence() + assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, check_cooling_cur_state, duthost, 10, operator.ne), \ + 'Current cooling state is {}'.format(get_cooling_cur_state(duthost)) + logging.info('Wait {} seconds for the policy to take effect...'.format(THERMAL_CONTROL_TEST_CHECK_INTERVAL)) + time.sleep(THERMAL_CONTROL_TEST_CHECK_INTERVAL) + cooling_cur_state = get_cooling_cur_state(duthost) + logging.info('Cooling level changed to {}'.format(cooling_cur_state)) + current_speeds = [] + for index in range(psu_num): + speed = get_psu_speed(duthost, index) + current_speeds.append(speed) + + logging.info('Current speed={}'.format(current_speeds)) + index = 0 + if cooling_cur_state < 6: + cooling_cur_state = 6 + expect_multiple = float(10) / cooling_cur_state + while index < psu_num: + full_speed = full_speeds[index] + current_speed = current_speeds[index] + index += 1 + if not full_speed or not current_speed: + continue + + actual_multiple = float(full_speed) / current_speed + if expect_multiple > actual_multiple: + assert actual_multiple > expect_multiple * (1 - PSU_SPEED_TOLERANCE) + elif expect_multiple < actual_multiple: + assert actual_multiple < expect_multiple * (1 + PSU_SPEED_TOLERANCE) + + +def get_psu_speed(dut, index): + index = index + 1 + psu_speed_path = PSU_SPEED_PATH.format(index) + file_exists = dut.stat(path=psu_speed_path) + if not file_exists['stat']['exists']: + return None + + cmd_output = dut.command('cat {}'.format(psu_speed_path)) + try: + return int(cmd_output['stdout']) + except Exception as e: + assert False, 'Bad content in {} - {}'.format(psu_speed_path, e) + + +def get_cooling_cur_state(dut): + cmd_output = dut.command('cat {}'.format(COOLING_CUR_STATE_PATH)) + try: + return int(cmd_output['stdout']) + except Exception as e: + assert False, 'Bad content in {} - {}'.format(COOLING_CUR_STATE_PATH, e) + + +def check_cooling_cur_state(dut, expect_value, op): + actual_value = get_cooling_cur_state(dut) + return op(actual_value, expect_value) diff --git a/tests/platform_tests/platform_args/__init__.py b/tests/platform_tests/platform_args/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/platform/args/advanced_reboot_args.py b/tests/platform_tests/platform_args/advanced_reboot_args.py similarity index 89% rename from tests/platform/args/advanced_reboot_args.py rename to tests/platform_tests/platform_args/advanced_reboot_args.py index 00886bcb76..df103078ff 100644 --- a/tests/platform/args/advanced_reboot_args.py +++ b/tests/platform_tests/platform_args/advanced_reboot_args.py @@ -76,3 +76,11 @@ def add_advanced_reboot_args(parser): default=180, help="DUT reboot ready timeout", ) + + parser.addoption( + "--replace_fast_reboot_script", + action="store", + type=bool, + default=False, + help="Replace fast-reboot script on DUT", + ) diff --git a/tests/platform_tests/test_advanced_reboot.py b/tests/platform_tests/test_advanced_reboot.py new file mode 100644 index 0000000000..8a1cc05c20 --- /dev/null +++ b/tests/platform_tests/test_advanced_reboot.py @@ -0,0 +1,203 @@ +import pytest + +@pytest.mark.usefixtures('get_advanced_reboot') +def test_fast_reboot(request, get_advanced_reboot): + ''' + Fast reboot test case is run using advanced reboot test fixture + + @param request: Pytest commandline argument + @param get_advanced_reboot: advanced reboot test fixture + ''' +
advancedReboot = get_advanced_reboot(rebootType='fast-reboot') + advancedReboot.runRebootTestcase() + +@pytest.mark.usefixtures('get_advanced_reboot') +def test_warm_reboot(request, get_advanced_reboot): + ''' + Warm reboot test case is run using advanced reboot test fixture + + @param request: Pytest commandline argument + @param get_advanced_reboot: advanced reboot test fixture + ''' + advancedReboot = get_advanced_reboot(rebootType='warm-reboot') + advancedReboot.runRebootTestcase() + +@pytest.mark.usefixtures('get_advanced_reboot') +def test_warm_reboot_sad(request, get_advanced_reboot): + ''' + Warm reboot with sad path + + prebootList format is 'preboot oper type:number of VMS down:number of lag members down'. + For non lag member cases, this parameter will be skipped + + @param request: Pytest commandline argument + @param get_advanced_reboot: advanced reboot test fixture + ''' + advancedReboot = get_advanced_reboot(rebootType='warm-reboot') + prebootList = [ + 'neigh_bgp_down', # Shutdown single BGP session on remote device (VM) before rebooting DUT + 'dut_bgp_down', # Shutdown single BGP session on DUT before rebooting it + 'dut_lag_down', # Shutdown single LAG session on DUT before rebooting it + 'neigh_lag_down', # Shutdown single LAG session on remote device (VM) before rebooting DUT + 'dut_lag_member_down:1:1', # Shutdown 1 LAG member corresponding to 1 remote device (VM) on DUT + 'neigh_lag_member_down:1:1', # Shutdown 1 LAG member on 1 remote device (VM) + 'vlan_port_down', # Shutdown 1 vlan port (interface) on DUT + ] + + advancedReboot.runRebootTestcase( + prebootList=prebootList, + prebootFiles='peer_dev_info,neigh_port_info' + ) + +@pytest.mark.usefixtures('get_advanced_reboot') +def test_warm_reboot_multi_sad(request, get_advanced_reboot): + ''' + Warm reboot with multi sad path + + prebootList format is 'preboot oper type:number of VMS down:number of lag members down'.
+ For non lag member cases, this parameter will be skipped + + @param request: Pytest commandline argument + @param get_advanced_reboot: advanced reboot test fixture + ''' + advancedReboot = get_advanced_reboot(rebootType='warm-reboot') + lagMemberCnt = advancedReboot.getlagMemberCnt() + prebootList = [ + 'neigh_bgp_down:2', # Shutdown single BGP session on 2 remote devices (VMs) before rebooting DUT + 'dut_bgp_down:3', # Shutdown 3 BGP sessions on DUT before rebooting it + 'dut_lag_down:2', # Shutdown 2 LAG sessions on DUT before rebooting it + 'neigh_lag_down:3', # Shutdown 1 LAG session on 3 remote devices (VMs) before rebooting DUT + 'dut_lag_member_down:3:1', # Shutdown 1 LAG member of 3 LAG sessions corresponding to 3 remote devices (VM) + # on DUT + 'neigh_lag_member_down:2:1', # Shutdown 1 LAG member of 2 LAG sessions on 2 remote devices (VM) (1 each) + 'vlan_port_down:4', + ] + ([ + 'dut_lag_member_down:2:{0}'.format(lagMemberCnt), + # Shutdown LAG member(s) of 2 LAG sessions corresponding to 2 remote + # devices (VM) on DUT + 'neigh_lag_member_down:3:{0}'.format(lagMemberCnt), + # Shutdown LAG member(s) of 3 LAG sessions on 3 remote devices (VM) + # (1 each) + ] if advancedReboot.getTestbedType() in ['t0-64', 't0-116', 't0-64-32'] else []) + + advancedReboot.runRebootTestcase( + prebootList=prebootList, + prebootFiles='peer_dev_info,neigh_port_info' + ) + +@pytest.mark.usefixtures('get_advanced_reboot') +def test_warm_reboot_multi_sad_inboot(request, get_advanced_reboot): + ''' + Warm reboot with multi sad path (during boot) + + inboot list format: 'inboot_oper:route_cnt' + + @param request: Pytest commandline argument + @param get_advanced_reboot: advanced reboot test fixture + ''' + advancedReboot = get_advanced_reboot(rebootType='warm-reboot') + inbootList = [ + 'routing_del:50', # Delete 50 routes IPv4/IPv6 each (100 total) from each BGP session + 'routing_add:50', # Add 50 routes IPv4/IPv6 each (100 total) from each BGP session + ] + + advancedReboot.runRebootTestcase( + inbootList=inbootList, + prebootFiles='peer_dev_info,neigh_port_info' + ) + +@pytest.mark.usefixtures('get_advanced_reboot') +def test_warm_reboot_sad_bgp(request, get_advanced_reboot): + ''' + Warm reboot with sad (bgp) + + prebootList format is 'preboot oper type:number of VMS down:number of lag members down'. + For non lag member cases, this parameter will be skipped + + @param request: Pytest commandline argument + @param get_advanced_reboot: advanced reboot test fixture + ''' + advancedReboot = get_advanced_reboot(rebootType='warm-reboot') + prebootList = [ + 'neigh_bgp_down:2', # Shutdown single BGP session on 2 remote devices (VMs) before rebooting DUT + 'dut_bgp_down:3', # Shutdown 3 BGP sessions on DUT before rebooting it + ] + + advancedReboot.runRebootTestcase( + prebootList=prebootList, + prebootFiles='peer_dev_info,neigh_port_info' + ) + +@pytest.mark.usefixtures('get_advanced_reboot') +def test_warm_reboot_sad_lag_member(request, get_advanced_reboot): + ''' + Warm reboot with sad path (lag member) + + prebootList format is 'preboot oper type:number of VMS down:number of lag members down'.
+ For non lag member cases, this parameter will be skipped + + @param request: Pytest commandline argument + @param get_advanced_reboot: advanced reboot test fixture + ''' + advancedReboot = get_advanced_reboot(rebootType='warm-reboot') + lagMemberCnt = advancedReboot.getlagMemberCnt() + prebootList = [ + 'dut_lag_member_down:3:1', # Shutdown 1 LAG member of 3 LAG sessions corresponding to 3 remote devices (VM) + # on DUT + 'neigh_lag_member_down:2:1', # Shutdown 1 LAG member of 2 LAG sessions on 2 remote devices (VM) (1 each) + ] + ([ + 'dut_lag_member_down:2:{0}'.format(lagMemberCnt), + # Shutdown LAG member(s) of 2 LAG sessions corresponding to 2 remote + # devices (VM) on DUT + 'neigh_lag_member_down:3:{0}'.format(lagMemberCnt), + # Shutdown LAG member(s) of 3 LAG sessions on 3 remote devices (VM) + # (1 each) + ] if advancedReboot.getTestbedType() in ['t0-64', 't0-116', 't0-64-32'] else []) + + advancedReboot.runRebootTestcase( + prebootList=prebootList, + prebootFiles='peer_dev_info,neigh_port_info' + ) + +@pytest.mark.usefixtures('get_advanced_reboot') +def test_warm_reboot_sad_lag(request, get_advanced_reboot): + ''' + Warm reboot with sad path (lag) + + prebootList format is 'preboot oper type:number of VMS down:number of lag members down'. + For non lag member cases, this parameter will be skipped + + @param request: Pytest commandline argument + @param get_advanced_reboot: advanced reboot test fixture + ''' + advancedReboot = get_advanced_reboot(rebootType='warm-reboot') + prebootList = [ + 'dut_lag_down:2', # Shutdown 2 LAG sessions on DUT before rebooting it + 'neigh_lag_down:3', # Shutdown 1 LAG session on 3 remote devices (VMs) before rebooting DUT + ] + + advancedReboot.runRebootTestcase( + prebootList=prebootList, + prebootFiles='peer_dev_info,neigh_port_info' + ) + +@pytest.mark.usefixtures('get_advanced_reboot') +def test_warm_reboot_sad_vlan_port(request, get_advanced_reboot): + ''' + Warm reboot with sad path (vlan port) + + prebootList format is 'preboot oper type:number of VMS down:number of lag members down'.
+ For non lag member cases, this parameter will be skipped + + @param request: Pytest commandline argument + @param get_advanced_reboot: advanced reboot test fixture + ''' + advancedReboot = get_advanced_reboot(rebootType='warm-reboot') + prebootList = [ + 'vlan_port_down:4', # Shutdown 4 vlan ports (interfaces) on DUT + ] + + advancedReboot.runRebootTestcase( + prebootList=prebootList, + prebootFiles='peer_dev_info,neigh_port_info' + ) diff --git a/tests/platform_tests/test_link_flap.py b/tests/platform_tests/test_link_flap.py new file mode 100644 index 0000000000..3604f038a8 --- /dev/null +++ b/tests/platform_tests/test_link_flap.py @@ -0,0 +1,81 @@ +import logging + +import pytest + +from common.platform.device_utils import fanout_switch_port_lookup +from common.utilities import wait_until + +class TestLinkFlap: + def __get_dut_if_status(self, dut, ifname=None): + if not ifname: + status = dut.show_interface(command='status')['ansible_facts']['int_status'] + else: + status = dut.show_interface(command='status', interfaces=[ifname])['ansible_facts']['int_status'] + + return status + + + def __check_if_status(self, dut, dut_port, exp_state, verbose=False): + status = self.__get_dut_if_status(dut, dut_port)[dut_port] + if verbose: + logging.debug("Interface status : {}".format(status)) + return status['oper_state'] == exp_state + + + def __toggle_one_link(self, dut, dut_port, fanout, fanout_port): + logging.info("Testing link flap on {}".format(dut_port)) + + assert self.__check_if_status(dut, dut_port, 'up', verbose=True), "Fail: dut port {}: link operational down".format(dut_port) + + logging.info("Shutting down fanout switch {} port {} connecting to {}".format(fanout.hostname, fanout_port, dut_port)) + self.ports_shutdown_by_test.add((fanout, fanout_port)) + fanout.shutdown(fanout_port) + wait_until(30, 1, self.__check_if_status, dut, dut_port, 'down') + assert self.__check_if_status(dut, dut_port, 'down', verbose=True), "dut port {} didn't go down as expected".format(dut_port) + + logging.info("Bring up fanout switch {} port {} connecting to {}".format(fanout.hostname, fanout_port, dut_port)) + fanout.no_shutdown(fanout_port) + wait_until(30, 1, self.__check_if_status, dut, dut_port, 'up') + assert self.__check_if_status(dut, dut_port, 'up', verbose=True), "dut port {} didn't come up as expected".format(dut_port) + self.ports_shutdown_by_test.discard((fanout, fanout_port)) + + + def __build_test_candidates(self, dut, fanouthosts): + status = self.__get_dut_if_status(dut) + candidates = [] + + for dut_port in status.keys(): + fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, dut_port) + + if not fanout or not fanout_port: + logging.info("Skipping port {} that is not found in connection graph".format(dut_port)) + elif status[dut_port]['admin_state'] == 'down': + logging.info("Skipping port {} that is admin down".format(dut_port)) + else: + candidates.append((dut_port, fanout, fanout_port)) + + return candidates + + + def run_link_flap_test(self, dut, fanouthosts): + self.ports_shutdown_by_test = set() + + candidates = self.__build_test_candidates(dut, fanouthosts) + if not candidates: + pytest.skip("Didn't find any port that is admin up and present in the connection graph") + + try: + for dut_port, fanout, fanout_port in candidates: + self.__toggle_one_link(dut, dut_port, fanout, fanout_port) + finally: + logging.info("Restoring fanout switch ports that were shut down by test") + for fanout, fanout_port in self.ports_shutdown_by_test: + logging.debug("Restoring fanout switch {}
port {} shut down by test".format(fanout.hostname, fanout_port)) + fanout.no_shutdown(fanout_port) + + +@pytest.mark.topology('any') +@pytest.mark.platform('physical') +def test_link_flap(duthost, fanouthosts): + tlf = TestLinkFlap() + tlf.run_link_flap_test(duthost, fanouthosts) diff --git a/tests/platform/test_platform_info.py b/tests/platform_tests/test_platform_info.py similarity index 70% rename from tests/platform/test_platform_info.py rename to tests/platform_tests/test_platform_info.py index 589ac9c027..a1a2cd62d6 100644 --- a/tests/platform/test_platform_info.py +++ b/tests/platform_tests/test_platform_info.py @@ -32,10 +32,14 @@ LOG_EXPECT_POLICY_FILE_INVALID = '.*Caught exception while initializing thermal manager.*' LOG_EXPECT_FAN_REMOVE_RE = '.*Fan removed warning:.*' LOG_EXPECT_FAN_REMOVE_CLEAR_RE = '.*Fan removed warning cleared:.*' -LOG_EXPECT_FAN_UNDER_SPEED_RE = '.*Fan under speed warning:.*' -LOG_EXPECT_FAN_UNDER_SPEED_CLEAR_RE = '.*Fan under speed warning cleared:.*' -LOG_EXPECT_FAN_OVER_SPEED_RE = '.*Fan over speed warning:*' -LOG_EXPECT_FAN_OVER_SPEED_CLEAR_RE = '.*Fan over speed warning cleared:.*' +LOG_EXPECT_FAN_FAULT_RE = '.*Fan fault warning:.*' +LOG_EXPECT_FAN_FAULT_CLEAR_RE = '.*Fan fault warning cleared:.*' +LOG_EXPECT_FAN_UNDER_SPEED_RE = '.*Fan low speed warning:.*' +LOG_EXPECT_FAN_UNDER_SPEED_CLEAR_RE = '.*Fan low speed warning cleared:.*' +LOG_EXPECT_FAN_OVER_SPEED_RE = '.*Fan high speed warning:.*' +LOG_EXPECT_FAN_OVER_SPEED_CLEAR_RE = '.*Fan high speed warning cleared:.*' +LOG_EXPECT_INSUFFICIENT_FAN_NUM_RE = '.*Insufficient number of working fans warning:.*' +LOG_EXPECT_INSUFFICIENT_FAN_NUM_CLEAR_RE = '.*Insufficient number of working fans warning cleared:.*' def check_sensord_status(ans_host): @@ -73,24 +77,23 @@ def stop_pmon_sensord_task(ans_host): @pytest.fixture(scope="module") -def psu_test_setup_teardown(testbed_devices): +def psu_test_setup_teardown(duthost): """ @summary: Sensord task will print out error msg when detecting PSU offline, which can cause log analyzer to fail the test. So stop sensord task before test and restart it after all tests finish.
""" logging.info("Starting psu test setup") - ans_host = testbed_devices["dut"] - stop_pmon_sensord_task(ans_host) + stop_pmon_sensord_task(duthost) yield logging.info("Starting psu test teardown") - sensord_running_status, sensord_pid = check_sensord_status(ans_host) + sensord_running_status, sensord_pid = check_sensord_status(duthost) if not sensord_running_status: ans_host.command("docker exec pmon supervisorctl restart lm-sensors") time.sleep(3) - sensord_running_status, sensord_pid = check_sensord_status(ans_host) + sensord_running_status, sensord_pid = check_sensord_status(duthost) if sensord_running_status: logging.info("sensord task restarted, pid = {}".format(sensord_pid)) else: @@ -99,14 +102,12 @@ def psu_test_setup_teardown(testbed_devices): logging.info("sensord is running, pid = {}".format(sensord_pid)) -def test_show_platform_summary(testbed_devices): +def test_show_platform_summary(duthost): """ @summary: Check output of 'show platform summary' """ - ans_host = testbed_devices["dut"] - logging.info("Check output of '%s'" % CMD_PLATFORM_SUMMARY) - platform_summary = ans_host.command(CMD_PLATFORM_SUMMARY) + platform_summary = duthost.command(CMD_PLATFORM_SUMMARY) expected_fields = set(["Platform", "HwSKU", "ASIC"]) actual_fields = set() for line in platform_summary["stdout_lines"]: @@ -137,18 +138,16 @@ def check_vendor_specific_psustatus(dut, psu_status_line): check_psu_sysfs(dut, psu_id, psu_status) -def test_show_platform_psustatus(testbed_devices): +def test_show_platform_psustatus(duthost): """ @summary: Check output of 'show platform psustatus' """ - ans_host = testbed_devices["dut"] - - logging.info("Check PSU status using '%s', hostname: %s" % (CMD_PLATFORM_PSUSTATUS, ans_host.hostname)) - psu_status = ans_host.command(CMD_PLATFORM_PSUSTATUS) + logging.info("Check PSU status using '%s', hostname: %s" % (CMD_PLATFORM_PSUSTATUS, duthost.hostname)) + psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS) psu_line_pattern = re.compile(r"PSU\s+\d+\s+(OK|NOT OK|NOT PRESENT)") for line in psu_status["stdout_lines"][2:]: assert psu_line_pattern.match(line), "Unexpected PSU status output" - check_vendor_specific_psustatus(ans_host, line) + check_vendor_specific_psustatus(duthost, line) def get_psu_num(dut): @@ -189,29 +188,34 @@ def check_all_psu_on(dut, psu_test_results): return len(power_off_psu_list) == 0 -def test_turn_on_off_psu_and_check_psustatus(testbed_devices, psu_controller): +@pytest.mark.disable_loganalyzer +def test_turn_on_off_psu_and_check_psustatus(duthost, psu_controller): """ @summary: Turn off/on PSU and check PSU status using 'show platform psustatus' """ - ans_host = testbed_devices["dut"] + loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='turn_on_off_psu_and_check_psustatus') + loganalyzer.load_common_config() + + loganalyzer.ignore_regex.append("Error getting sensor data: dps460.*Kernel interface error") + marker = loganalyzer.init() psu_line_pattern = re.compile(r"PSU\s+\d+\s+(OK|NOT OK|NOT PRESENT)") - psu_num = get_psu_num(ans_host) + psu_num = get_psu_num(duthost) if psu_num < 2: pytest.skip("At least 2 PSUs required for rest of the testing in this case") logging.info("Create PSU controller for testing") psu_ctrl = psu_controller if psu_ctrl is None: - pytest.skip("No PSU controller for %s, skip rest of the testing in this case" % ans_host.hostname) + pytest.skip("No PSU controller for %s, skip rest of the testing in this case" % duthost.hostname) logging.info("To avoid DUT being shutdown, need to turn on PSUs that are not powered") 
turn_all_psu_on(psu_ctrl) logging.info("Initialize test results") psu_test_results = {} - if not check_all_psu_on(ans_host, psu_test_results): + if not check_all_psu_on(duthost, psu_test_results): pytest.skip("Some PSU are still down, skip rest of the testing in this case") assert len(psu_test_results.keys()) == psu_num, \ @@ -226,32 +230,34 @@ def test_turn_on_off_psu_and_check_psustatus(testbed_devices, psu_controller): psu_ctrl.turn_off_psu(psu["psu_id"]) time.sleep(5) - cli_psu_status = ans_host.command(CMD_PLATFORM_PSUSTATUS) + cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS) for line in cli_psu_status["stdout_lines"][2:]: assert psu_line_pattern.match(line), "Unexpected PSU status output" fields = line.split() if fields[2] != "OK": psu_under_test = fields[1] - check_vendor_specific_psustatus(ans_host, line) + check_vendor_specific_psustatus(duthost, line) assert psu_under_test is not None, "No PSU is turned off" logging.info("Turn on PSU %s" % str(psu["psu_id"])) psu_ctrl.turn_on_psu(psu["psu_id"]) time.sleep(5) - cli_psu_status = ans_host.command(CMD_PLATFORM_PSUSTATUS) + cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS) for line in cli_psu_status["stdout_lines"][2:]: assert psu_line_pattern.match(line), "Unexpected PSU status output" fields = line.split() if fields[1] == psu_under_test: assert fields[2] == "OK", "Unexpected PSU status after turning it on" - check_vendor_specific_psustatus(ans_host, line) + check_vendor_specific_psustatus(duthost, line) psu_test_results[psu_under_test] = True for psu in psu_test_results: assert psu_test_results[psu], "Test psu status of PSU %s failed" % psu + loganalyzer.analyze(marker) + def parse_platform_summary(raw_input_lines): """ @@ -267,16 +273,13 @@ return res -def test_show_platform_syseeprom(testbed_devices): +def test_show_platform_syseeprom(duthost): """ @summary: Check output of 'show platform syseeprom' """ - ans_host = testbed_devices["dut"] - logging.info("Check output of '%s'" % CMD_PLATFORM_SYSEEPROM) - show_output = ans_host.command(CMD_PLATFORM_SYSEEPROM) - assert show_output["rc"] == 0, "Run command '%s' failed" % CMD_PLATFORM_SYSEEPROM - if ans_host.facts["asic_type"] in ["mellanox"]: + show_output = duthost.command(CMD_PLATFORM_SYSEEPROM) + if duthost.facts["asic_type"] in ["mellanox"]: expected_fields = [ "Product Name", "Part Number", @@ -291,8 +294,8 @@ "CRC-32"] utility_cmd = "sudo python -c \"import imp; \ m = imp.load_source('eeprom', '/usr/share/sonic/device/%s/plugins/eeprom.py'); \ - t = m.board('board', '', '', ''); e = t.read_eeprom(); t.decode_eeprom(e)\"" % ans_host.facts["platform"] - utility_cmd_output = ans_host.command(utility_cmd) + t = m.board('board', '', '', ''); e = t.read_eeprom(); t.decode_eeprom(e)\"" % duthost.facts["platform"] + utility_cmd_output = duthost.command(utility_cmd) for field in expected_fields: assert show_output["stdout"].find(field) >= 0, "Expected field %s is not found" % field @@ -306,7 +309,7 @@ def check_show_platform_fanstatus_output(lines): """ @summary: Check basic output of 'show platform fan'. Expected outputs are: - "Fan Not detected" or a table of fan status data with 6 columns. + "Fan Not detected" or a table of fan status data with 8 columns.
""" assert len(lines) > 0, 'There must be at least one line output for show platform fans' if len(lines) == 1: @@ -315,30 +318,28 @@ def check_show_platform_fanstatus_output(lines): assert len(lines) > 2, 'There must be at least two lines output for show platform fans if any FAN is detected' second_line = lines[1] field_ranges = get_field_range(second_line) - assert len(field_ranges) == 6, 'There must be 6 columns in output of show platform fans' + assert len(field_ranges) == 8, 'There must be 8 columns in output of show platform fans' -def test_show_platform_fanstatus(testbed_devices, mocker_factory): +def test_show_platform_fanstatus(duthost, mocker_factory): """ @summary: Check output of 'show platform fan'. """ # Do basic check first - dut = testbed_devices["dut"] logging.info("Check output of '%s'" % CMD_PLATFORM_FANSTATUS) - cli_fan_status = dut.command(CMD_PLATFORM_FANSTATUS) - assert cli_fan_status["rc"] == 0, "Run command '%s' failed" % CMD_PLATFORM_FANSTATUS + cli_fan_status = duthost.command(CMD_PLATFORM_FANSTATUS) lines = cli_fan_status["stdout_lines"] check_show_platform_fanstatus_output(lines) # Mock data and check - mocker = mocker_factory(dut, 'FanStatusMocker') + mocker = mocker_factory(duthost, 'FanStatusMocker') if mocker is None: - pytest.skip("No FanStatusMocker for %s, skip rest of the testing in this case" % dut.facts['asic_type']) + pytest.skip("No FanStatusMocker for %s, skip rest of the testing in this case" % duthost.facts['asic_type']) logging.info('Mock FAN status data...') mocker.mock_data() logging.info('Wait and check actual data with mocked FAN status data...') - result = check_cli_output_with_mocker(dut, mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME) + result = check_cli_output_with_mocker(duthost, mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2) assert result, 'FAN mock data mismatch' @@ -358,116 +359,116 @@ def check_show_platform_temperature_output(lines): assert len(field_ranges) == 8, 'There must be 8 columns in output of show platform temperature' -def test_show_platform_temperature(testbed_devices, mocker_factory): +def test_show_platform_temperature(duthost, mocker_factory): """ @summary: Check output of 'show platform temperature' """ # Do basic check first - dut = testbed_devices["dut"] logging.info("Check output of '%s'" % CMD_PLATFORM_TEMPER) - cli_thermal_status = dut.command(CMD_PLATFORM_TEMPER) - assert cli_thermal_status["rc"] == 0, "Run command '%s' failed" % CMD_PLATFORM_TEMPER + cli_thermal_status = duthost.command(CMD_PLATFORM_TEMPER) # Mock data and check - mocker = mocker_factory(dut, 'ThermalStatusMocker') + mocker = mocker_factory(duthost, 'ThermalStatusMocker') if mocker is None: - pytest.skip("No ThermalStatusMocker for %s, skip rest of the testing in this case" % dut.facts['asic_type']) + pytest.skip("No ThermalStatusMocker for %s, skip rest of the testing in this case" % duthost.facts['asic_type']) logging.info('Mock Thermal status data...') mocker.mock_data() logging.info('Wait and check actual data with mocked Thermal status data...') - result = check_cli_output_with_mocker(dut, mocker, CMD_PLATFORM_TEMPER, THERMAL_CONTROL_TEST_WAIT_TIME) + result = check_cli_output_with_mocker(duthost, mocker, CMD_PLATFORM_TEMPER, THERMAL_CONTROL_TEST_WAIT_TIME) assert result, 'Thermal mock data mismatch' @pytest.mark.disable_loganalyzer -def test_thermal_control_load_invalid_format_json(testbed_devices): +def test_thermal_control_load_invalid_format_json(duthost): """ @summary: Load a thermal policy file 
with invalid format, check thermal control daemon is up and there is an error log printed """ logging.info('Loading invalid format policy file...') - check_thermal_control_load_invalid_file(testbed_devices, THERMAL_POLICY_INVALID_VALUE_FILE) + check_thermal_control_load_invalid_file(duthost, THERMAL_POLICY_INVALID_FORMAT_FILE) @pytest.mark.disable_loganalyzer -def test_thermal_control_load_invalid_value_json(testbed_devices): +def test_thermal_control_load_invalid_value_json(duthost): """ @summary: Load a thermal policy file with invalid value, check thermal control daemon is up and there is an error log printed """ logging.info('Loading invalid value policy file...') - check_thermal_control_load_invalid_file(testbed_devices, THERMAL_POLICY_INVALID_VALUE_FILE) + check_thermal_control_load_invalid_file(duthost, THERMAL_POLICY_INVALID_VALUE_FILE) -def check_thermal_control_load_invalid_file(testbed_devices, file_name): +def check_thermal_control_load_invalid_file(duthost, file_name): """ @summary: Load an invalid thermal policy file check thermal control daemon is up and there is an error log printed """ - dut = testbed_devices["dut"] - loganalyzer = LogAnalyzer(ansible_host=dut, marker_prefix='thermal_control') - with ThermalPolicyFileContext(dut, file_name): + loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='thermal_control') + with ThermalPolicyFileContext(duthost, file_name): loganalyzer.expect_regex = [LOG_EXPECT_POLICY_FILE_INVALID] with loganalyzer: - restart_thermal_control_daemon(dut) + restart_thermal_control_daemon(duthost) -def test_thermal_control_psu_absence(testbed_devices, psu_controller, mocker_factory): +def test_thermal_control_psu_absence(duthost, psu_controller, mocker_factory): """ @summary: Turn off/on PSUs, check thermal control is working as expected.
""" - dut = testbed_devices["dut"] - psu_num = get_psu_num(dut) + psu_num = get_psu_num(duthost) if psu_num < 2: pytest.skip("At least 2 PSUs required for rest of the testing in this case") logging.info("Create PSU controller for testing") psu_ctrl = psu_controller if psu_ctrl is None: - pytest.skip("No PSU controller for %s, skip rest of the testing in this case" % dut.hostname) + pytest.skip("No PSU controller for %s, skip rest of the testing in this case" % duthost.hostname) logging.info("To avoid DUT being shutdown, need to turn on PSUs that are not powered") turn_all_psu_on(psu_ctrl) logging.info("Initialize test results") psu_test_results = {} - if not check_all_psu_on(dut, psu_test_results): + if not check_all_psu_on(duthost, psu_test_results): pytest.skip("Some PSU are still down, skip rest of the testing in this case") - with ThermalPolicyFileContext(dut, THERMAL_POLICY_VALID_FILE): - fan_mocker = mocker_factory(dut, 'FanStatusMocker') + with ThermalPolicyFileContext(duthost, THERMAL_POLICY_VALID_FILE): + fan_mocker = mocker_factory(duthost, 'FanStatusMocker') if fan_mocker is None: - pytest.skip("No FanStatusMocker for %s, skip rest of the testing in this case" % dut.facts['asic_type']) + pytest.skip("No FanStatusMocker for %s, skip rest of the testing in this case" % duthost.facts['asic_type']) - logging.info('Mock FAN status data...') - fan_mocker.mock_data() # make data random - restart_thermal_control_daemon(dut) + restart_thermal_control_daemon(duthost) logging.info('Wait and check all FAN speed turn to 60%...') - wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, fan_mocker.check_all_fan_speed, - 60) + wait_result = wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, + THERMAL_CONTROL_TEST_CHECK_INTERVAL, + fan_mocker.check_all_fan_speed, + 60) + if not wait_result: + pytest.skip("FAN speed is not 60%, there might be abnormal in FAN/PSU, skip rest of the testing in this case") - check_thermal_algorithm_status(dut, mocker_factory, False) + check_thermal_algorithm_status(duthost, mocker_factory, False) logging.info('Shutdown first PSU and check thermal control result...') all_psu_status = psu_ctrl.get_psu_status() psu = all_psu_status[0] - turn_off_psu_and_check_thermal_control(dut, psu_ctrl, psu, fan_mocker) + turn_off_psu_and_check_thermal_control(duthost, psu_ctrl, psu, fan_mocker) psu_test_results.clear() - if not check_all_psu_on(dut, psu_test_results): + if not check_all_psu_on(duthost, psu_test_results): pytest.skip("Some PSU are still down, skip rest of the testing in this case") logging.info('Shutdown second PSU and check thermal control result...') psu = all_psu_status[1] - turn_off_psu_and_check_thermal_control(dut, psu_ctrl, psu, fan_mocker) + turn_off_psu_and_check_thermal_control(duthost, psu_ctrl, psu, fan_mocker) psu_test_results.clear() - if not check_all_psu_on(dut, psu_test_results): + if not check_all_psu_on(duthost, psu_test_results): pytest.skip("Some PSU are still down, skip rest of the testing in this case") logging.info('Wait and check all FAN speed turn to 65%...') - wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, fan_mocker.check_all_fan_speed, - 65) + assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, + THERMAL_CONTROL_TEST_CHECK_INTERVAL, + fan_mocker.check_all_fan_speed, + 65), 'FAN speed not change to 65% according to policy' def turn_off_psu_and_check_thermal_control(dut, psu_ctrl, psu, mocker): @@ -490,68 +491,83 @@ def turn_off_psu_and_check_thermal_control(dut, psu_ctrl, psu, mocker): assert 
psu_under_test is not None, "No PSU is turned off" logging.info('Wait and check all FAN speed turn to 100%...') - wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, mocker.check_all_fan_speed, 100) + assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, + THERMAL_CONTROL_TEST_CHECK_INTERVAL, + mocker.check_all_fan_speed, + 100), 'FAN speed did not turn to 100% after PSU off' psu_ctrl.turn_on_psu(psu["psu_id"]) + time.sleep(5) @pytest.mark.disable_loganalyzer -def test_thermal_control_fan_status(testbed_devices, mocker_factory): +def test_thermal_control_fan_status(duthost, mocker_factory): """ @summary: Make FAN absence, over speed and under speed, check logs and LED color. """ - dut = testbed_devices["dut"] - loganalyzer = LogAnalyzer(ansible_host=dut, marker_prefix='thermal_control') + loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='thermal_control') loganalyzer.load_common_config() - with ThermalPolicyFileContext(dut, THERMAL_POLICY_VALID_FILE): - fan_mocker = mocker_factory(dut, 'FanStatusMocker') + with ThermalPolicyFileContext(duthost, THERMAL_POLICY_VALID_FILE): + fan_mocker = mocker_factory(duthost, 'FanStatusMocker') if fan_mocker is None: - pytest.skip("No FanStatusMocker for %s, skip rest of the testing in this case" % dut.facts['asic_type']) + pytest.skip("No FanStatusMocker for %s, skip rest of the testing in this case" % duthost.facts['asic_type']) logging.info('Mock FAN status data...') fan_mocker.mock_data() # make data random - restart_thermal_control_daemon(dut) + restart_thermal_control_daemon(duthost) wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, fan_mocker.check_all_fan_speed, 60) - check_thermal_algorithm_status(dut, mocker_factory, False) + check_thermal_algorithm_status(duthost, mocker_factory, False) - single_fan_mocker = mocker_factory(dut, 'SingleFanMocker') + single_fan_mocker = mocker_factory(duthost, 'SingleFanMocker') time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME) if single_fan_mocker.is_fan_removable(): - loganalyzer.expect_regex = [LOG_EXPECT_FAN_REMOVE_RE] + loganalyzer.expect_regex = [LOG_EXPECT_FAN_REMOVE_RE, LOG_EXPECT_INSUFFICIENT_FAN_NUM_RE] with loganalyzer: logging.info('Mocking an absent FAN...') single_fan_mocker.mock_absence() - check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME) + check_cli_output_with_mocker(duthost, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2) - loganalyzer.expect_regex = [LOG_EXPECT_FAN_REMOVE_CLEAR_RE] + loganalyzer.expect_regex = [LOG_EXPECT_FAN_REMOVE_CLEAR_RE, LOG_EXPECT_INSUFFICIENT_FAN_NUM_CLEAR_RE] + with loganalyzer: logging.info('Make the absent FAN back to presence...') single_fan_mocker.mock_presence() - check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME) + check_cli_output_with_mocker(duthost, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2) + + loganalyzer.expect_regex = [LOG_EXPECT_FAN_FAULT_RE, LOG_EXPECT_INSUFFICIENT_FAN_NUM_RE] + with loganalyzer: + logging.info('Mocking a faulty FAN...') + single_fan_mocker.mock_status(False) + check_cli_output_with_mocker(duthost, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2) + loganalyzer.expect_regex = [LOG_EXPECT_FAN_FAULT_CLEAR_RE, LOG_EXPECT_INSUFFICIENT_FAN_NUM_CLEAR_RE] + with loganalyzer: + logging.info('Mocking the faulty FAN back to normal...') + single_fan_mocker.mock_status(True) +
check_cli_output_with_mocker(duthost, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2) + loganalyzer.expect_regex = [LOG_EXPECT_FAN_OVER_SPEED_RE] with loganalyzer: logging.info('Mocking an over speed FAN...') single_fan_mocker.mock_over_speed() - check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME) + check_cli_output_with_mocker(duthost, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2) loganalyzer.expect_regex = [LOG_EXPECT_FAN_OVER_SPEED_CLEAR_RE] with loganalyzer: logging.info('Make the over speed FAN back to normal...') single_fan_mocker.mock_normal_speed() - check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME) + check_cli_output_with_mocker(duthost, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2) loganalyzer.expect_regex = [LOG_EXPECT_FAN_UNDER_SPEED_RE] with loganalyzer: logging.info('Mocking an under speed FAN...') single_fan_mocker.mock_under_speed() - check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME) + check_cli_output_with_mocker(duthost, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2) loganalyzer.expect_regex = [LOG_EXPECT_FAN_UNDER_SPEED_CLEAR_RE] with loganalyzer: logging.info('Make the under speed FAN back to normal...') single_fan_mocker.mock_normal_speed() - check_cli_output_with_mocker(dut, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME) + check_cli_output_with_mocker(duthost, single_fan_mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2) diff --git a/tests/platform/test_reboot.py b/tests/platform_tests/test_reboot.py similarity index 78% rename from tests/platform/test_reboot.py rename to tests/platform_tests/test_reboot.py index a684f0f053..14e96a4013 100644 --- a/tests/platform/test_reboot.py +++ b/tests/platform_tests/test_reboot.py @@ -22,9 +22,9 @@ from common.reboot import * from common.platform.interface_utils import check_interface_information from common.platform.transceiver_utils import check_transceiver_basic +from common.platform.daemon_utils import check_pmon_daemon_status from check_critical_services import check_critical_services -from check_daemon_status import check_pmon_daemon_status pytestmark = [pytest.mark.disable_loganalyzer] @@ -104,40 +104,32 @@ def check_interfaces_and_services(dut, interfaces, reboot_type = None): check_sysfs(dut) -def test_cold_reboot(testbed_devices, conn_graph_facts): +def test_cold_reboot(duthost, localhost, conn_graph_facts): """ @summary: This test case is to perform cold reboot and check platform status """ - ans_host = testbed_devices["dut"] - localhost = testbed_devices["localhost"] + reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"], reboot_type=REBOOT_TYPE_COLD) - reboot_and_check(localhost, ans_host, conn_graph_facts["device_conn"], reboot_type=REBOOT_TYPE_COLD) - -def test_fast_reboot(testbed_devices, conn_graph_facts): +def test_fast_reboot(duthost, localhost, conn_graph_facts): """ @summary: This test case is to perform fast reboot and check platform status """ - ans_host = testbed_devices["dut"] - localhost = testbed_devices["localhost"] - - reboot_and_check(localhost, ans_host, conn_graph_facts["device_conn"], reboot_type=REBOOT_TYPE_FAST) + reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"], reboot_type=REBOOT_TYPE_FAST) -def
test_warm_reboot(testbed_devices, conn_graph_facts): +def test_warm_reboot(duthost, localhost, conn_graph_facts): """ @summary: This test case is to perform warm reboot and check platform status """ - ans_host = testbed_devices["dut"] - localhost = testbed_devices["localhost"] - asic_type = ans_host.facts["asic_type"] + asic_type = duthost.facts["asic_type"] if asic_type in ["mellanox"]: - issu_capability = ans_host.command("show platform mlnx issu")["stdout"] + issu_capability = duthost.command("show platform mlnx issu")["stdout"] if "disabled" in issu_capability: pytest.skip("ISSU is not supported on this DUT, skip this test case") - reboot_and_check(localhost, ans_host, conn_graph_facts["device_conn"], reboot_type=REBOOT_TYPE_WARM) + reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"], reboot_type=REBOOT_TYPE_WARM) @pytest.fixture(params=[15, 5]) @@ -170,20 +162,18 @@ def _power_off_reboot_helper(kwargs): psu_ctrl.turn_on_psu(psu["psu_id"]) -def test_power_off_reboot(testbed_devices, conn_graph_facts, psu_controller, power_off_delay): +def test_power_off_reboot(duthost, localhost, conn_graph_facts, psu_controller, power_off_delay): """ @summary: This test case is to perform reboot via powercycle and check platform status - @param testbed_devices: Fixture initialize devices in testbed + @param duthost: Fixture for DUT AnsibleHost object + @param localhost: Fixture for interacting with localhost through ansible @param conn_graph_facts: Fixture parse and return lab connection graph @param psu_controller: The python object of psu controller @param power_off_delay: Pytest fixture. The delay between turning off and on the PSU """ - ans_host = testbed_devices["dut"] - localhost = testbed_devices["localhost"] - psu_ctrl = psu_controller if psu_ctrl is None: - pytest.skip("No PSU controller for %s, skip rest of the testing in this case" % ans_host.hostname) + pytest.skip("No PSU controller for %s, skip rest of the testing in this case" % duthost.hostname) all_psu = psu_ctrl.get_psu_status() @@ -199,39 +189,33 @@ def test_power_off_reboot(testbed_devices, conn_graph_facts, psu_controller, pow logging.info("Got all power on sequences {}".format(power_on_seq_list)) - poweroff_reboot_kwargs = {"dut": ans_host} + poweroff_reboot_kwargs = {"dut": duthost} for power_on_seq in power_on_seq_list: poweroff_reboot_kwargs["psu_ctrl"] = psu_ctrl poweroff_reboot_kwargs["all_psu"] = all_psu poweroff_reboot_kwargs["power_on_seq"] = power_on_seq poweroff_reboot_kwargs["delay_time"] = power_off_delay - reboot_and_check(localhost, ans_host, conn_graph_facts["device_conn"], REBOOT_TYPE_POWEROFF, + reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"], REBOOT_TYPE_POWEROFF, _power_off_reboot_helper, poweroff_reboot_kwargs) -def test_watchdog_reboot(testbed_devices, conn_graph_facts): +def test_watchdog_reboot(duthost, localhost, conn_graph_facts): """ @summary: This test case is to perform reboot via watchdog and check platform status """ - ans_host = testbed_devices["dut"] - localhost = testbed_devices["localhost"] - test_watchdog_supported = "python -c \"import sonic_platform.platform as P; P.Platform().get_chassis().get_watchdog(); exit()\"" - watchdog_supported = ans_host.command(test_watchdog_supported,module_ignore_errors=True)["stderr"] + watchdog_supported = duthost.command(test_watchdog_supported,module_ignore_errors=True)["stderr"] if "" != watchdog_supported: pytest.skip("Watchdog is not supported on this DUT, skip this test case") - reboot_and_check(localhost, ans_host,
conn_graph_facts["device_conn"], REBOOT_TYPE_WATCHDOG) + reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"], REBOOT_TYPE_WATCHDOG) -def test_continuous_reboot(testbed_devices, conn_graph_facts): +def test_continuous_reboot(duthost, localhost, conn_graph_facts): """ @summary: This test case is to perform 3 cold reboot in a row """ - ans_host = testbed_devices["dut"] - localhost = testbed_devices["localhost"] - for i in range(3): - reboot_and_check(localhost, ans_host, conn_graph_facts["device_conn"], reboot_type=REBOOT_TYPE_COLD) + reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"], reboot_type=REBOOT_TYPE_COLD) diff --git a/tests/platform/test_reload_config.py b/tests/platform_tests/test_reload_config.py similarity index 75% rename from tests/platform/test_reload_config.py rename to tests/platform_tests/test_reload_config.py index 1cf025b46e..cbb5f5618a 100644 --- a/tests/platform/test_reload_config.py +++ b/tests/platform_tests/test_reload_config.py @@ -10,7 +10,7 @@ import pytest -from platform_fixtures import conn_graph_facts +from common.fixtures.conn_graph_facts import conn_graph_facts from common.utilities import wait_until from check_critical_services import check_critical_services from check_transceiver_status import check_transceiver_basic @@ -19,26 +19,25 @@ pytestmark = [pytest.mark.disable_loganalyzer] -def test_reload_configuration(testbed_devices, conn_graph_facts): +def test_reload_configuration(duthost, conn_graph_facts): """ @summary: This test case is to reload the configuration and check platform status """ - ans_host = testbed_devices["dut"] interfaces = conn_graph_facts["device_conn"] - asic_type = ans_host.facts["asic_type"] + asic_type = duthost.facts["asic_type"] logging.info("Reload configuration") - ans_host.command("sudo config reload -y") + duthost.command("sudo config reload -y") logging.info("Wait until all critical services are fully started") - check_critical_services(ans_host) + check_critical_services(duthost) logging.info("Wait some time for all the transceivers to be detected") - assert wait_until(300, 20, check_interface_information, ans_host, interfaces), \ + assert wait_until(300, 20, check_interface_information, duthost, interfaces), \ "Not all transceivers are detected in 300 seconds" logging.info("Check transceiver status") - check_transceiver_basic(ans_host, interfaces) + check_transceiver_basic(duthost, interfaces) if asic_type in ["mellanox"]: @@ -50,7 +49,7 @@ def test_reload_configuration(testbed_devices, conn_graph_facts): from check_sysfs import check_sysfs logging.info("Check the hw-management service") - check_hw_management_service(ans_host) + check_hw_management_service(duthost) logging.info("Check sysfs") - check_sysfs(ans_host) + check_sysfs(duthost) diff --git a/tests/platform_tests/test_sensors.py b/tests/platform_tests/test_sensors.py new file mode 100644 index 0000000000..286af88d22 --- /dev/null +++ b/tests/platform_tests/test_sensors.py @@ -0,0 +1,21 @@ +import pytest +import logging + +from common.helpers.assertions import pytest_assert + +def test_sensors(duthost, creds): + # Get platform name + platform = duthost.get_platform_info()['platform'] + + # Prepare check list + sensors_checks = creds['sensors_checks'] + + # Gather sensors + if platform not in sensors_checks.keys(): + pytest.skip("Skip test due to not support check sensors for current platform({})".format(platform)) + + sensors_facts = duthost.sensors_facts(checks=sensors_checks[platform])['ansible_facts'] + + pytest_assert(not 
sensors_facts['sensors']['alarm'], "sensors facts: {}".format(sensors_facts)) + if sensors_facts['sensors']['warning']: + logging.debug("Show warnings: %s" % sensors_facts['sensors']['warning']) \ No newline at end of file diff --git a/tests/platform/test_sequential_restart.py b/tests/platform_tests/test_sequential_restart.py similarity index 80% rename from tests/platform/test_sequential_restart.py rename to tests/platform_tests/test_sequential_restart.py index bcab5c8152..273358c88f 100644 --- a/tests/platform/test_sequential_restart.py +++ b/tests/platform_tests/test_sequential_restart.py @@ -10,7 +10,7 @@ import pytest -from platform_fixtures import conn_graph_facts +from common.fixtures.conn_graph_facts import conn_graph_facts from common.utilities import wait_until from check_critical_services import check_critical_services from check_transceiver_status import check_transceiver_basic @@ -53,20 +53,16 @@ def restart_service_and_check(localhost, dut, service, interfaces): check_sysfs(dut) -def test_restart_swss(testbed_devices, conn_graph_facts): +def test_restart_swss(duthost, localhost, conn_graph_facts): """ @summary: This test case is to restart the swss service and check platform status """ - dut = testbed_devices["dut"] - localhost = testbed_devices["localhost"] - restart_service_and_check(localhost, dut, "swss", conn_graph_facts["device_conn"]) + restart_service_and_check(localhost, duthost, "swss", conn_graph_facts["device_conn"]) @pytest.mark.skip(reason="Restarting syncd is not supported yet") -def test_restart_syncd(testbed_devices, conn_graph_facts): +def test_restart_syncd(duthost, localhost, conn_graph_facts): """ @summary: This test case is to restart the syncd service and check platform status """ - dut = testbed_devices["dut"] - localhost = testbed_devices["localhost"] - restart_service_and_check(localhost, dut, "syncd", conn_graph_facts["device_conn"]) + restart_service_and_check(localhost, duthost, "syncd", conn_graph_facts["device_conn"]) diff --git a/tests/platform/test_sfp.py b/tests/platform_tests/test_sfp.py similarity index 83% rename from tests/platform/test_sfp.py rename to tests/platform_tests/test_sfp.py index bd6dcf2e85..85b284d08d 100644 --- a/tests/platform/test_sfp.py +++ b/tests/platform_tests/test_sfp.py @@ -13,7 +13,7 @@ import pytest -from platform_fixtures import conn_graph_facts +from common.fixtures.conn_graph_facts import conn_graph_facts from common.plugins.loganalyzer.loganalyzer import LogAnalyzer ans_host = None @@ -57,7 +57,7 @@ def parse_eeprom(output_lines): return res -def get_port_map(testbed_devices): +def get_port_map(duthost): """ @summary: Get the port mapping info from the DUT @return: a dictionary containing the port map @@ -73,7 +73,7 @@ # this is the first run logging.info("Retrieving port mapping from DUT") # copy the helper to DUT - ans_host = testbed_devices["dut"] + ans_host = duthost src_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'files/getportmap.py') dest_path = os.path.join('/usr/share/sonic/device', ans_host.facts['platform'], 'plugins/getportmap.py') ans_host.copy(src=src_path, dest=dest_path) @@ -89,7 +89,7 @@ return port_mapping -def test_check_sfp_status_and_configure_sfp(testbed_devices, conn_graph_facts): +def test_check_sfp_status_and_configure_sfp(duthost, conn_graph_facts): """ @summary: Check SFP status and configure SFP @@ -101,10 +101,8 @@ def test_check_sfp_status_and_configure_sfp(testbed_devices,
conn_graph_facts): * show interface transceiver eeprom * sfputil reset """ - ans_host = testbed_devices["dut"] - - if ans_host.facts["asic_type"] in ["mellanox"]: - loganalyzer = LogAnalyzer(ansible_host=ans_host, marker_prefix='sfp_cfg') + if duthost.facts["asic_type"] in ["mellanox"]: + loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='sfp_cfg') loganalyzer.load_common_config() loganalyzer.ignore_regex.append("kernel.*Eeprom query failed*") @@ -116,32 +114,32 @@ def test_check_sfp_status_and_configure_sfp(testbed_devices, conn_graph_facts): cmd_xcvr_presence = "show interface transceiver presence" cmd_xcvr_eeprom = "show interface transceiver eeprom" - portmap = get_port_map(testbed_devices) + portmap = get_port_map(duthost) logging.info("Got portmap {}".format(portmap)) logging.info("Check output of '%s'" % cmd_sfp_presence) - sfp_presence = ans_host.command(cmd_sfp_presence) + sfp_presence = duthost.command(cmd_sfp_presence) parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) for intf in conn_graph_facts["device_conn"]: assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" logging.info("Check output of '%s'" % cmd_xcvr_presence) - xcvr_presence = ans_host.command(cmd_xcvr_presence) + xcvr_presence = duthost.command(cmd_xcvr_presence) parsed_presence = parse_output(xcvr_presence["stdout_lines"][2:]) for intf in conn_graph_facts["device_conn"]: assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_xcvr_presence assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" logging.info("Check output of '%s'" % cmd_sfp_eeprom) - sfp_eeprom = ans_host.command(cmd_sfp_eeprom) + sfp_eeprom = duthost.command(cmd_sfp_eeprom) parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"]) for intf in conn_graph_facts["device_conn"]: assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'" assert parsed_eeprom[intf] == "SFP EEPROM detected" logging.info("Check output of '%s'" % cmd_xcvr_eeprom) - xcvr_eeprom = ans_host.command(cmd_xcvr_eeprom) + xcvr_eeprom = duthost.command(cmd_xcvr_eeprom) parsed_eeprom = parse_eeprom(xcvr_eeprom["stdout_lines"]) for intf in conn_graph_facts["device_conn"]: assert intf in parsed_eeprom, "Interface is not in output of '%s'" % cmd_xcvr_eeprom @@ -156,30 +154,30 @@ def test_check_sfp_status_and_configure_sfp(testbed_devices, conn_graph_facts): continue tested_physical_ports.add(phy_intf) logging.info("resetting {} physical interface {}".format(intf, phy_intf)) - reset_result = ans_host.command("%s %s" % (cmd_sfp_reset, intf)) + reset_result = duthost.command("%s %s" % (cmd_sfp_reset, intf)) assert reset_result["rc"] == 0, "'%s %s' failed" % (cmd_sfp_reset, intf) time.sleep(5) logging.info("Wait some time for SFP to fully recover after reset") time.sleep(60) logging.info("Check sfp presence again after reset") - sfp_presence = ans_host.command(cmd_sfp_presence) + sfp_presence = duthost.command(cmd_sfp_presence) parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) for intf in conn_graph_facts["device_conn"]: assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" logging.info("Check interface status") - mg_facts = ans_host.minigraph_facts(host=ans_host.hostname)["ansible_facts"] - intf_facts = 
ans_host.interface_facts(up_ports=mg_facts["minigraph_ports"])["ansible_facts"] + mg_facts = duthost.minigraph_facts(host=duthost.hostname)["ansible_facts"] + intf_facts = duthost.interface_facts(up_ports=mg_facts["minigraph_ports"])["ansible_facts"] assert len(intf_facts["ansible_interface_link_down_ports"]) == 0, \ "Some interfaces are down: %s" % str(intf_facts["ansible_interface_link_down_ports"]) - if ans_host.facts["asic_type"] in ["mellanox"]: + if duthost.facts["asic_type"] in ["mellanox"]: loganalyzer.analyze(marker) -def test_check_sfp_low_power_mode(testbed_devices, conn_graph_facts): +def test_check_sfp_low_power_mode(duthost, conn_graph_facts): """ @summary: Check SFP low power mode @@ -188,10 +186,8 @@ * sfputil lpmode off * sfputil lpmode on """ - ans_host = testbed_devices["dut"] - - if ans_host.facts["asic_type"] in ["mellanox"]: - loganalyzer = LogAnalyzer(ansible_host=ans_host, marker_prefix='sfp_lpm') + if duthost.facts["asic_type"] in ["mellanox"]: + loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='sfp_lpm') loganalyzer.load_common_config() loganalyzer.ignore_regex.append("Eeprom query failed") @@ -201,11 +197,11 @@ cmd_sfp_show_lpmode = "sudo sfputil show lpmode" cmd_sfp_set_lpmode = "sudo sfputil lpmode" - portmap = get_port_map(testbed_devices) + portmap = get_port_map(duthost) logging.info("Got portmap {}".format(portmap)) logging.info("Check output of '%s'" % cmd_sfp_show_lpmode) - lpmode_show = ans_host.command(cmd_sfp_show_lpmode) + lpmode_show = duthost.command(cmd_sfp_show_lpmode) parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:]) original_lpmode = copy.deepcopy(parsed_lpmode) for intf in conn_graph_facts["device_conn"]: @@ -222,12 +218,12 @@ tested_physical_ports.add(phy_intf) logging.info("setting {} physical interface {}".format(intf, phy_intf)) new_lpmode = "off" if original_lpmode[intf].lower() == "on" else "on" - lpmode_set_result = ans_host.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf)) + lpmode_set_result = duthost.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf)) assert lpmode_set_result["rc"] == 0, "'%s %s %s' failed" % (cmd_sfp_set_lpmode, new_lpmode, intf) time.sleep(10) logging.info("Check SFP low power mode again after changing SFP lpmode") - lpmode_show = ans_host.command(cmd_sfp_show_lpmode) + lpmode_show = duthost.command(cmd_sfp_show_lpmode) parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:]) for intf in conn_graph_facts["device_conn"]: assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode @@ -243,29 +239,29 @@ tested_physical_ports.add(phy_intf) logging.info("restoring {} physical interface {}".format(intf, phy_intf)) new_lpmode = original_lpmode[intf].lower() - lpmode_set_result = ans_host.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf)) + lpmode_set_result = duthost.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf)) assert lpmode_set_result["rc"] == 0, "'%s %s %s' failed" % (cmd_sfp_set_lpmode, new_lpmode, intf) time.sleep(10) logging.info("Check SFP low power mode again after changing SFP lpmode") - lpmode_show = ans_host.command(cmd_sfp_show_lpmode) + lpmode_show = duthost.command(cmd_sfp_show_lpmode) parsed_lpmode =
parse_output(lpmode_show["stdout_lines"][2:]) for intf in conn_graph_facts["device_conn"]: assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode" logging.info("Check sfp presence again after setting lpmode") - sfp_presence = ans_host.command(cmd_sfp_presence) + sfp_presence = duthost.command(cmd_sfp_presence) parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) for intf in conn_graph_facts["device_conn"]: assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'" logging.info("Check interface status") - mg_facts = ans_host.minigraph_facts(host=ans_host.hostname)["ansible_facts"] - intf_facts = ans_host.interface_facts(up_ports=mg_facts["minigraph_ports"])["ansible_facts"] + mg_facts = duthost.minigraph_facts(host=duthost.hostname)["ansible_facts"] + intf_facts = duthost.interface_facts(up_ports=mg_facts["minigraph_ports"])["ansible_facts"] assert len(intf_facts["ansible_interface_link_down_ports"]) == 0, \ "Some interfaces are down: %s" % str(intf_facts["ansible_interface_link_down_ports"]) - if ans_host.facts["asic_type"] in ["mellanox"]: + if duthost.facts["asic_type"] in ["mellanox"]: loganalyzer.analyze(marker) diff --git a/tests/platform/test_xcvr_info_in_db.py b/tests/platform_tests/test_xcvr_info_in_db.py similarity index 70% rename from tests/platform/test_xcvr_info_in_db.py rename to tests/platform_tests/test_xcvr_info_in_db.py index 264a0e7885..c219cb2eb9 100644 --- a/tests/platform/test_xcvr_info_in_db.py +++ b/tests/platform_tests/test_xcvr_info_in_db.py @@ -9,14 +9,12 @@ import os from check_transceiver_status import check_transceiver_status -from platform_fixtures import conn_graph_facts +from common.fixtures.conn_graph_facts import conn_graph_facts -def test_xcvr_info_in_db(testbed_devices, conn_graph_facts): +def test_xcvr_info_in_db(duthost, conn_graph_facts): """ @summary: This test case is to verify that xcvrd works as expected by checking transceiver information in DB """ - dut = testbed_devices["dut"] - logging.info("Check transceiver status") - check_transceiver_status(dut, conn_graph_facts["device_conn"]) + check_transceiver_status(duthost, conn_graph_facts["device_conn"]) diff --git a/tests/platform/thermal_control_test_helper.py b/tests/platform_tests/thermal_control_test_helper.py similarity index 97% rename from tests/platform/thermal_control_test_helper.py rename to tests/platform_tests/thermal_control_test_helper.py index aa19c36f45..b4078629de 100644 --- a/tests/platform/thermal_control_test_helper.py +++ b/tests/platform_tests/thermal_control_test_helper.py @@ -174,6 +174,14 @@ def mock_presence(self): """ pass + def mock_status(self, status): + """ + Change the mocked FAN status to good or bad + :param status: bool value indicate the target status of the FAN. + :return: + """ + pass + def mock_normal_speed(self): """ Change the mocked FAN speed to a normal value. @@ -254,7 +262,7 @@ def get_fields(line, field_ranges): return fields -def check_cli_output_with_mocker(dut, mocker_object, command, max_wait_time): +def check_cli_output_with_mocker(dut, mocker_object, command, max_wait_time, key_index=0): """ Check the command line output matches the mocked data. :param dut: DUT object representing a SONiC switch under test. 
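The `key_index` parameter added to `check_cli_output_with_mocker` lets callers key the parsed CLI table by any column instead of always the first one. A minimal, self-contained sketch of the underlying parsing pattern (the sample output lines and field ranges below are made up for illustration):

```
# Hedged sketch of the fixed-width CLI parsing behind
# check_cli_output_with_mocker(); lines and ranges are illustrative.
def get_fields(line, field_ranges):
    return [line[start:end].strip() for start, end in field_ranges]

def parse_cli_table(stdout_lines, field_ranges, key_index=0):
    actual_data = {}
    for line in stdout_lines:
        fields = get_fields(line, field_ranges)
        actual_data[fields[key_index]] = fields
    return actual_data

lines = ['drawer1  fan1  50%  OK',
         'drawer2  fan2  60%  OK']
ranges = [(0, 9), (9, 15), (15, 20), (20, 24)]
print(parse_cli_table(lines, ranges, key_index=1))
# {'fan1': ['drawer1', 'fan1', '50%', 'OK'], 'fan2': ['drawer2', ...]}
```

This is why the hunk below changes `actual_data[fields[0]]` to `actual_data[fields[key_index]]`: output whose unique key is not in the first column can still be checked against the mocked data.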
@@ -273,7 +281,7 @@ def check_cli_output_with_mocker(dut, mocker_object, command, max_wait_time): actual_data = {} for line in output["stdout_lines"][2:]: fields = get_fields(line, field_ranges) - actual_data[fields[0]] = fields + actual_data[fields[key_index]] = fields return mocker_object.check_result(actual_data) @@ -314,7 +322,7 @@ def restart_thermal_control_daemon(dut): assert output["rc"] == 0, "Run command '%s' failed" % kill_thermalctld_cmd # make sure thermalctld has restarted - max_wait_time = 5 + max_wait_time = 30 while max_wait_time > 0: max_wait_time -= 1 output = dut.command(find_thermalctld_pid_cmd) diff --git a/tests/ptf_runner.py b/tests/ptf_runner.py index b1931f296d..9ca0333364 100644 --- a/tests/ptf_runner.py +++ b/tests/ptf_runner.py @@ -1,27 +1,37 @@ import pipes -def ptf_runner(host, testdir, testname, platform_dir, params={}, \ - platform="remote", qlen=0, relax=True, debug_level="info", \ - socket_recv_size=None, log_file=None): +def ptf_runner(host, testdir, testname, platform_dir=None, params={}, + platform="remote", qlen=0, relax=True, debug_level="info", + socket_recv_size=None, log_file=None, device_sockets=[]): - ptf_test_params = ";".join(["{}={}".format(k, repr(v)) for k, v in params.items()]) + cmd = "ptf --test-dir {} {}".format(testdir, testname) + + if platform_dir: + cmd += " --platform-dir {}".format(platform_dir) - cmd = "ptf --test-dir {} {} --platform-dir {}".format(testdir, testname, platform_dir) if qlen: cmd += " --qlen={}".format(qlen) + if platform: cmd += " --platform {}".format(platform) - if ptf_test_params: + + if params: + ptf_test_params = ";".join(["{}={}".format(k, repr(v)) for k, v in params.items()]) cmd += " -t {}".format(pipes.quote(ptf_test_params)) + if relax: cmd += " --relax" + if debug_level: cmd += " --debug {}".format(debug_level) + if log_file: cmd += " --log-file {}".format(log_file) + if socket_recv_size: cmd += " --socket-recv-size {}".format(socket_recv_size) - res = host.shell(cmd, chdir="/root") - + if device_sockets: + cmd += " ".join(map(" --device-socket {}".format, device_sockets)) + host.shell(cmd, chdir="/root") diff --git a/tests/pytest.ini b/tests/pytest.ini index a3f8ab4d42..07a5e9240e 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -7,3 +7,5 @@ markers: disable_loganalyzer: make to disable automatic loganalyzer broadcom: test specific to Broadcom platform sanity_check: override the default sanity check settings + topology: specify which topology testcase can be executed on: (t0, t1, ptf, etc) + platform: specify which platform testcase can be executed on: (physical, virtual, broadcom, mellanox, etc) diff --git a/tests/pytest.org.md b/tests/pytest.org.md new file mode 100644 index 0000000000..0b13d7e8d0 --- /dev/null +++ b/tests/pytest.org.md @@ -0,0 +1,128 @@ +# Pytest organization proposal + +This proposal intends to achieve the following + - Have a standard way of categorizing tests + - Have some guidelines around test file organization + - Have a master wrapper for test execution + - Follow common documentation style + - Test result collection + +## Test categorization +Leverage pytest custom markers to group tests based on topology, asic, features, device type and connection type. +Every testcase needs to have a topology marker. Feature markers are recommended for any feature test that are getting added. +'Device_type' is optional but needs to be specified if there is a specific requirement that the test needs a physical DUT as opposed to a VS. 
The same criteria applies for 'connection_type' + +``` +pytest.ini +[pytest] +markers: + topology(topo_name): The topologies this particular testcase can run against. topo_name can be individual topology names like 't0', 't1', 'ptf', 'any' or a comma separated like ('t0', 't1') if supported on multiple topologies + asic(vendor_name): used for asic specific test(broadcom, mellanox etc) + feature(feature_name): feature this test is written for. eg. acl, nat + connection_type(name): names can be 'fabric' (which indicates the presence of a fanout switch) or 'direct' if a testcase uses directly connected links + device_type(name): name can 'physical' (if this test requires a physical dut) or 'vs' (if this test can be run on a virtual switch) + +``` +conftest.py + +``` +def pytest_addoption(parser): + parser.addoption("--topology", action="store", metavar="NAME", + help="only run tests matching the topology NAME") + +def pytest_runtest_setup(item): + toponames = [mark.args for mark in item.iter_markers(name="topology")] + if toponames: + cfg_topos = item.config.getoption("--topology").split(',') + if all(topo not in toponames[0] for topo in cfg_topos): + pytest.skip("test requires topology in {!r}".format(toponames)) + else: + if item.config.getoption("--topology"): + pytest.skip("test does not match topology") + +``` + +Sample test file: test_topo.py + +``` +@pytest.mark.topology('t0', 't1') +def test_all(): + assert 1 == 1 + +@pytest.mark.topology('t0') +def test_t0(): + assert 1 == 1 + + +@pytest.mark.topology('any') +def test_any(): + assert 1 == 1 + +``` + +Sample test file: test_notopo.py + +``` +def test_notopo(): + assert 1 == 1 + +``` + +Test run + +``` +py.test --inventory inv --host-pattern dut1 --module-path ../ansible/library/ --testbed tb --testbed_file tb.csv --topology t1 test_topo.py test_notopo.py -rA + +platform linux2 -- Python 2.7.12, pytest-4.6.9, py-1.8.1, pluggy-0.13.1 +ansible: 2.8.7 +rootdir: /var/nejo/Networking-acs-sonic-mgmt/tests, inifile: pytest.ini +plugins: ansible-2.2.2 +collected 4 items + +test_topo.py::test_all PASSED [ 25%] +test_topo.py::test_t0 PASSED [ 50%] +test_topo.py::test_any SKIPPED [ 75%] +test_notopo.py::test_notopo SKIPPED [100%] + +.... + +.... +PASSED test_topo.py::test_all +SKIPPED [1] /var/nejo/Networking-acs-sonic-mgmt/tests/conftest.py:293: test requires topology in [('t0',)] +SKIPPED [1] /var/nejo/Networking-acs-sonic-mgmt/tests/conftest.py:293: test requires topology in [('any',)] +SKIPPED [1] /var/nejo/Networking-acs-sonic-mgmt/tests/conftest.py:295: test does not match topology + +``` + +## Test file organization +- Have 2 broad categories (platform and feature). Feature specific tests and their helpers go into specific feature folders. + +``` +tests + |_ common + |_ platform + |_ ptftests + |_ nat + |_ test_nat_bindings.py + |_ files + |_ all helpers for the nat feature + |_ acl + +``` + +- Any reusable code needs to go under tests/common + +- File naming convention + The objective here is to provide meaningful names for helper files/testcase files so that the user gets a general idea of the file contents. + + +## Master wrapper +Make it easier to run a nightly test against a feature/platform/topology from the command line. 
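Concretely, such a wrapper could be a thin layer over `pytest.main`; the sketch below is hypothetical (the script name, argument order, and the fixed inventory/testbed-file values are assumptions modeled on the sample run above), and the `testbed-cli.sh` analogy it follows is described next.

```
#!/usr/bin/env python
# Hypothetical nightly-wrapper sketch (not part of this proposal's code):
# map (testbed, dut, topology) onto the py.test invocation shown above.
import sys
import pytest

def main(testbed, dut, topology, extra_args):
    args = ['--inventory', 'inv',
            '--host-pattern', dut,
            '--module-path', '../ansible/library/',
            '--testbed', testbed,
            '--testbed_file', 'tb.csv',
            '--topology', topology,
            '-rA']
    return pytest.main(args + list(extra_args))

if __name__ == '__main__':
    sys.exit(main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4:]))
```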
Have something similar to the 'ansible/testbed-cli.sh' script which can be invoked with just the basic parameters (testbed name, what flavor of test to run) + + +## Documentation style +Follow a common style of documentation for test methods which can be used by some tool to generate html content + + +## Test result collection +Use the --junitxml attribute to collect test results. Can leverage the existing format used in sonic-utilities/sonic-swss repo for reporting test results. diff --git a/tests/qos/qos_fixtures.py b/tests/qos/qos_fixtures.py index 8f1b28fd3c..de32535c32 100644 --- a/tests/qos/qos_fixtures.py +++ b/tests/qos/qos_fixtures.py @@ -1,54 +1,38 @@ import pytest -import os -from ansible_host import AnsibleHost +from common.fixtures.conn_graph_facts import conn_graph_facts + @pytest.fixture(scope = "module") -def lossless_prio_dscp_map(testbed_devices): - dut = testbed_devices["dut"] - config_facts = dut.config_facts(host=dut.hostname, source="persistent")['ansible_facts'] - +def lossless_prio_dscp_map(duthost): + config_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts'] + if "PORT_QOS_MAP" not in config_facts.keys(): - return None - + return None + port_qos_map = config_facts["PORT_QOS_MAP"] lossless_priorities = list() intf = port_qos_map.keys()[0] if 'pfc_enable' not in port_qos_map[intf]: - return None - + return None + lossless_priorities = [int(x) for x in port_qos_map[intf]['pfc_enable'].split(',')] dscp_to_tc_map = config_facts["DSCP_TO_TC_MAP"] - + result = dict() for prio in lossless_priorities: result[prio] = list() profile = dscp_to_tc_map.keys()[0] - + for dscp in dscp_to_tc_map[profile]: tc = dscp_to_tc_map[profile][dscp] - + if int(tc) in lossless_priorities: result[int(tc)].append(int(dscp)) - - return result -@pytest.fixture(scope = "module") -def conn_graph_facts(testbed_devices): - """ - @summary: Fixture for getting testbed topology connectivity information. 
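The `lossless_prio_dscp_map` fixture above cross-references two CONFIG_DB tables: `PORT_QOS_MAP` tells it which priorities are PFC-enabled (lossless), and `DSCP_TO_TC_MAP` tells it which DSCP values map onto each of those priorities. A worked example with made-up facts standing in for `duthost.config_facts()`:

```
# Illustrative only: minimal facts standing in for duthost.config_facts().
port_qos_map = {'Ethernet0': {'pfc_enable': '3,4'}}
dscp_to_tc_map = {'AZURE': {'3': '3', '4': '4', '8': '0'}}

lossless_priorities = [int(x) for x in
                       port_qos_map['Ethernet0']['pfc_enable'].split(',')]
result = dict((prio, []) for prio in lossless_priorities)

profile = list(dscp_to_tc_map.keys())[0]
for dscp, tc in dscp_to_tc_map[profile].items():
    if int(tc) in lossless_priorities:
        result[int(tc)].append(int(dscp))

print(result)  # {3: [3], 4: [4]}: DSCP values per lossless priority
```

The PFC tests in test_pfc_pause.py consume exactly this shape: a dict from lossless priority to the list of DSCP values that land on it.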
- @param testbed_devices: Devices in the testbed - @return: Return the topology connectivity information - """ - dut = testbed_devices["dut"] - localhost = testbed_devices["localhost"] - - base_path = os.path.dirname(os.path.realpath(__file__)) - lab_conn_graph_file = os.path.join(base_path, "../../ansible/files/lab_connection_graph.xml") - result = localhost.conn_graph_facts(host=dut.hostname, filename=lab_conn_graph_file)['ansible_facts'] - return result + @pytest.fixture(scope = "module") def leaf_fanouts(conn_graph_facts): """ @@ -58,11 +42,11 @@ def leaf_fanouts(conn_graph_facts): """ leaf_fanouts = [] conn_facts = conn_graph_facts['device_conn'] - + """ for each interface of DUT """ for intf in conn_facts: peer_device = conn_facts[intf]['peerdevice'] if peer_device not in leaf_fanouts: leaf_fanouts.append(peer_device) - return leaf_fanouts \ No newline at end of file + return leaf_fanouts diff --git a/tests/qos/qos_helpers.py b/tests/qos/qos_helpers.py index 6a2e261a4c..5e51ca978a 100644 --- a/tests/qos/qos_helpers.py +++ b/tests/qos/qos_helpers.py @@ -1,9 +1,9 @@ -from ansible_host import AnsibleHost from netaddr import IPAddress, IPNetwork -from qos_fixtures import lossless_prio_dscp_map, conn_graph_facts, leaf_fanouts +from qos_fixtures import lossless_prio_dscp_map, leaf_fanouts import json import re import ipaddress +import random PFC_GEN_FILE = 'pfc_gen.py' PFC_GEN_LOCAL_PATH = '../../ansible/roles/test/files/helpers/pfc_gen.py' @@ -18,8 +18,8 @@ def natural_keys(text): def ansible_stdout_to_str(ansible_stdout): """ @Summary: The stdout of Ansible host is essentially a list of unicode characters. This function converts it to a string. - @param ansible_stdout: stdout of Ansible - @return: Return a string + @param ansible_stdout: stdout of Ansible + @return: Return a string """ result = "" for x in ansible_stdout: @@ -40,9 +40,9 @@ def get_phy_intfs(host_ans): @param host_ans: Ansible host instance of this DUT @return: Return the list of active interfaces """ - intf_facts = host_ans.interface_facts()['ansible_facts']['ansible_interface_facts'] + intf_facts = host_ans.interface_facts()['ansible_facts']['ansible_interface_facts'] phy_intfs = [k for k in intf_facts.keys() if k.startswith('Ethernet')] - return phy_intfs + return phy_intfs def get_active_intfs(host_ans): """ @@ -52,7 +52,7 @@ def get_active_intfs(host_ans): """ int_status = host_ans.show_interface(command="status")['ansible_facts']['int_status'] active_intfs = [] - + for intf in int_status: if int_status[intf]['admin_state'] == 'up' and \ int_status[intf]['oper_state'] == 'up': @@ -61,21 +61,21 @@ def get_active_intfs(host_ans): return active_intfs def get_addrs_in_subnet(subnet, n): - """ + """ @Summary: Get N IP addresses in a subnet @param subnet: IPv4 subnet, e.g., '192.168.1.1/24' - @param n: # of IP addresses to get + @param n: # of IP addresses to get @return: Retuen n IPv4 addresses in this subnet in a list """ ip_addr = subnet.split('/')[0] ip_addrs = [str(x) for x in list(IPNetwork(subnet))] ip_addrs.remove(ip_addr) - + """ Try to avoid network and broadcast addresses """ if len(ip_addrs) >= n + 2: del ip_addrs[0] del ip_addrs[-1] - + return ip_addrs[:n] def start_pause(host_ans, pkt_gen_path, intf, pkt_count, pause_duration, pause_priority): @@ -85,19 +85,19 @@ def start_pause(host_ans, pkt_gen_path, intf, pkt_count, pause_duration, pause_p @param pkt_gen_path: path of packet generator @param intf: interface to send packets @param pkt_count: # of pause frames to send - @pause_duration: pause time duration - 
@pause_priority: priority to pause (None means global pause) + @pause_duration: pause time duration + @pause_priority: priority to pause (None means global pause) """ """ global pause """ if pause_priority is None: cmd = "nohup sudo python %s -i %s -g -t %d -n %d /dev/null 2>&1 &" % (pkt_gen_path, intf, pause_duration, pkt_count) else: - cmd = "nohup sudo python %s -i %s -p %d -t %d -n %d /dev/null 2>&1 &" % (pkt_gen_path, intf, 2**pause_priority, pause_duration, pkt_count) + cmd = "nohup sudo python %s -i %s -p %d -t %d -n %d /dev/null 2>&1 &" % (pkt_gen_path, intf, 2**pause_priority, pause_duration, pkt_count) + + print cmd + host_ans.host.shell(cmd) - print cmd - host_ans.shell(cmd) - def stop_pause(host_ans, pkt_gen_path): """ @Summary: Stop priority-based/global flow control pause storm on a leaf fanout switch @@ -105,117 +105,106 @@ def stop_pause(host_ans, pkt_gen_path): @param pkt_gen_path: path of packet generator """ cmd = "sudo kill -9 $(pgrep -f %s) /dev/null 2>&1 &" % (pkt_gen_path) - host_ans.shell(cmd) - -def get_active_vlan_members(host_ans, hostname): + host_ans.host.shell(cmd) + +def get_active_vlan_members(host_ans): """ @Summary: Get all the active physical interfaces enslaved to a Vlan @param host_ans: Ansible host instance of the device - @param hostname: host name of the device @return: Return the list of active physical interfaces """ - mg_facts = host_ans.minigraph_facts(host=hostname)['ansible_facts'] + mg_facts = host_ans.minigraph_facts(host=host_ans.hostname)['ansible_facts'] mg_vlans = mg_facts['minigraph_vlans'] - + if len(mg_vlans) != 1: print 'There should be only one Vlan at the DUT' return None - + """ Get all the Vlan memebrs """ vlan_intf = mg_vlans.keys()[0] vlan_members = mg_vlans[vlan_intf]['members'] - + """ Filter inactive Vlan members """ active_intfs = get_active_intfs(host_ans) vlan_members = [x for x in vlan_members if x in active_intfs] - - return vlan_members -def get_vlan_subnet(host_ans, hostname): + return vlan_members + +def get_vlan_subnet(host_ans): """ @Summary: Get Vlan subnet of a T0 device @param host_ans: Ansible host instance of the device - @param hostname: host name of the device @return: Return Vlan subnet, e.g., "192.168.1.1/24" """ - mg_facts = host_ans.minigraph_facts(host=hostname)['ansible_facts'] + mg_facts = host_ans.minigraph_facts(host=host_ans.hostname)['ansible_facts'] mg_vlans = mg_facts['minigraph_vlans'] - + if len(mg_vlans) != 1: print 'There should be only one Vlan at the DUT' return None - - mg_vlan_intfs = mg_facts['minigraph_vlan_interfaces'] + + mg_vlan_intfs = mg_facts['minigraph_vlan_interfaces'] vlan_subnet = ansible_stdout_to_str(mg_vlan_intfs[0]['subnet']) return vlan_subnet - -def gen_testbed_t0(ansible_adhoc, testbed): + +def gen_testbed_t0(duthost): """ @Summary: Generate a T0 testbed configuration - @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is - mandatory argument for the class constructors. 
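A usage note on `get_addrs_in_subnet()` above: when the subnet has enough spare addresses, the first and last candidates are dropped so the network and broadcast addresses are never assigned to a simulated server. Running the helper stand-alone shows the effect:

```
# Worked example of get_addrs_in_subnet() from qos_helpers.py above.
from netaddr import IPNetwork

def get_addrs_in_subnet(subnet, n):
    ip_addr = subnet.split('/')[0]
    ip_addrs = [str(x) for x in IPNetwork(subnet)]
    ip_addrs.remove(ip_addr)        # never hand out the DUT's own address
    if len(ip_addrs) >= n + 2:      # try to avoid network/broadcast addresses
        del ip_addrs[0]
        del ip_addrs[-1]
    return ip_addrs[:n]

print(get_addrs_in_subnet('192.168.1.1/29', 3))
# ['192.168.1.2', '192.168.1.3', '192.168.1.4']
```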
- @param testbed: Testbed information - @return: Return four values: DUT interfaces, PTF interfaces, PTF IP addresses, and PTF MAC addresses, - """ - dut_hostname = testbed['dut'] - dut_ans = AnsibleHost(ansible_adhoc, dut_hostname) - + @param duthost: The object for interacting with DUT through ansible + @return: Return four values: DUT interfaces, PTF interfaces, PTF IP addresses, and PTF MAC addresses, + """ + """ Get all the active physical interfaces enslaved to the Vlan """ """ These interfaces are actually server-faced interfaces at T0 """ - vlan_members = get_active_vlan_members(dut_ans, dut_hostname) - + vlan_members = get_active_vlan_members(duthost) + """ Get Vlan subnet """ - vlan_subnet = get_vlan_subnet(dut_ans, dut_hostname) - + vlan_subnet = get_vlan_subnet(duthost) + """ Prefix length to network mask """ vlan_subnet_mask = ipaddress.ip_network(unicode(vlan_subnet, "utf-8")).netmask - + """ Generate IP addresses for servers in the Vlan """ vlan_ip_addrs = get_addrs_in_subnet(vlan_subnet, len(vlan_members)) - + """ Generate MAC addresses 00:00:00:00:00:XX for servers in the Vlan """ vlan_mac_addrs = [5 * '00:' + format(k, '02x') for k in random.sample(range(1, 256), len(vlan_members))] - + """ Find correspoinding interfaces on PTF """ - phy_intfs = get_phy_intfs(dut_ans) + phy_intfs = get_phy_intfs(duthost) phy_intfs.sort(key=natural_keys) vlan_members.sort(key=natural_keys) vlan_members_index = [phy_intfs.index(intf) for intf in vlan_members] ptf_intfs = ['eth' + str(i) for i in vlan_members_index] - + return vlan_members, ptf_intfs, vlan_ip_addrs, vlan_mac_addrs -def setup_testbed(ansible_adhoc, testbed, leaf_fanouts, ptf_local_path, ptf_remote_path): +def setup_testbed(fanouthosts, ptfhost, leaf_fanouts, ptf_local_path, ptf_remote_path): """ @Summary: Set up the testbed - @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is - mandatory argument for the class constructors. 
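The PTF port derivation in `gen_testbed_t0()` above hinges on natural sorting: both the full front-panel port list and the Vlan member list are sorted numerically, and each member's index in the full list becomes its `ethN` port on the PTF host. A self-contained sketch (interface names are illustrative, and this `natural_keys` is a standard implementation assumed to match the helper above):

```
# Hedged sketch of the DUT-port -> PTF-port mapping in gen_testbed_t0().
import re

def natural_keys(text):
    return [int(t) if t.isdigit() else t for t in re.split(r'(\d+)', text)]

phy_intfs = ['Ethernet10', 'Ethernet0', 'Ethernet2']  # all front-panel ports
vlan_members = ['Ethernet10', 'Ethernet0']            # ports in the Vlan

phy_intfs.sort(key=natural_keys)    # ['Ethernet0', 'Ethernet2', 'Ethernet10']
vlan_members.sort(key=natural_keys)

ptf_intfs = ['eth%d' % phy_intfs.index(intf) for intf in vlan_members]
print(ptf_intfs)  # ['eth0', 'eth2']
```

Plain lexicographic sorting would place Ethernet10 before Ethernet2 and scramble the mapping, which is why `natural_keys` is used.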
- @param testbed: Testbed information @param leaf_fanouts: Leaf fanout switches @param ptf_local_path: local path of PTF script @param ptf_remote_dest: remote path of PTF script """ - + """ Copy the PFC generator to leaf fanout switches """ for peer_device in leaf_fanouts: - peerdev_ans = AnsibleHost(ansible_adhoc, peer_device) + peerdev_ans = fanouthosts[peer_device] cmd = "sudo kill -9 $(pgrep -f %s) /dev/null 2>&1 &" % (PFC_GEN_FILE) - peerdev_ans.shell(cmd) - peerdev_ans.copy(src=PFC_GEN_LOCAL_PATH, dest=PFC_GEN_REMOTE_PATH, force=True) - + peerdev_ans.host.shell(cmd) + peerdev_ans.host.copy(src=PFC_GEN_LOCAL_PATH, dest=PFC_GEN_REMOTE_PATH, force=True) + """ Stop PFC storm at the leaf fanout switches """ for peer_device in leaf_fanouts: - peerdev_ans = AnsibleHost(ansible_adhoc, peer_device) + peerdev_ans = fanouthosts[peer_device] stop_pause(peerdev_ans, PFC_GEN_FILE) - + """ Remove existing python scripts on PTF """ - ptf_hostname = testbed['ptf'] - ptf_ans = AnsibleHost(ansible_adhoc, ptf_hostname) - result = ptf_ans.find(paths=['~/'], patterns="*.py")['files'] + result = ptfhost.find(paths=['~/'], patterns="*.py")['files'] files = [ansible_stdout_to_str(x['path']) for x in result] - + for file in files: - ptf_ans.file(path=file, mode="absent") + ptfhost.file(path=file, mode="absent") - """ Copy the PFC test script to the PTF container """ - ptf_ans.copy(src=ptf_local_path, dest=ptf_remote_path, force=True) \ No newline at end of file + """ Copy the PFC test script to the PTF container """ + ptfhost.copy(src=ptf_local_path, dest=ptf_remote_path, force=True) \ No newline at end of file diff --git a/tests/qos/test_pfc_counters.py b/tests/qos/test_pfc_counters.py index f0ac66eccf..4b9552bb68 100644 --- a/tests/qos/test_pfc_counters.py +++ b/tests/qos/test_pfc_counters.py @@ -1,5 +1,5 @@ -from ansible_host import AnsibleHost -from qos_fixtures import conn_graph_facts, leaf_fanouts +from common.fixtures.conn_graph_facts import conn_graph_facts +from qos_fixtures import leaf_fanouts from qos_helpers import eos_to_linux_intf import os import time @@ -10,7 +10,7 @@ The PFC Rx counter should NOT be updated when the switch receives a global flow control pause/unpause frame. In each test case, we send a specific number of pause/unpause frames to a given priority queue of a given port at the -device under test (DUT). Then we check the SONiC PFC Rx counters. +device under test (DUT). Then we check the SONiC PFC Rx counters. """ PFC_GEN_FILE_RELATIVE_PATH = r'../../ansible/roles/test/files/helpers/pfc_gen.py' @@ -18,91 +18,83 @@ PFC_GEN_FILE_DEST = r'~/pfc_gen.py' """ Number of generated packets for each test case """ PKT_COUNT = 10 -""" Number of switch priorities """ +""" Number of switch priorities """ PRIO_COUNT = 8 -def setup_testbed(ansible_adhoc, testbed, leaf_fanouts): +def setup_testbed(fanouthosts, duthost, leaf_fanouts): """ - @Summary: Set up the testbed, including clearing counters, and copying the PFC generator to the leaf fanout switches. - @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is - mandatory argument for the class constructors. - @param testbed: Testbed information + @Summary: Set up the duthost, including clearing counters, and copying the PFC generator to the leaf fanout switches. 
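Throughout the PFC tests below, the generator is invoked with `-p 2 ** priority`. That is because a PFC frame carries an eight-bit class-enable vector, one bit per priority, so pausing priority 3 means setting bit 3 (value 8). An illustrative scapy sketch of such a frame (this is not the repo's `pfc_gen.py`, just the wire format it produces):

```
# Hedged sketch: a single-priority PFC pause frame per IEEE 802.1Qbb.
import struct
from scapy.all import Ether, Raw

def build_pfc_frame(priority, pause_time=65535):
    opcode = 0x0101                 # MAC control opcode for PFC
    class_enable = 1 << priority    # one enable bit per priority
    times = [pause_time if (class_enable >> i) & 1 else 0 for i in range(8)]
    payload = struct.pack('!HH8H', opcode, class_enable, *times)
    return Ether(dst='01:80:c2:00:00:01', type=0x8808) / Raw(payload)

frame = build_pfc_frame(priority=3)  # class-enable vector = 0b00001000
```

A global (non-PFC) pause frame instead uses opcode 0x0001 with a single pause time, which is what the generator's `-g` flag selects in the commands below.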
+ @param duthost: dut host information @param leaf_fanouts: Leaf fanout switches """ - dut_hostname = testbed['dut'] - dut_ans = AnsibleHost(ansible_adhoc, dut_hostname) """ Clear PFC counters """ - dut_ans.sonic_pfc_counters(method = "clear") + duthost.sonic_pfc_counters(method = "clear") """ Copy the PFC generator to all the leaf fanout switches """ for peer_device in leaf_fanouts: - peerdev_ans = AnsibleHost(ansible_adhoc, peer_device) + peerdev_ans = fanouthosts[peer_device] file_src = os.path.join(os.path.dirname(__file__), PFC_GEN_FILE_RELATIVE_PATH) - peerdev_ans.copy(src = file_src, dest = PFC_GEN_FILE_DEST, force = True) + peerdev_ans.host.copy(src = file_src, dest = PFC_GEN_FILE_DEST, force = True) -def run_test(ansible_adhoc, testbed, conn_graph_facts, leaf_fanouts, is_pfc=True, pause_time=65535): +def run_test(fanouthosts, duthost, conn_graph_facts, leaf_fanouts, is_pfc=True, pause_time=65535): """ @Summary: Run test for Ethernet flow control (FC) or priority-based flow control (PFC) - @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is - mandatory argument for the class constructors. - @param testbed: Testbed information + @param duthost: The object for interacting with DUT through ansible @param conn_graph_facts: Testbed topology connectivity information @param leaf_fanouts: Leaf fanout switches @param is_pfc: If this test is for PFC? @param pause_time: Pause time quanta (0-65535) in the frame. 0 means unpause. """ - setup_testbed(ansible_adhoc, testbed, leaf_fanouts) + setup_testbed(fanouthosts, duthost, leaf_fanouts) conn_facts = conn_graph_facts['device_conn'] - - dut_hostname = testbed['dut'] - dut_ans = AnsibleHost(ansible_adhoc, dut_hostname) - int_status = dut_ans.show_interface(command = "status")['ansible_facts']['int_status'] - + + int_status = duthost.show_interface(command = "status")['ansible_facts']['int_status'] + """ We only test active physical interfaces """ active_phy_intfs = [intf for intf in int_status if \ intf.startswith('Ethernet') and \ int_status[intf]['admin_state'] == 'up' and \ int_status[intf]['oper_state'] == 'up'] - + """ Generate PFC or FC packets for active physical interfaces """ - for intf in active_phy_intfs: + for intf in active_phy_intfs: peer_device = conn_facts[intf]['peerdevice'] peer_port = conn_facts[intf]['peerport'] peer_port_name = eos_to_linux_intf(peer_port) - peerdev_ans = AnsibleHost(ansible_adhoc, peer_device) + peerdev_ans = fanouthosts[peer_device] if is_pfc: for priority in range(PRIO_COUNT): cmd = "sudo python %s -i %s -p %d -t %d -n %d" % (PFC_GEN_FILE_DEST, peer_port_name, 2 ** priority, pause_time, PKT_COUNT) - peerdev_ans.command(cmd) + peerdev_ans.host.command(cmd) else: cmd = "sudo python %s -i %s -g -t %d -n %d" % (PFC_GEN_FILE_DEST, peer_port_name, pause_time, PKT_COUNT) - peerdev_ans.command(cmd) - + peerdev_ans.host.command(cmd) + """ SONiC takes some time to update counters in database """ time.sleep(5) """ Check results """ - counter_facts = dut_ans.sonic_pfc_counters(method = "get")['ansible_facts'] + counter_facts = duthost.sonic_pfc_counters(method = "get")['ansible_facts'] - for intf in active_phy_intfs: + for intf in active_phy_intfs: if is_pfc: assert counter_facts[intf]['Rx'] == [str(PKT_COUNT)] * PRIO_COUNT else: assert counter_facts[intf]['Rx'] == ['0'] * PRIO_COUNT -def test_pfc_pause(ansible_adhoc, testbed, conn_graph_facts, leaf_fanouts): +def test_pfc_pause(fanouthosts, duthost, conn_graph_facts, leaf_fanouts): """ @Summary: Run PFC 
pause frame (pause time quanta > 0) tests """ - run_test(ansible_adhoc, testbed, conn_graph_facts, leaf_fanouts) + run_test(fanouthosts, duthost, conn_graph_facts, leaf_fanouts) -def test_pfc_unpause(ansible_adhoc, testbed, conn_graph_facts, leaf_fanouts): +def test_pfc_unpause(fanouthosts, duthost, conn_graph_facts, leaf_fanouts): """ @Summary: Run PFC unpause frame (pause time quanta = 0) tests """ - run_test(ansible_adhoc, testbed, conn_graph_facts, leaf_fanouts, pause_time=0) + run_test(fanouthosts, duthost, conn_graph_facts, leaf_fanouts, pause_time=0) -def test_fc_pause(ansible_adhoc, testbed, conn_graph_facts, leaf_fanouts): +def test_fc_pause(fanouthosts, duthost, conn_graph_facts, leaf_fanouts): """ @Summary: Run FC pause frame (pause time quanta > 0) tests """ - run_test(ansible_adhoc, testbed, conn_graph_facts, leaf_fanouts, is_pfc=False) + run_test(fanouthosts, duthost, conn_graph_facts, leaf_fanouts, is_pfc=False) -def test_fc_unpause(ansible_adhoc, testbed, conn_graph_facts, leaf_fanouts): - """ @Summary: Run FC pause frame (pause time quanta = 0) tests """ - run_test(ansible_adhoc, testbed, conn_graph_facts, leaf_fanouts, is_pfc=False, pause_time=0) \ No newline at end of file +def test_fc_unpause(fanouthosts, duthost, conn_graph_facts, leaf_fanouts): + """ @Summary: Run FC pause frame (pause time quanta = 0) tests """ + run_test(fanouthosts, duthost, conn_graph_facts, leaf_fanouts, is_pfc=False, pause_time=0) diff --git a/tests/qos/test_pfc_pause.py b/tests/qos/test_pfc_pause.py index 0081bd58e3..051ec6a23b 100644 --- a/tests/qos/test_pfc_pause.py +++ b/tests/qos/test_pfc_pause.py @@ -1,12 +1,12 @@ -from ansible_host import AnsibleHost import pytest import os import time import re import struct import random -from qos_fixtures import lossless_prio_dscp_map, conn_graph_facts, leaf_fanouts +from common.fixtures.conn_graph_facts import conn_graph_facts +from qos_fixtures import lossless_prio_dscp_map, leaf_fanouts from qos_helpers import ansible_stdout_to_str, eos_to_linux_intf, start_pause, stop_pause, setup_testbed, gen_testbed_t0, PFC_GEN_FILE, PFC_GEN_REMOTE_PATH PFC_PKT_COUNT = 1000000000 @@ -19,25 +19,23 @@ """ Maximum number of interfaces to test on a DUT """ MAX_TEST_INTFS_COUNT = 4 - -def run_test_t0(ansible_adhoc, - testbed, - conn_graph_facts, - leaf_fanouts, - dscp, - dscp_bg, - queue_paused, - send_pause, - pfc_pause, - pause_prio, - pause_time=65535, + +def run_test_t0(fanouthosts, + duthost, + ptfhost, + conn_graph_facts, + leaf_fanouts, + dscp, + dscp_bg, + queue_paused, + send_pause, + pfc_pause, + pause_prio, + pause_time=65535, max_test_intfs_count=128): - """ + """ @Summary: Run a series of tests on a T0 topology. - For the T0 topology, we only test Vlan (server-faced) interfaces. - @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is - mandatory argument for the class constructors. - @param testbed: Testbed information + For the T0 topology, we only test Vlan (server-faced) interfaces. @param conn_graph_facts: Testbed topology @param leaf_fanouts: Leaf fanout switches @param dscp: DSCP value of test data packets @@ -47,67 +45,62 @@ def run_test_t0(ansible_adhoc, @param pfc_pause: send PFC pause frames or not @param pause_prio: priority of PFC franme @param pause_time: pause time quanta. It is 65535 (maximum pause time quanta) by default. - @param max_test_intfs_count: maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces. 
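`run_test_t0()` below drives one PTF iteration per interface pair and scrapes the PTF script's stdout, which is expected to end with a `Passes: a / b` line; anything else is scored as `[0, 0]`. A minimal sketch of that contract (the explicit `Passes:` prefix check is an extra safeguard added here for illustration; the code below checks only the word count):

```
# Hedged sketch of the "Passes: a / b" parsing contract used below.
def parse_ptf_result(stdout):
    words = stdout.split()
    if len(words) != 4 or words[0] != 'Passes:':
        return [0, 0]               # treat unexpected output as a failed run
    return [int(words[1]), int(words[3])]

print(parse_ptf_result('Passes: 9 / 10'))      # [9, 10]
print(parse_ptf_result('unexpected output'))   # [0, 0]
```

Each tested interface then passes only if `pass_count >= total_count * PTF_PASS_RATIO_THRESH`.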
- return: Return # of iterations and # of passed iterations for each tested interface. + @param max_test_intfs_count: maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces. + return: Return # of iterations and # of passed iterations for each tested interface. """ - dut_hostname = testbed['dut'] - dut_ans = AnsibleHost(ansible_adhoc, dut_hostname) - - ptf_hostname = testbed['ptf'] - ptf_ans = AnsibleHost(ansible_adhoc, ptf_hostname) - + """ Clear DUT's PFC counters """ - dut_ans.sonic_pfc_counters(method="clear") - + duthost.sonic_pfc_counters(method="clear") + """ Disable DUT's PFC wd """ - dut_ans.shell('sudo pfcwd stop') - + duthost.shell('sudo pfcwd stop') + """ Generate a T0 testbed configuration """ - dut_intfs, ptf_intfs, ptf_ip_addrs, ptf_mac_addrs = gen_testbed_t0(ansible_adhoc, testbed) + dut_intfs, ptf_intfs, ptf_ip_addrs, ptf_mac_addrs = gen_testbed_t0(duthost) results = dict() for i in range(min(max_test_intfs_count, len(ptf_intfs))): src_index = i dst_index = (i + 1) % len(ptf_intfs) - + src_intf = ptf_intfs[src_index] dst_intf = ptf_intfs[dst_index] - + src_ip = ptf_ip_addrs[src_index] dst_ip = ptf_ip_addrs[dst_index] - + src_mac = ptf_mac_addrs[src_index] dst_mac = ptf_mac_addrs[dst_index] - + """ DUT interface to pause """ dut_intf_paused = dut_intfs[dst_index] - + """ Clear MAC table in DUT """ - dut_ans.shell('sonic-clear fdb all') + duthost.shell('sonic-clear fdb all') time.sleep(2) - - if send_pause: + + if send_pause: peer_device = conn_graph_facts['device_conn'][dut_intf_paused]['peerdevice'] peer_port = conn_graph_facts['device_conn'][dut_intf_paused]['peerport'] peer_port_name = eos_to_linux_intf(peer_port) - peerdev_ans = AnsibleHost(ansible_adhoc, peer_device) + peerdev_ans = fanouthosts[peer_device] if not pfc_pause: - pause_prio = None - + pause_prio = None + start_pause(host_ans=peerdev_ans, - pkt_gen_path=PFC_GEN_REMOTE_PATH, + pkt_gen_path=PFC_GEN_REMOTE_PATH, intf=peer_port_name, pkt_count=PFC_PKT_COUNT, pause_duration=pause_time, pause_priority=pause_prio) - + """ Wait for PFC pause frame generation """ time.sleep(1) - + """ Run PTF test """ intf_info = '--interface %d@%s --interface %d@%s' % (src_index, src_intf, dst_index, dst_intf) - + test_params = ("mac_src=\'%s\';" % src_mac + "mac_dst=\'%s\';" % dst_mac + "ip_src=\'%s\';" % src_ip @@ -120,14 +113,14 @@ def run_test_t0(ansible_adhoc, + "port_dst=%d;" % dst_index + "queue_paused=%s;" % queue_paused + "dut_has_mac=False") - + cmd = 'ptf --test-dir %s %s --test-params="%s"' % (os.path.dirname(PTF_FILE_REMOTE_PATH), intf_info, test_params) - print cmd - stdout = ansible_stdout_to_str(ptf_ans.shell(cmd)['stdout']) + print cmd + stdout = ansible_stdout_to_str(ptfhost.shell(cmd)['stdout']) words = stdout.split() - - """ - Expected format: "Passes: a / b" + + """ + Expected format: "Passes: a / b" where a is # of passed iterations and b is total # of iterations """ if len(words) != 4: @@ -135,33 +128,33 @@ def run_test_t0(ansible_adhoc, results[dut_intf_paused] = [0, 0] else: - results[dut_intf_paused] = [int(words[1]), int(words[3])] + results[dut_intf_paused] = [int(words[1]), int(words[3])] time.sleep(1) if send_pause: """ Stop PFC / FC storm """ stop_pause(peerdev_ans, PFC_GEN_FILE) time.sleep(1) - + return results -def run_test(ansible_adhoc, - testbed, - conn_graph_facts, - leaf_fanouts, - dscp, - dscp_bg, - queue_paused, - send_pause, - pfc_pause, - pause_prio, - pause_time=65535, +def run_test(fanouthosts, + duthost, + ptfhost, + testbed, + 
conn_graph_facts, + leaf_fanouts, + dscp, + dscp_bg, + queue_paused, + send_pause, + pfc_pause, + pause_prio, + pause_time=65535, max_test_intfs_count=128): - """ + """ @Summary: Run a series of tests (only support T0 topology) - @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is - mandatory argument for the class constructors. @param testbed: Testbed information @param conn_graph_facts: Testbed topology @param leaf_fanouts: Leaf fanout switches @@ -172,66 +165,69 @@ def run_test(ansible_adhoc, @param pfc_pause: send PFC pause frames or not @param pause_prio: priority of PFC franme @param pause_time: pause time quanta. It is 65535 (maximum pause time quanta) by default. - @param max_test_intfs_count: maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces. - return: Return # of iterations and # of passed iterations for each tested interface. + @param max_test_intfs_count: maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces. + return: Return # of iterations and # of passed iterations for each tested interface. """ - - print testbed + + print testbed if testbed['topo']['name'].startswith('t0'): - return run_test_t0(ansible_adhoc=ansible_adhoc, - testbed=testbed, - conn_graph_facts=conn_graph_facts, leaf_fanouts=leaf_fanouts, - dscp=dscp, - dscp_bg=dscp_bg, - queue_paused=queue_paused, + return run_test_t0(fanouthosts=fanouthosts, + duthost=duthost, + ptfhost=ptfhost, + conn_graph_facts=conn_graph_facts, leaf_fanouts=leaf_fanouts, + dscp=dscp, + dscp_bg=dscp_bg, + queue_paused=queue_paused, send_pause=send_pause, pfc_pause=pfc_pause, pause_prio=pause_prio, - pause_time=pause_time, + pause_time=pause_time, max_test_intfs_count=max_test_intfs_count) - + else: - return None - -def test_pfc_pause_lossless(ansible_adhoc, - testbed, - conn_graph_facts, - leaf_fanouts, + return None + +def test_pfc_pause_lossless(fanouthosts, + duthost, + ptfhost, + testbed, + conn_graph_facts, + leaf_fanouts, lossless_prio_dscp_map): - - """ + + """ @Summary: Test if PFC pause frames can pause a lossless priority without affecting the other priorities - @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is - mandatory argument for the class constructors. 
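One more note before the test bodies: `run_test()` above only dispatches to `run_test_t0()` for T0 topologies and returns `None` otherwise, and the callers below treat `None` as a hard failure. A compact sketch of that gating (the dict literals are stand-ins for the real `testbed` fixture):

```
# Hedged sketch of the topology gating in run_test().
def run_test(testbed):
    if testbed['topo']['name'].startswith('t0'):
        return {}       # stands in for run_test_t0(...) results
    return None         # unsupported topology; callers assert on this

assert run_test({'topo': {'name': 't0-64'}}) is not None
assert run_test({'topo': {'name': 't1-lag'}}) is None
```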
@param testbed: Testbed information @param conn_graph_facts: Testbed topology @param lossless_prio_dscp_map: lossless priorities and their DSCP values - """ - setup_testbed(ansible_adhoc=ansible_adhoc, - testbed=testbed, - leaf_fanouts=leaf_fanouts, - ptf_local_path=PTF_FILE_LOCAL_PATH, + """ + setup_testbed(fanouthosts=fanouthosts, + ptfhost=ptfhost, + leaf_fanouts=leaf_fanouts, + ptf_local_path=PTF_FILE_LOCAL_PATH, ptf_remote_path=PTF_FILE_REMOTE_PATH) errors = [] - + """ DSCP vlaues for lossless priorities """ lossless_dscps = [int(dscp) for prio in lossless_prio_dscp_map for dscp in lossless_prio_dscp_map[prio]] """ DSCP values for lossy priorities """ lossy_dscps = list(set(range(64)) - set(lossless_dscps)) - + for prio in lossless_prio_dscp_map: """ DSCP values of the other lossless priorities """ other_lossless_dscps = list(set(lossless_dscps) - set(lossless_prio_dscp_map[prio])) """ We also need to test some DSCP values for lossy priorities """ other_dscps = other_lossless_dscps + lossy_dscps[0:2] - + for dscp in lossless_prio_dscp_map[prio]: for dscp_bg in other_dscps: - results = run_test(ansible_adhoc=ansible_adhoc, + results = run_test(fanouthosts=fanouthosts, + duthost=duthost, + ptfhost=ptfhost, testbed=testbed, conn_graph_facts=conn_graph_facts, - leaf_fanouts=leaf_fanouts, + leaf_fanouts=leaf_fanouts, dscp=dscp, dscp_bg=dscp_bg, queue_paused=True, @@ -243,65 +239,68 @@ def test_pfc_pause_lossless(ansible_adhoc, """ results should not be none """ if results is None: - assert 0 - + assert 0 + errors = dict() for intf in results: if len(results[intf]) != 2: continue - + pass_count = results[intf][0] total_count = results[intf][1] if total_count == 0: continue - + if pass_count < total_count * PTF_PASS_RATIO_THRESH: errors[intf] = results[intf] if len(errors) > 0: print "errors occured:\n{}".format("\n".join(errors)) - assert 0 + assert 0 -def test_no_pfc(ansible_adhoc, - testbed, - conn_graph_facts, - leaf_fanouts, +def test_no_pfc(fanouthosts, + duthost, + ptfhost, + testbed, + conn_graph_facts, + leaf_fanouts, lossless_prio_dscp_map): - - """ + + """ @Summary: Test if lossless and lossy priorities can forward packets in the absence of PFC pause frames - @param ansible_adhoc: Fixture provided by the pytest-ansible package. Source of the various device objects. It is - mandatory argument for the class constructors. 
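Both `test_pfc_pause_lossless` above and `test_no_pfc` below partition the 64 DSCP values the same way: everything mapped to a lossless priority is lossless, the rest is lossy, and each background set mixes the other lossless DSCPs with the first two lossy ones. A worked example (the two-priority map below is an assumption):

```
# Worked example of the DSCP partitioning; the map is illustrative.
lossless_prio_dscp_map = {3: [3], 4: [4]}

lossless_dscps = [int(dscp) for prio in lossless_prio_dscp_map
                  for dscp in lossless_prio_dscp_map[prio]]
lossy_dscps = sorted(set(range(64)) - set(lossless_dscps))

for prio in lossless_prio_dscp_map:
    other_lossless_dscps = sorted(set(lossless_dscps) -
                                  set(lossless_prio_dscp_map[prio]))
    other_dscps = other_lossless_dscps + lossy_dscps[0:2]
    print('prio %d: background DSCPs %s' % (prio, other_dscps))

# prio 3: background DSCPs [4, 0, 1]
# prio 4: background DSCPs [3, 0, 1]
```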
+ @param fanouthosts: Fixture for fanout hosts @param testbed: Testbed information @param conn_graph_facts: Testbed topology @param lossless_prio_dscp_map: lossless priorities and their DSCP values - """ - setup_testbed(ansible_adhoc=ansible_adhoc, - testbed=testbed, - leaf_fanouts=leaf_fanouts, - ptf_local_path=PTF_FILE_LOCAL_PATH, + """ + setup_testbed(fanouthosts=fanouthosts, + ptfhost=ptfhost, + leaf_fanouts=leaf_fanouts, + ptf_local_path=PTF_FILE_LOCAL_PATH, ptf_remote_path=PTF_FILE_REMOTE_PATH) errors = [] - + """ DSCP vlaues for lossless priorities """ lossless_dscps = [int(dscp) for prio in lossless_prio_dscp_map for dscp in lossless_prio_dscp_map[prio]] """ DSCP values for lossy priorities """ lossy_dscps = list(set(range(64)) - set(lossless_dscps)) - + for prio in lossless_prio_dscp_map: """ DSCP values of the other lossless priorities """ other_lossless_dscps = list(set(lossless_dscps) - set(lossless_prio_dscp_map[prio])) """ We also need to test some DSCP values for lossy priorities """ other_dscps = other_lossless_dscps + lossy_dscps[0:2] - + for dscp in lossless_prio_dscp_map[prio]: for dscp_bg in other_dscps: - results = run_test(ansible_adhoc=ansible_adhoc, + results = run_test(fanouthosts=fanouthosts, + duthost=duthost, + ptfhost=ptfhost, testbed=testbed, conn_graph_facts=conn_graph_facts, - leaf_fanouts=leaf_fanouts, + leaf_fanouts=leaf_fanouts, dscp=dscp, dscp_bg=dscp_bg, queue_paused=False, @@ -313,22 +312,22 @@ def test_no_pfc(ansible_adhoc, """ results should not be none """ if results is None: - assert 0 - + assert 0 + errors = dict() for intf in results: if len(results[intf]) != 2: continue - + pass_count = results[intf][0] total_count = results[intf][1] if total_count == 0: continue - + if pass_count < total_count * PTF_PASS_RATIO_THRESH: errors[intf] = results[intf] if len(errors) > 0: print "errors occured:\n{}".format("\n".join(errors)) - assert 0 + assert 0 diff --git a/tests/route/test_default_route.py b/tests/route/test_default_route.py new file mode 100644 index 0000000000..ebc18f695d --- /dev/null +++ b/tests/route/test_default_route.py @@ -0,0 +1,51 @@ +import ipaddress +import logging +from common.helpers.assertions import pytest_assert + +logger = logging.getLogger(__name__) + +def test_default_route_set_src(duthost): + """ + check if ipv4 and ipv6 default src address match Loopback0 address + + """ + + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + + lo_ipv4 = None + lo_ipv6 = None + los = config_facts.get("LOOPBACK_INTERFACE", {}) + logger.info("Loopback IPs: {}".format(los)) + for k, v in los.items(): + if k == "Loopback0": + for ipstr in v.keys(): + ip = ipaddress.ip_interface(ipstr) + if ip.version == 4: + lo_ipv4 = ip + elif ip.version == 6: + lo_ipv6 = ip + + pytest_assert(lo_ipv4, "cannot find ipv4 Loopback0 address") + pytest_assert(lo_ipv6, "cannot find ipv6 Loopback0 address") + + rtinfo = duthost.get_ip_route_info(ipaddress.ip_address(u"0.0.0.0")) + pytest_assert(rtinfo['set_src'], "default route do not have set src. {}".format(rtinfo)) + pytest_assert(rtinfo['set_src'] == lo_ipv4.ip, \ + "default route set src to wrong IP {} != {}".format(rtinfo['set_src'], lo_ipv4.ip)) + + rtinfo = duthost.get_ip_route_info(ipaddress.ip_address(u"::")) + pytest_assert(rtinfo['set_src'], "default v6 route do not have set src. 
{}".format(rtinfo)) + pytest_assert(rtinfo['set_src'] == lo_ipv6.ip, \ + "default v6 route set src to wrong IP {} != {}".format(rtinfo['set_src'], lo_ipv6.ip)) + +def test_default_ipv6_route_next_hop_global_address(duthost): + """ + check if ipv6 default route nexthop address uses global address + + """ + + rtinfo = duthost.get_ip_route_info(ipaddress.ip_address(u"::")) + pytest_assert(rtinfo['nexthops'] > 0, "cannot find ipv6 nexthop for default route") + for nh in rtinfo['nexthops']: + pytest_assert(not nh[0].is_link_local, \ + "use link local address {} for nexthop".format(nh[0])) diff --git a/tests/scripts/add_ip.sh b/tests/scripts/add_ip.sh new file mode 100755 index 0000000000..e2fc433ada --- /dev/null +++ b/tests/scripts/add_ip.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set -e + +for i in `cat /proc/net/dev | grep eth | awk -F'eth|:' '{print $2}'`; do + last_el=$((1+i*2)) + ip address add 10.0.0.$last_el/31 dev eth$i +done diff --git a/tests/scripts/fast-reboot b/tests/scripts/fast-reboot new file mode 100755 index 0000000000..c4ea1805cb --- /dev/null +++ b/tests/scripts/fast-reboot @@ -0,0 +1,547 @@ +#!/bin/bash -e + +REBOOT_USER=$(logname) +REBOOT_TIME=$(date) +REBOOT_CAUSE_FILE="/host/reboot-cause/reboot-cause.txt" +WARM_DIR=/host/warmboot +REDIS_FILE=dump.rdb +REBOOT_SCRIPT_NAME=$(basename $0) +REBOOT_TYPE="${REBOOT_SCRIPT_NAME}" +VERBOSE=no +FORCE=no +STRICT=no +REBOOT_METHOD="/sbin/kexec -e" +ASSISTANT_IP_LIST="" +ASSISTANT_SCRIPT="/usr/bin/neighbor_advertiser" + +# Require 100M available on the hard drive for warm reboot temp files, +# Size is in 1K blocks: +MIN_HD_SPACE_NEEDED=100000 + +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_NOT_SUPPORTED=2 +EXIT_FILE_SYSTEM_FULL=3 +EXIT_NEXT_IMAGE_NOT_EXISTS=4 +EXIT_ORCHAGENT_SHUTDOWN=10 +EXIT_SYNCD_SHUTDOWN=11 +EXIT_FAST_REBOOT_DUMP_FAILURE=12 +EXIT_NO_CONTROL_PLANE_ASSISTANT=20 + +function error() +{ + echo $@ >&2 +} + +function debug() +{ + if [[ x"${VERBOSE}" == x"yes" ]]; then + echo `date` $@ + fi + logger "$@" +} + +function showHelpAndExit() +{ + echo "Usage: ${REBOOT_SCRIPT_NAME} [options]" + echo " -h,-? : get this help" + echo " -v : turn on verbose" + echo " -f : force execution" + echo " -r : reboot with /sbin/reboot" + echo " -k : reboot with /sbin/kexec -e [default]" + echo " -x : execute script with -x flag" + echo " -c : specify control plane assistant IP list" + echo " -s : strict mode: do not proceed without:" + echo " - control plane assistant IP list." + + exit "${EXIT_SUCCESS}" +} + +function parseOptions() +{ + while getopts "vfh?rkxc:s" opt; do + case ${opt} in + h|\? ) + showHelpAndExit + ;; + v ) + VERBOSE=yes + ;; + f ) + FORCE=yes + ;; + r ) + REBOOT_METHOD="/sbin/reboot" + ;; + k ) + REBOOT_METHOD="/sbin/kexec -e" + ;; + x ) + set -x + ;; + c ) + ASSISTANT_IP_LIST=${OPTARG} + ;; + s ) + STRICT=yes + ;; + esac + done +} + +function clear_fast_boot() +{ + debug "${REBOOT_TYPE} failure ($?) cleanup ..." + + /sbin/kexec -u || /bin/true + + teardown_control_plane_assistant +} + +function clear_warm_boot() +{ + clear_fast_boot + + result=`timeout 10s config warm_restart disable; if [[ $? == 124 ]]; then echo timeout; else echo "code ($?)"; fi` || /bin/true + debug "Cancel warm-reboot: ${result}" + + TIMESTAMP=`date +%Y%m%d-%H%M%S` + if [[ -f ${WARM_DIR}/${REDIS_FILE} ]]; then + mv -f ${WARM_DIR}/${REDIS_FILE} ${WARM_DIR}/${REDIS_FILE}.${TIMESTAMP} || /bin/true + fi +} + +function init_warm_reboot_states() +{ + # If the current running instanace was booted up with warm reboot. 
Then + # the current DB contents will likely mark warm reboot is done. + # Clear these states so that the next boot up image won't get confused. + if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + redis-cli -n 6 eval " + for _, key in ipairs(redis.call('keys', 'WARM_RESTART_TABLE|*')) do + redis.call('hdel', key, 'state') + end + " 0 >/dev/null + fi +} + +function initialize_pre_shutdown() +{ + debug "Initialize pre-shutdown ..." + TABLE="WARM_RESTART_TABLE|warm-shutdown" + RESTORE_COUNT=`/usr/bin/redis-cli -n 6 hget "${TABLE}" restore_count` + if [[ -z "$RESTORE_COUNT" ]]; then + /usr/bin/redis-cli -n 6 hset "${TABLE}" "restore_count" "0" > /dev/null + fi + /usr/bin/redis-cli -n 6 hset "${TABLE}" "state" "requesting" > /dev/null +} + +function request_pre_shutdown() +{ + debug "Requesting pre-shutdown ..." + /usr/bin/docker exec -i syncd /usr/bin/syncd_request_shutdown --pre &> /dev/null || { + error "Failed to request pre-shutdown" + } +} + +function wait_for_pre_shutdown_complete_or_fail() +{ + debug "Waiting for pre-shutdown ..." + TABLE="WARM_RESTART_TABLE|warm-shutdown" + STATE="requesting" + declare -i waitcount + declare -i retrycount + waitcount=0 + retrycount=0 + # Wait up to 60 seconds for pre-shutdown to complete + while [[ ${waitcount} -lt 600 ]]; do + # timeout doesn't work with -i option of "docker exec". Therefore we have + # to invoke docker exec directly below. + STATE=`timeout 5s docker exec database redis-cli -n 6 hget "${TABLE}" state; if [[ $? == 124 ]]; then echo "timed out"; fi` + + if [[ x"${STATE}" == x"timed out" ]]; then + waitcount+=50 + retrycount+=1 + debug "Timed out getting pre-shutdown state (${waitcount}) retry count ${retrycount} ..." + if [[ retrycount -gt 2 ]]; then + break + fi + elif [[ x"${STATE}" != x"requesting" ]]; then + break + else + sleep 0.1 + waitcount+=1 + fi + done + + if [[ x"${STATE}" != x"pre-shutdown-succeeded" ]]; then + debug "Syncd pre-shutdown failed: ${STATE} ..." + else + debug "Pre-shutdown succeeded ..." + fi +} + +function backup_database() +{ + debug "Backing up database ..." + # Dump redis content to a file 'dump.rdb' in warmboot directory + mkdir -p $WARM_DIR + # Delete keys in stateDB except FDB_TABLE|*, MIRROR_SESSION_TABLE|*, WARM_RESTART_ENABLE_TABLE|* + redis-cli -n 6 eval " + for _, k in ipairs(redis.call('keys', '*')) do + if not string.match(k, 'FDB_TABLE|') and not string.match(k, 'WARM_RESTART_TABLE|') \ + and not string.match(k, 'MIRROR_SESSION_TABLE|') \ + and not string.match(k, 'WARM_RESTART_ENABLE_TABLE|') then + redis.call('del', k) + end + end + " 0 > /dev/null + redis-cli save > /dev/null + docker cp database:/var/lib/redis/$REDIS_FILE $WARM_DIR + docker exec -i database rm /var/lib/redis/$REDIS_FILE +} + +function setup_control_plane_assistant() +{ + if [[ -n "${ASSISTANT_IP_LIST}" && -x ${ASSISTANT_SCRIPT} ]]; then + debug "Setting up control plane assistant: ${ASSISTANT_IP_LIST} ..." + ${ASSISTANT_SCRIPT} -s ${ASSISTANT_IP_LIST} -m set + elif [[ X"${STRICT}" == X"yes" ]]; then + debug "Strict mode: fail due to lack of control plane assistant ..." + exit ${EXIT_NO_CONTROL_PLANE_ASSISTANT} + fi +} + +function teardown_control_plane_assistant() +{ + if [[ -n "${ASSISTANT_IP_LIST}" && -x ${ASSISTANT_SCRIPT} ]]; then + debug "Tearing down control plane assistant: ${ASSISTANT_IP_LIST} ..." 
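A note on `wait_for_pre_shutdown_complete_or_fail()` above: the budget is 600 ticks of 0.1s (roughly 60 seconds), each 5-second redis timeout burns 50 ticks, and more than two timeouts abort the wait early. The same logic, rendered as a hedged Python sketch (`read_state` is an assumed callable standing in for the `docker exec ... redis-cli hget` call):

```
# Hedged Python rendering of the bash wait loop above; read_state() is an
# assumed callable returning the WARM_RESTART_TABLE state or 'timed out'.
import time

def wait_for_pre_shutdown(read_state, max_ticks=600, max_retries=2):
    ticks, retries = 0, 0
    state = 'requesting'
    while ticks < max_ticks:
        state = read_state()
        if state == 'timed out':
            ticks += 50
            retries += 1
            if retries > max_retries:
                break
        elif state != 'requesting':
            break
        else:
            time.sleep(0.1)
            ticks += 1
    return state == 'pre-shutdown-succeeded'
```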
+ ${ASSISTANT_SCRIPT} -s ${ASSISTANT_IP_LIST} -m reset + fi +} + +function setup_reboot_variables() +{ + # Kernel and initrd image + CURRENT_SONIC_IMAGE=$(sonic_installer list | grep "Current: " | cut -d ' ' -f 2) + NEXT_SONIC_IMAGE=$(sonic_installer list | grep "Next: " | cut -d ' ' -f 2) + IMAGE_PATH="/host/image-${NEXT_SONIC_IMAGE#SONiC-OS-}" + if grep -q aboot_platform= /host/machine.conf; then + KERNEL_IMAGE="$(ls $IMAGE_PATH/boot/vmlinuz-*)" + BOOT_OPTIONS="$(cat "$IMAGE_PATH/kernel-cmdline" | tr '\n' ' ') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + elif grep -q onie_platform= /host/machine.conf; then + KERNEL_OPTIONS=$(cat /host/grub/grub.cfg | sed "/$NEXT_SONIC_IMAGE'/,/}/"'!'"g" | grep linux) + KERNEL_IMAGE="/host$(echo $KERNEL_OPTIONS | cut -d ' ' -f 2)" + BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + else + error "Unknown bootloader. ${REBOOT_TYPE} is not supported." + exit "${EXIT_NOT_SUPPORTED}" + fi + INITRD=$(echo $KERNEL_IMAGE | sed 's/vmlinuz/initrd.img/g') +} + +function reboot_pre_check() +{ + # Make sure that the file system is normal: read-write able + filename="/host/test-`date +%Y%m%d-%H%M%S`" + if [[ ! -f ${filename} ]]; then + touch ${filename} + fi + rm ${filename} + + # Make sure /host has enough space for warm reboot temp files + avail=$(df -k /host | tail -1 | awk '{ print $4 }') + if [[ ${avail} -lt ${MIN_HD_SPACE_NEEDED} ]]; then + debug "/host has ${avail}K bytes available, not enough for warm reboot." + exit ${EXIT_FILE_SYSTEM_FULL} + fi + + # Make sure that the next image exists + if [[ ! -d ${IMAGE_PATH} ]]; then + debug "Next image ${NEXT_SONIC_IMAGE} doesn't exist ..." + exit ${EXIT_NEXT_IMAGE_NOT_EXISTS} + fi + + # Make sure ASIC configuration has not changed between images + ASIC_CONFIG_CHECK_SCRIPT="/usr/bin/asic_config_check" + ASIC_CONFIG_CHECK_SUCCESS=0 + if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + ASIC_CONFIG_CHECK_EXIT_CODE=0 + ${ASIC_CONFIG_CHECK_SCRIPT} || ASIC_CONFIG_CHECK_EXIT_CODE=$? + + if [[ "${ASIC_CONFIG_CHECK_EXIT_CODE}" != "${ASIC_CONFIG_CHECK_SUCCESS}" ]]; then + if [[ x"${FORCE}" == x"yes" ]]; then + debug "Ignoring ASIC config checksum failure..." 
+ else + error "ASIC config may have changed: errno=${ASIC_CONFIG_CHECK_EXIT_CODE}" + exit "${EXIT_FAILURE}" + fi + fi + fi +} + +function unload_kernel() +{ + # Unload the previously loaded kernel if any loaded + if [[ "$(cat /sys/kernel/kexec_loaded)" -eq 1 ]]; then + /sbin/kexec -u + fi +} + +# main starts here +parseOptions $@ + +# Check root privileges +if [[ "$EUID" -ne 0 ]] +then + echo "This command must be run as root" >&2 + exit "${EXIT_FAILURE}" +fi + +sonic_asic_type=$(sonic-cfggen -y /etc/sonic/sonic_version.yml -v asic_type) + +# Check reboot type supported +BOOT_TYPE_ARG="cold" +case "$REBOOT_TYPE" in + "fast-reboot") + BOOT_TYPE_ARG=$REBOOT_TYPE + trap clear_fast_boot EXIT HUP INT QUIT TERM KILL ABRT ALRM + ;; + "warm-reboot") + if [[ "$sonic_asic_type" == "mellanox" ]]; then + REBOOT_TYPE="fastfast-reboot" + BOOT_TYPE_ARG="fastfast" + # source mlnx-ffb.sh file with + # functions to check ISSU upgrade possibility + source mlnx-ffb.sh + else + BOOT_TYPE_ARG="warm" + fi + trap clear_warm_boot EXIT HUP INT QUIT TERM KILL ABRT ALRM + config warm_restart enable system + ;; + *) + error "Not supported reboot type: $REBOOT_TYPE" + exit "${EXIT_NOT_SUPPORTED}" + ;; +esac + +# Stopping all SLB neighbors if they're presented +if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then + debug "Stopping all SLB neighbors if they are presented" + PASSIVE_BGP_NEIGHBORS=$(sonic-cfggen -d -v "BGP_PEER_RANGE | list") + case "$PASSIVE_BGP_NEIGHBORS" in + *BGPSLBPassive*) + ASN=$(sonic-cfggen -d -v "DEVICE_METADATA['localhost']['bgp_asn']") + vtysh -c "configure terminal" -c "router bgp ${ASN}" -c "neighbor BGPSLBPassive shutdown" + sleep 30 # wait for 30 seconds - BGP RouteAdv default timer + ;; + *) + ;; + esac +fi + +unload_kernel + +setup_reboot_variables + +reboot_pre_check + +# Install new FW for mellanox platforms before control plane goes down +# So on boot switch will not spend time to upgrade FW increasing the CP downtime +if [[ "$sonic_asic_type" == "mellanox" ]]; then + MLNX_EXIT_SUCCESS=0 + MLNX_EXIT_FW_ERROR=100 + MLNX_EXIT_FFB_FAILURE=101 + + MLNX_FW_UPGRADE_SCRIPT="/usr/bin/mlnx-fw-upgrade.sh" + + + if [[ "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + check_ffb || { + error "Warm reboot is not supported" + exit "${MLNX_EXIT_FFB_FAILURE}" + } + fi + + debug "Prepare MLNX ASIC to ${REBOOT_TYPE}: install new FW if required" + + ${MLNX_FW_UPGRADE_SCRIPT} --upgrade + MLNX_EXIT_CODE="$?" + if [[ "${MLNX_EXIT_CODE}" != "${MLNX_EXIT_SUCCESS}" ]]; then + error "Failed to burn MLNX FW: errno=${MLNX_EXIT_CODE}" + exit "${MLNX_EXIT_FW_ERROR}" + fi +fi + +# Load kernel into the memory +/sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" + +if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then + # Dump the ARP and FDB tables to files also as default routes for both IPv4 and IPv6 + # into /host/fast-reboot + mkdir -p /host/fast-reboot + FAST_REBOOT_DUMP_RC=0 + /usr/bin/fast-reboot-dump.py -t /host/fast-reboot || FAST_REBOOT_DUMP_RC=$? + if [[ FAST_REBOOT_DUMP_RC -ne 0 ]]; then + error "Failed to run fast-reboot-dump.py. 
Exit code: $FAST_REBOOT_DUMP_RC" + unload_kernel + exit "${EXIT_FAST_REBOOT_DUMP_FAILURE}" + fi +fi + +init_warm_reboot_states + +setup_control_plane_assistant + +if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + # Freeze orchagent for warm restart + # Ask orchagent_restart_check to try freeze 5 times with interval of 2 seconds, + # it is possible that the orchagent is in transient state and no opportunity to be freezed + # Note: assume that 2*5 seconds is enough for orchagent to process the request and respone freeze or not + debug "Pausing orchagent ..." + docker exec -i swss /usr/bin/orchagent_restart_check -w 2000 -r 5 > /dev/null || RESTARTCHECK_RC=$? + if [[ RESTARTCHECK_RC -ne 0 ]]; then + error "RESTARTCHECK failed" + if [[ x"${FORCE}" == x"yes" ]]; then + debug "Ignoring orchagent pausing failure ..." + else + exit "${EXIT_ORCHAGENT_SHUTDOWN}" + fi + fi +fi + +# We are fully committed to reboot from this point on becasue critical +# service will go down and we cannot recover from it. +set +e + +# Kill radv before stopping BGP service to prevent annoucing our departure. +debug "Stopping radv ..." +docker kill radv &>/dev/null || [ $? == 1 ] +systemctl stop radv + +# Kill bgpd to start the bgp graceful restart procedure +debug "Stopping bgp ..." +docker exec -i bgp pkill -9 zebra +docker exec -i bgp pkill -9 bgpd || [ $? == 1 ] +debug "Stopped bgp ..." + +# Kill lldp, otherwise it sends informotion about reboot. +# We call `docker kill lldp` to ensure the container stops as quickly as possible, +# then immediately call `systemctl stop lldp` to prevent the service from +# restarting the container automatically. +docker kill lldp &> /dev/null || debug "Docker lldp is not running ($?) ..." +systemctl stop lldp + +if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then + debug "Stopping teamd ..." + docker kill teamd &> /dev/null || debug "Docker teamd is not running ($?) ..." + systemctl stop teamd + debug "Stopped teamd ..." +fi + +# Kill swss Docker container +# We call `docker kill swss` to ensure the container stops as quickly as possible, +# then immediately call `systemctl stop swss` to prevent the service from +# restarting the container automatically. +docker kill swss &> /dev/null || debug "Docker swss is not running ($?) ..." + +# Pre-shutdown syncd +if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + initialize_pre_shutdown + + request_pre_shutdown + + wait_for_pre_shutdown_complete_or_fail + + # Warm reboot: dump state to host disk + if [[ "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + redis-cli -n 1 FLUSHDB > /dev/null + redis-cli -n 2 FLUSHDB > /dev/null + redis-cli -n 5 FLUSHDB > /dev/null + fi + + # TODO: backup_database preserves FDB_TABLE + # need to cleanup as well for fastfast boot case + backup_database +fi + +# Stop teamd gracefully +if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + debug "Stopping teamd ..." + # Send USR1 signal to all teamd instances to stop them + # It will prepare teamd for warm-reboot + # Note: We must send USR1 signal before syncd, because it will send the last packet through CPU port + docker exec -i teamd pkill -USR1 teamd > /dev/null || [ $? == 1 ] + debug "Stopped teamd ..." +fi + +debug "Stopping syncd ..." +if [[ ${CURRENT_SONIC_IMAGE} =~ "20180330" && "$sonic_asic_type" = 'broadcom' ]]; then + debug "Stopping syncd on ${CURRENT_SONIC_IMAGE} ..." 
+
+# Stop teamd gracefully
+if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then
+    debug "Stopping teamd ..."
+    # Send the USR1 signal to all teamd instances to stop them;
+    # it prepares teamd for warm-reboot.
+    # Note: we must send USR1 before stopping syncd, because teamd will send its last packet through the CPU port
+    docker exec -i teamd pkill -USR1 teamd > /dev/null || [ $? == 1 ]
+    debug "Stopped teamd ..."
+fi
+
+debug "Stopping syncd ..."
+if [[ ${CURRENT_SONIC_IMAGE} =~ "20180330" && "$sonic_asic_type" = 'broadcom' ]]; then
+    debug "Stopping syncd on ${CURRENT_SONIC_IMAGE} ..."
+
+    # Gracefully stop syncd
+    docker exec -i syncd /usr/bin/syncd_request_shutdown --cold > /dev/null
+
+    # Check that syncd was stopped
+    while docker top syncd | grep -q /usr/bin/syncd
+    do
+        sleep 0.1
+    done
+else
+    systemctl stop syncd || debug "Ignore stopping syncd service error $?"
+fi
+debug "Stopped syncd ..."
+
+# Kill the other containers to make the reboot faster
+# We call `docker kill ...` to ensure the container stops as quickly as possible,
+# then immediately call `systemctl stop ...` to prevent the service from
+# restarting the container automatically.
+debug "Stopping all remaining containers ..."
+for CONTAINER_NAME in $(docker ps --format '{{.Names}}'); do
+    CONTAINER_STOP_RC=0
+    docker kill $CONTAINER_NAME &> /dev/null || CONTAINER_STOP_RC=$?
+    systemctl stop $CONTAINER_NAME || debug "Ignore stopping $CONTAINER_NAME error $?"
+    if [[ CONTAINER_STOP_RC -ne 0 ]]; then
+        debug "Failed killing container $CONTAINER_NAME RC $CONTAINER_STOP_RC."
+    fi
+done
+debug "Stopped all remaining containers ..."
+
+# Stop the docker container engine; otherwise we will end up with broken docker storage
+systemctl stop docker.service || debug "Ignore stopping docker service error $?"
+
+# Stop kernel modules for the Nephos platform
+if [[ "$sonic_asic_type" = 'nephos' ]]; then
+    systemctl stop nps-modules-`uname -r`.service || debug "Ignore stopping nps service error $?"
+fi
+
+# Update the reboot cause file to reflect that the user issued this script.
+# Upon next boot, the contents of this file will be used to determine the
+# cause of the previous reboot
+echo "User issued '${REBOOT_SCRIPT_NAME}' command [User: ${REBOOT_USER}, Time: ${REBOOT_TIME}]" > ${REBOOT_CAUSE_FILE}
+
+# Wait until all buffers are synced to disk
+sync
+sleep 1
+sync
+
+# Sync the current system time to CMOS
+if [ -x /sbin/hwclock ]; then
+    /sbin/hwclock -w || /bin/true
+fi
+
+# Enable the Watchdog Timer
+if [[ -x /usr/bin/watchdog ]]; then
+    debug "Enabling Watchdog before ${REBOOT_TYPE}"
+    /usr/bin/watchdog -e
+fi
+
+# Reboot: explicitly call the native Linux reboot under /sbin
+debug "Rebooting with ${REBOOT_METHOD} to ${NEXT_SONIC_IMAGE} ..."
+exec ${REBOOT_METHOD}
+
+# Should never reach here
+error "${REBOOT_TYPE} failed!"
+exit "${EXIT_FAILURE}"
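Once the device boots again, the cause recorded above is what the CLI reports; a quick post-reboot check, not part of the patch (output format follows the echo line above):

show reboot-cause
# e.g. User issued 'warm-reboot' command [User: admin, Time: ...]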
diff --git a/tests/scripts/remove_ip.sh b/tests/scripts/remove_ip.sh
deleted file mode 120000
index a03b98f404..0000000000
--- a/tests/scripts/remove_ip.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../ansible/roles/test/files/helpers/remove_ip.sh
\ No newline at end of file
diff --git a/tests/scripts/remove_ip.sh b/tests/scripts/remove_ip.sh
new file mode 100755
index 0000000000..abed39e4dc
--- /dev/null
+++ b/tests/scripts/remove_ip.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -euo pipefail
+
+INTF_LIST=$(ls /sys/class/net | grep -E "^eth[0-9]+$")
+
+for INTF in ${INTF_LIST}; do
+    echo "Flush ${INTF} IP address"
+    ip addr flush dev ${INTF}
+done
diff --git a/tests/test_sflow.py b/tests/sflow/test_sflow.py
similarity index 98%
rename from tests/test_sflow.py
rename to tests/sflow/test_sflow.py
index f42acfd44e..38a70921b5 100644
--- a/tests/test_sflow.py
+++ b/tests/sflow/test_sflow.py
@@ -55,12 +55,12 @@ def setup(duthost, ptfhost):
     yield
     # -------- Teardown ----------
     config_reload(duthost, config_source='minigraph', wait=120)
-    
+
 # ----------------------------------------------------------------------------------
 def setup_ptf(ptfhost, collector_ports):
     root_dir = "/root"
     extra_vars = {'arp_responder_args' : '--conf /tmp/sflow_arpresponder.conf'}
-    ptfhost.host.options['variable_manager'].extra_vars = extra_vars
+    ptfhost.host.options['variable_manager'].extra_vars.update(extra_vars)
     ptfhost.template(src="../ansible/roles/test/templates/arp_responder.conf.j2", dest="/etc/supervisor/conf.d/arp_responder.conf")
     ptfhost.copy(src="ptftests", dest=root_dir)
     ptfhost.copy(src="../ansible/roles/test/files/helpers/arp_responder.py", dest="/opt")
@@ -390,8 +390,7 @@ def testAddAgent(self, sflowbase_config, duthost, partial_ptf_runner):
 
 class TestReboot():
 
-    def testRebootSflowEnable(self, sflowbase_config, duthost, testbed_devices, localhost, partial_ptf_runner, ptfhost):
-        duthost = testbed_devices["dut"]
+    def testRebootSflowEnable(self, sflowbase_config, duthost, localhost, partial_ptf_runner, ptfhost):
         duthost.command("config sflow polling-interval 80")
         verify_show_sflow(duthost,status='up',polling_int=80)
         duthost.command('sudo config save -y')
@@ -412,7 +411,7 @@ def testRebootSflowEnable(self, sflowbase_config, duthost, testbed_devices, loca
            active_collectors="['collector0','collector1']" )
 
-    def testRebootSflowDisable(self, sflowbase_config, duthost, testbed_devices, localhost, partial_ptf_runner, ptfhost):
+    def testRebootSflowDisable(self, sflowbase_config, duthost, localhost, partial_ptf_runner, ptfhost):
         config_sflow(duthost,sflow_status='disable')
         verify_show_sflow(duthost,status='down')
         partial_ptf_runner(
@@ -431,7 +430,7 @@ def testRebootSflowDisable(self, sflowbase_config, duthost, testbed_devices, loc
            active_collectors="[]" )
 
-    def testFastreboot(self, sflowbase_config, duthost, testbed_devices, localhost, partial_ptf_runner, ptfhost):
+    def testFastreboot(self, sflowbase_config, duthost, localhost, partial_ptf_runner, ptfhost):
         config_sflow(duthost,sflow_status='enable')
         verify_show_sflow(duthost,status='up',collector=['collector0','collector1'])
 
diff --git a/tests/show_techsupport/test_techsupport.py b/tests/show_techsupport/test_techsupport.py
index ebcc3422f1..99f361a41a 100644
--- a/tests/show_techsupport/test_techsupport.py
+++ b/tests/show_techsupport/test_techsupport.py
@@ -136,9 +136,18 @@ def neighbor_ip(duthost, testbed):
     # ptf-32 topo is not supported in mirroring
     if testbed['topo']['name'] == 'ptf32':
         pytest.skip('Unsupported Topology')
     mg_facts =
duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] - dst_ip = mg_facts["minigraph_portchannel_interfaces"][0]['peer_addr'] + dst_ip = None + if mg_facts["minigraph_portchannel_interfaces"]: + dst_ip = mg_facts["minigraph_portchannel_interfaces"][0]['peer_addr'] + else: + peer_addr_list = [(item['peer_addr']) for item in mg_facts["minigraph_interfaces"] if 'peer_addr' in item] + if peer_addr_list: + dst_ip = peer_addr_list[0] + + if dst_ip is None: + pytest.skip("No neighbor ip available. Skipping test.") + yield str(dst_ip) @@ -173,7 +182,6 @@ def mirroring(duthost, neighbor_ip, mirror_setup, gre_version): :param mirror_setup: mirror_setup fixture :param mirror_config: mirror_config fixture """ - logger.info("Adding mirror_session to DUT") acl_rule_file = os.path.join(mirror_setup['dut_tmp_dir'], ACL_RULE_PERSISTENT_FILE) extra_vars = { @@ -257,8 +265,8 @@ def test_techsupport(request, config, duthost, testbed): for i in range(loop_range): logger.debug("Running show techsupport ... ") - wait_until(300, 20, execute_command, duthost, since) - tar_file = [i for i in pytest.tar_stdout.split('\n') if i != ''][-1] + wait_until(300, 20, execute_command, duthost, str(since)) + tar_file = [j for j in pytest.tar_stdout.split('\n') if j != ''][-1] stdout = duthost.command("rm -rf {}".format(tar_file)) logger.debug("Sleeping for {} seconds".format(loop_delay)) time.sleep(loop_delay) diff --git a/tests/snmp/conftest.py b/tests/snmp/conftest.py index 2296391f51..71fddb9f17 100644 --- a/tests/snmp/conftest.py +++ b/tests/snmp/conftest.py @@ -2,6 +2,5 @@ from common.utilities import wait_until @pytest.fixture(scope="module", autouse=True) -def setup_check_snmp_ready(testbed_devices): - dut = testbed_devices['dut'] - assert wait_until(300, 20, dut.is_service_fully_started, "snmp"), "SNMP service is not running" +def setup_check_snmp_ready(duthost): + assert wait_until(300, 20, duthost.is_service_fully_started, "snmp"), "SNMP service is not running" diff --git a/tests/snmp/test_snmp_cpu.py b/tests/snmp/test_snmp_cpu.py index cf5f7f0e17..6090b4dfed 100644 --- a/tests/snmp/test_snmp_cpu.py +++ b/tests/snmp/test_snmp_cpu.py @@ -1,9 +1,9 @@ import pytest import time -from ansible_host import AnsibleHost + @pytest.mark.bsl -def test_snmp_cpu(ansible_adhoc, testbed, creds): +def test_snmp_cpu(duthost, localhost, creds): """ Test SNMP CPU Utilization @@ -15,34 +15,31 @@ def test_snmp_cpu(ansible_adhoc, testbed, creds): TODO: abstract the snmp OID by SKU """ - hostname = testbed['dut'] - ans_host = AnsibleHost(ansible_adhoc, hostname) - lhost = AnsibleHost(ansible_adhoc, 'localhost', True) - hostip = ans_host.host.options['inventory_manager'].get_host(hostname).vars['ansible_host'] - host_facts = ans_host.setup()['ansible_facts'] + hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] + host_facts = duthost.setup()['ansible_facts'] host_vcpus = int(host_facts['ansible_processor_vcpus']) # Gather facts with SNMP version 2 - snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"], is_dell=True)['ansible_facts'] + snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"], is_dell=True)['ansible_facts'] assert int(snmp_facts['ansible_ChStackUnitCpuUtil5sec']) try: for i in range(host_vcpus): - ans_host.shell("nohup yes > /dev/null 2>&1 & sleep 1") + duthost.shell("nohup yes > /dev/null 2>&1 & sleep 1") # Wait for load to reflect in SNMP time.sleep(20) # Gather facts with SNMP version 2 
-    snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"], is_dell=True)['ansible_facts']
+    snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"], is_dell=True)['ansible_facts']
 
     # Pull CPU utilization via shell
-    # Explanation: Run top command with 2 iterations, 5sec delay. 
+    # Explanation: Run top command with 2 iterations, 5sec delay.
     # Discard the first iteration, then grab the CPU line from the second,
     # subtract 100% - idle, and round down to integer.
-    output = ans_host.shell("top -bn2 -d5 | awk '/^top -/ { p=!p } { if (!p) print }' | awk '/Cpu/ { cpu = 100 - $8 };END { print cpu }' | awk '{printf \"%.0f\",$1}'")
+    output = duthost.shell("top -bn2 -d5 | awk '/^top -/ { p=!p } { if (!p) print }' | awk '/Cpu/ { cpu = 100 - $8 };END { print cpu }' | awk '{printf \"%.0f\",$1}'")
 
     print int(snmp_facts['ansible_ChStackUnitCpuUtil5sec'])
     print int(output['stdout'])
@@ -52,7 +49,7 @@ def test_snmp_cpu(ansible_adhoc, testbed, creds):
         if cpu_diff > 5:
             pytest.fail("cpu diff larger than 5%%, %d, %d" % (int(snmp_facts['ansible_ChStackUnitCpuUtil5sec']), int(output['stdout'])))
 
-        ans_host.shell("killall yes")
+        duthost.shell("killall yes")
     except:
-        ans_host.shell("killall yes")
+        duthost.shell("killall yes")
         raise
diff --git a/tests/snmp/test_snmp_interfaces.py b/tests/snmp/test_snmp_interfaces.py
index a43fb8d9ec..4e9771dfde 100644
--- a/tests/snmp/test_snmp_interfaces.py
+++ b/tests/snmp/test_snmp_interfaces.py
@@ -1,15 +1,13 @@
 import pytest
-from ansible_host import AnsibleHost
+
 
 @pytest.mark.bsl
-def test_snmp_interfaces(ansible_adhoc, duthost, creds):
+def test_snmp_interfaces(duthost, localhost, creds):
     """compare the bgp facts between observed states and target state"""
-    lhost = AnsibleHost(ansible_adhoc, 'localhost', True)
-
     hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
 
-    snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts']
+    snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts']
     config_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts']
 
     snmp_ifnames = [ v['name'] for k, v in snmp_facts['snmp_interfaces'].items() ]
@@ -22,7 +20,7 @@ def test_snmp_interfaces(ansible_adhoc, duthost, creds):
     # Verify all port channels in snmp interface list
     for po_name in config_facts.get('PORTCHANNEL', {}):
         assert po_name in snmp_ifnames
-
+
     # Verify management port in snmp interface list
     for name in config_facts.get('MGMT_INTERFACE', {}):
         assert name in snmp_ifnames
diff --git a/tests/snmp/test_snmp_lldp.py b/tests/snmp/test_snmp_lldp.py
index 86fbd85cfc..fba5b691ed 100644
--- a/tests/snmp/test_snmp_lldp.py
+++ b/tests/snmp/test_snmp_lldp.py
@@ -1,5 +1,5 @@
 import pytest
-from ansible_host import AnsibleHost
+
 
 @pytest.fixture(scope="module", autouse=True)
 def setup_check_topo(testbed):
@@ -7,13 +7,13 @@ def setup_check_topo(testbed):
         pytest.skip('Unsupported topology')
 
 @pytest.mark.bsl
-def test_snmp_lldp(ansible_adhoc, testbed, creds):
+def test_snmp_lldp(duthost, localhost, creds):
     """
     Test checks for ieee802_1ab MIBs:
      - lldpLocalSystemData  1.0.8802.1.1.2.1.3
      - lldpLocPortTable  1.0.8802.1.1.2.1.3.7
      - lldpLocManAddrTable  1.0.8802.1.1.2.1.3.8
-
+
      - lldpRemTable  1.0.8802.1.1.2.1.4.1
      - lldpRemManAddrTable  1.0.8802.1.1.2.1.4.2
@@ -22,13 +22,10 @@ def test_snmp_lldp(ansible_adhoc, testbed, creds):
     (similar to lldp test)
""" - hostname = testbed['dut'] - ans_host = AnsibleHost(ansible_adhoc, hostname) - lhost = AnsibleHost(ansible_adhoc, 'localhost', True) - hostip = ans_host.host.options['inventory_manager'].get_host(hostname).vars['ansible_host'] + hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] - mg_facts = ans_host.minigraph_facts(host=hostname)['ansible_facts'] + snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] print snmp_facts['snmp_lldp'] for k in ['lldpLocChassisIdSubtype', 'lldpLocChassisId', 'lldpLocSysName', 'lldpLocSysDesc']: @@ -37,7 +34,7 @@ def test_snmp_lldp(ansible_adhoc, testbed, creds): # Check if lldpLocPortTable is present for all ports for k, v in snmp_facts['snmp_interfaces'].items(): - if "Ethernet" in v['name'] or "eth" in v['name']: + if "Ethernet" in v['name'] or "eth" in v['name']: for oid in ['lldpLocPortIdSubtype', 'lldpLocPortId', 'lldpLocPortDesc']: assert v.has_key(oid) assert "No Such Object currently exists" not in v[oid] @@ -55,7 +52,7 @@ def test_snmp_lldp(ansible_adhoc, testbed, creds): if "server" not in v['name'].lower(): minigraph_lldp_nei.append(k) print minigraph_lldp_nei - + # Check if lldpRemTable is present active_intf = [] for k, v in snmp_facts['snmp_interfaces'].items(): @@ -71,10 +68,10 @@ def test_snmp_lldp(ansible_adhoc, testbed, creds): active_intf.append(k) print "lldpRemTable: ", active_intf - assert len(active_intf) >= len(minigraph_lldp_nei) * 0.8 + assert len(active_intf) >= len(minigraph_lldp_nei) * 0.8 # skip neighbors that do not send chassis information via lldp - lldp_facts = ans_host.lldp()['ansible_facts'] + lldp_facts = duthost.lldp()['ansible_facts'] nei = [k for k, v in lldp_facts['lldp'].items() if k != 'eth0' and v['chassis'].has_key('mgmt-ip') ] print "neighbors {} send chassis management IP information".format(nei) @@ -83,7 +80,8 @@ def test_snmp_lldp(ansible_adhoc, testbed, creds): for k, v in snmp_facts['snmp_interfaces'].items(): if v.has_key("lldpRemManAddrIfSubtype") and \ v.has_key("lldpRemManAddrIfId") and \ - v.has_key("lldpRemManAddrOID"): + v.has_key("lldpRemManAddrOID") and \ + v['name'] != 'eth0': active_intf.append(k) print "lldpRemManAddrTable: ", active_intf diff --git a/tests/snmp/test_snmp_pfc_counters.py b/tests/snmp/test_snmp_pfc_counters.py index 6dc614f8bf..8f9ae73a55 100644 --- a/tests/snmp/test_snmp_pfc_counters.py +++ b/tests/snmp/test_snmp_pfc_counters.py @@ -1,14 +1,10 @@ import pytest -from ansible_host import AnsibleHost -def test_snmp_pfc_counters(ansible_adhoc, testbed, creds): +def test_snmp_pfc_counters(duthost, localhost, creds): - hostname = testbed['dut'] - ans_host = AnsibleHost(ansible_adhoc, hostname) - lhost = AnsibleHost(ansible_adhoc, 'localhost', True) - hostip = ans_host.host.options['inventory_manager'].get_host(hostname).vars['ansible_host'] + hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] + snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] # Check PFC counters # Ignore management ports, assuming the names starting with 'eth', eg. 
eth0 diff --git a/tests/snmp/test_snmp_psu.py b/tests/snmp/test_snmp_psu.py index 3f82a2b6c4..593aca151b 100644 --- a/tests/snmp/test_snmp_psu.py +++ b/tests/snmp/test_snmp_psu.py @@ -1,16 +1,13 @@ import pytest -from ansible_host import AnsibleHost PSU_STATUS_OK = 2 @pytest.mark.bsl -def test_snmp_numpsu(testbed_devices, creds, duthost): +def test_snmp_numpsu(duthost, localhost, creds): - ans_host = testbed_devices['dut'] - lhost = testbed_devices['localhost'] - hostip = ans_host.host.options['inventory_manager'].get_host(ans_host.hostname).vars['ansible_host'] + hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] + snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] res = duthost.shell("psuutil numpsus") assert int(res[u'rc']) == 0, "Failed to get number of PSUs" @@ -19,13 +16,11 @@ def test_snmp_numpsu(testbed_devices, creds, duthost): @pytest.mark.bsl -def test_snmp_psu_status(testbed_devices, creds): +def test_snmp_psu_status(duthost, localhost, creds): - ans_host = testbed_devices['dut'] - lhost = testbed_devices['localhost'] - hostip = ans_host.host.options['inventory_manager'].get_host(ans_host.hostname).vars['ansible_host'] + hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] - snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] + snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] for k, v in snmp_facts['snmp_psu'].items(): if int(v['operstatus']) != PSU_STATUS_OK: diff --git a/tests/snmp/test_snmp_queue.py b/tests/snmp/test_snmp_queue.py index 0543dae036..54f1b3623f 100644 --- a/tests/snmp/test_snmp_queue.py +++ b/tests/snmp/test_snmp_queue.py @@ -1,14 +1,11 @@ import pytest -from ansible_host import AnsibleHost -def test_snmp_queues(ansible_adhoc, testbed, creds, collect_techsupport): - hostname = testbed['dut'] - ans_host = AnsibleHost(ansible_adhoc, hostname) - lhost = AnsibleHost(ansible_adhoc, 'localhost', True) - hostip = ans_host.host.options['inventory_manager'].get_host(hostname).vars['ansible_host'] +def test_snmp_queues(duthost, localhost, creds, collect_techsupport): - snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] + hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] + + snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts'] for k, v in snmp_facts['snmp_interfaces'].items(): if "Ethernet" in v['description']: diff --git a/tests/tacacs/test_ro_user.py b/tests/tacacs/test_ro_user.py index 6a047d5f02..a4b1ddcc13 100644 --- a/tests/tacacs/test_ro_user.py +++ b/tests/tacacs/test_ro_user.py @@ -5,9 +5,8 @@ pytest.mark.disable_loganalyzer, ] -def test_ro_user(testbed_devices, duthost, creds, setup_tacacs): +def test_ro_user(localhost, duthost, creds, setup_tacacs): - localhost = localhost = testbed_devices['localhost'] dutip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] res = localhost.shell("sshpass -p {} ssh "\ "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "\ diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py new file 
mode 100644 index 0000000000..a5bef9658b --- /dev/null +++ b/tests/telemetry/test_telemetry.py @@ -0,0 +1,62 @@ +from common.helpers.assertions import pytest_assert + +# Helper functions +def get_dict_stdout(gnmi_out, certs_out): + """ Extracts dictionary from redis output. + """ + gnmi_list = [] + gnmi_list = get_list_stdout(gnmi_out) + get_list_stdout(certs_out) + # Elements in list alternate between key and value. Separate them and combine into a dict. + key_list = gnmi_list[0::2] + value_list = gnmi_list[1::2] + params_dict = dict(zip(key_list, value_list)) + return params_dict + +def get_list_stdout(cmd_out): + out_list = [] + for x in cmd_out: + result = x.encode('UTF-8') + out_list.append(result) + return out_list + +# Test functions +def test_config_db_parameters(duthost): + """Verifies required telemetry parameters from config_db. + """ + gnmi = duthost.shell('/usr/bin/redis-cli -n 4 hgetall "TELEMETRY|gnmi"', module_ignore_errors=False)['stdout_lines'] + pytest_assert(gnmi is not None, "TELEMETRY|gnmi does not exist in config_db") + + certs = duthost.shell('/usr/bin/redis-cli -n 4 hgetall "TELEMETRY|certs"', module_ignore_errors=False)['stdout_lines'] + pytest_assert(certs is not None, "TELEMETRY|certs does not exist in config_db") + + d = get_dict_stdout(gnmi, certs) + for key, value in d.items(): + if str(key) == "client_auth": + client_auth_expected = "true" + pytest_assert(str(value) == client_auth_expected, "'client_auth' value is not '{}'".format(client_auth_expected)) + if str(key) == "port": + port_expected = "50051" + pytest_assert(str(value) == port_expected, "'port' value is not '{}'".format(port_expected)) + if str(key) == "ca_crt": + ca_crt_value_expected = "/etc/sonic/telemetry/dsmsroot.cer" + pytest_assert(str(value) == ca_crt_value_expected, "'ca_crt' value is not '{}'".format(ca_crt_value_expected)) + if str(key) == "server_key": + server_key_expected = "/etc/sonic/telemetry/streamingtelemetryserver.key" + pytest_assert(str(value) == server_key_expected, "'server_key' value is not '{}'".format(server_key_expected)) + if str(key) == "server_crt": + server_crt_expected = "/etc/sonic/telemetry/streamingtelemetryserver.cer" + pytest_assert(str(value) == server_crt_expected, "'server_crt' value is not '{}'".format(server_crt_expected)) + +def test_telemetry_enabledbydefault(duthost): + """Verify telemetry should be enabled by default + """ + status = duthost.shell('/usr/bin/redis-cli -n 4 hgetall "FEATURE|telemetry"', module_ignore_errors=False)['stdout_lines'] + status_list = get_list_stdout(status) + # Elements in list alternate between key and value. Separate them and combine into a dict. 
+    status_key_list = status_list[0::2]
+    status_value_list = status_list[1::2]
+    status_dict = dict(zip(status_key_list, status_value_list))
+    for k, v in status_dict.items():
+        if str(k) == "status":
+            status_expected = "enabled"
+            pytest_assert(str(v) == status_expected, "Telemetry feature is not enabled")
diff --git a/tests/templates/ptf_nn_agent.conf.dut.j2 b/tests/templates/ptf_nn_agent.conf.dut.j2
new file mode 100644
index 0000000000..7e327fde45
--- /dev/null
+++ b/tests/templates/ptf_nn_agent.conf.dut.j2
@@ -0,0 +1,10 @@
+[program:ptf_nn_agent]
+command=/usr/bin/python /opt/ptf_nn_agent.py --device-socket 1@tcp://0.0.0.0:10900 -i 1-{{ nn_target_port }}@{{ nn_target_interface }} --set-nn-rcv-buffer=109430400 --set-iface-rcv-buffer=109430400 --set-nn-snd-buffer=109430400 --set-iface-snd-buffer=109430400
+process_name=ptf_nn_agent
+stdout_logfile=/tmp/ptf_nn_agent.out.log
+stderr_logfile=/tmp/ptf_nn_agent.err.log
+redirect_stderr=false
+autostart=true
+autorestart=true
+startsecs=1
+numprocs=1
diff --git a/tests/templates/ptf_nn_agent.conf.ptf.j2 b/tests/templates/ptf_nn_agent.conf.ptf.j2
new file mode 100644
index 0000000000..bb1282bc4a
--- /dev/null
+++ b/tests/templates/ptf_nn_agent.conf.ptf.j2
@@ -0,0 +1,10 @@
+[program:ptf_nn_agent]
+command=/usr/bin/python /opt/ptf_nn_agent.py --device-socket 0@tcp://127.0.0.1:10900 -i 0-{{ nn_target_port }}@eth{{ nn_target_port }}
+process_name=ptf_nn_agent
+stdout_logfile=/tmp/ptf_nn_agent.out.log
+stderr_logfile=/tmp/ptf_nn_agent.err.log
+redirect_stderr=false
+autostart=true
+autorestart=true
+startsecs=1
+numprocs=1
diff --git a/tests/test_features.py b/tests/test_features.py
new file mode 100644
index 0000000000..a6058cb59a
--- /dev/null
+++ b/tests/test_features.py
@@ -0,0 +1,34 @@
+# Helper Functions
+def get_dict_stdout(cmd_out):
+    """Extract dictionary from show features command output
+    """
+    result = ""
+    out_dict = {}
+    cmd = cmd_out[2:]
+    for x in cmd:
+        result = x.encode('UTF-8')
+        r = result.split()
+        out_dict[r[0]] = r[1]
+    return out_dict
+
+def get_status_redisout(status_out):
+    """Extract status value for feature in redis
+    """
+    status_list = status_out[1:]
+    status = ""
+    for s in status_list:
+        status = s.encode('UTF-8')
+    return status
+
+# Test Functions
+def test_show_features(duthost):
+    """Verify show features command output against CONFIG_DB
+    """
+    features_stdout = duthost.shell('show features', module_ignore_errors=False)['stdout_lines']
+    features_dict = get_dict_stdout(features_stdout)
+    for cmd_key, cmd_value in features_dict.items():
+        feature = str(cmd_key)
+        status_out = duthost.shell('/usr/bin/redis-cli -n 4 hgetall "FEATURE|{}"'.format(feature), module_ignore_errors=False)['stdout_lines']
+        redis_value = get_status_redisout(status_out)
+        status_value_expected = str(cmd_value)
+        assert str(redis_value) == status_value_expected, "'{}' is '{}' which does not match with config_db".format(cmd_key, cmd_value)
diff --git a/tests/test_interfaces.py b/tests/test_interfaces.py
index 3131a800fb..bb59cc6bd6 100644
--- a/tests/test_interfaces.py
+++ b/tests/test_interfaces.py
@@ -1,15 +1,11 @@
-from ansible_host import AnsibleHost
 from netaddr import IPAddress
 import pytest
 
-def test_interfaces(ansible_adhoc, testbed):
+def test_interfaces(duthost):
     """compare the interfaces between observed states and target state"""
-    hostname = testbed['dut']
-    ans_host = AnsibleHost(ansible_adhoc, hostname)
-
-    host_facts = ans_host.setup()['ansible_facts']
-    mg_facts = ans_host.minigraph_facts(host=hostname)['ansible_facts']
+
host_facts = duthost.setup()['ansible_facts'] + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] verify_port(host_facts, mg_facts['minigraph_portchannels'].keys()) for k, v in mg_facts['minigraph_portchannels'].items(): @@ -36,7 +32,7 @@ def verify_port(host_facts, ports): for port in ports: ans_ifname = "ansible_%s" % port assert host_facts[ans_ifname]['active'] - + def verify_ip_address(host_facts, intfs): for intf in intfs: if intf.has_key('attachto'): diff --git a/tests/test_lag_2.py b/tests/test_lag_2.py deleted file mode 100644 index a8c1e843de..0000000000 --- a/tests/test_lag_2.py +++ /dev/null @@ -1,220 +0,0 @@ -import pytest - -import json -import time -import logging -import os - -from ptf_runner import ptf_runner -from common.devices import AnsibleHostBase -from common.fixtures.conn_graph_facts import conn_graph_facts -from common.utilities import wait_until - -@pytest.fixture(scope="module") -def common_setup_teardown(duthost, ptfhost, testbed, conn_graph_facts): - logging.info("########### Setup for lag testing ###########") - - lag_facts = duthost.lag_facts(host = duthost.hostname)['ansible_facts']['lag_facts'] - fanout_neighbors = conn_graph_facts['device_conn'] - - if lag_facts['names'] == []: - pytest.skip("No lag configuration found in %s" % duthost.hostname) - - mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] - vm_neighbors = mg_facts['minigraph_neighbors'] - - # Copy PTF test into PTF-docker for test LACP DU - test_files = ['lag_test.py', 'acs_base_test.py', 'router_utils.py'] - for test_file in test_files: - src = "../ansible/roles/test/files/acstests/%s" % test_file - dst = "/tmp/%s" % test_file - ptfhost.copy(src=src, dest=dst) - - # Copy tests to the PTF-docker - ptfhost.copy(src="ptftests", dest="/root") - - # Inlucde testbed topology configuration - testbed_type = testbed['topo']['name'] - - support_testbed_types = frozenset(['t1-lag', 't0', 't0-116']) - if testbed_type not in support_testbed_types: - pytest.skip("Not support given test bed type %s" % testbed_type) - - yield duthost, ptfhost, vm_neighbors, mg_facts, lag_facts, fanout_neighbors - -def test_lag_2(common_setup_teardown, nbrhosts): - duthost, ptfhost, vm_neighbors, mg_facts, lag_facts, fanout_neighbors = common_setup_teardown - - # Test for each lag - for lag_name in lag_facts['names']: - try: - lag_facts['lags'][lag_name]['po_config']['runner']['min_ports'] - except: - logging.info("Skip [check_single_lap_lacp_rate] for lag (%s) due to min_ports not exists" % lag_name) - logging.info("Skip [check_single_lap] for lag (%s) due to min_ports not exists" % lag_name) - continue - else: - check_single_lag_lacp_rate(common_setup_teardown, nbrhosts, lag_name) - check_single_lag(common_setup_teardown, nbrhosts, lag_name) - -def check_single_lag_lacp_rate(common_setup_teardown, nbrhosts, lag_name): - duthost, ptfhost, vm_neighbors, mg_facts, lag_facts, fanout_neighbors = common_setup_teardown - logging.info("Start checking single lap lacp rate for: %s" % lag_name) - - intf, po_interfaces = get_lag_intfs(lag_facts, lag_name) - peer_device = vm_neighbors[intf]['name'] - - # Prepare for the remote VM interfaces that using PTF docker to check if the LACP DU packet rate is correct - iface_behind_lag_member = [] - for neighbor_int in mg_facts['minigraph_neighbors'].keys(): - if peer_device == mg_facts['minigraph_neighbors'][neighbor_int]['name']: - iface_behind_lag_member.append(mg_facts['minigraph_port_indices'][neighbor_int]) - - neighbor_lag_intfs = [] - for 
po_interface in po_interfaces: - neighbor_lag_intfs.append(vm_neighbors[po_interface]['port']) - - try: - lag_rate_current_setting = None - - # Get the vm host(veos) by it host name - vm_host = nbrhosts[peer_device] - - # Make sure all lag members on VM are set to fast - logging.info("Changing lacp rate to fast for %s" % neighbor_lag_intfs[0]) - set_interface_lacp_rate(vm_host, neighbor_lag_intfs[0], 'fast') - lag_rate_current_setting = 'fast' - time.sleep(5) - for iface_behind_lag in iface_behind_lag_member: - verify_lag_lacp_timing(ptfhost, peer_device, 1, iface_behind_lag) - - # Make sure all lag members on VM are set to slow - set_interface_lacp_rate(vm_host, neighbor_lag_intfs[0], 'normal') - lag_rate_current_setting = 'slow' - time.sleep(5) - for iface_behind_lag in iface_behind_lag_member: - verify_lag_lacp_timing(ptfhost, peer_device, 30, iface_behind_lag) - finally: - # Restore lag rate setting on VM in case of failure - if lag_rate_current_setting == 'fast': - set_interface_lacp_rate(vm_host, neighbor_lag_intfs[0], 'normal') - -def check_single_lag(common_setup_teardown, nbrhosts, lag_name): - duthost, ptfhost, vm_neighbors, mg_facts, lag_facts, fanout_neighbors = common_setup_teardown - logging.info("Start checking single lap for: %s" % lag_name) - - intf, po_interfaces = get_lag_intfs(lag_facts, lag_name) - po_flap = check_flap(lag_facts, lag_name) - - # Figure out fanout switches info if exists for the lag member and run minlink test - if intf in fanout_neighbors.keys(): - peer_device = fanout_neighbors[intf]['peerdevice'] - neighbor_interface = fanout_neighbors[intf]['peerport'] - vm_host = nbrhosts[peer_device] - verify_lag_minlink(duthost, vm_host, lag_name, peer_device, intf, neighbor_interface, po_interfaces, po_flap, deselect_time=5) - - # Figure out remote VM and interface info for the lag member and run minlink test - peer_device = vm_neighbors[intf]['name'] - neighbor_interface = vm_neighbors[intf]['port'] - vm_host = nbrhosts[peer_device] - verify_lag_minlink(duthost, vm_host, lag_name, peer_device, intf, neighbor_interface, po_interfaces, po_flap, deselect_time=95) - -def verify_lag_lacp_timing(ptfhost, vm_name, lacp_timer, exp_iface): - if exp_iface is None: - return - - # Check LACP timing - params = { - 'exp_iface': exp_iface, - 'timeout': 35, - 'packet_timing': lacp_timer, - 'ether_type': 0x8809, - 'interval_count': 3 - } - ptf_runner(ptfhost, '/tmp', "lag_test.LacpTimingTest", '/root/ptftests', params=params) - -def verify_lag_minlink( - duthost, - vm_host, - lag_name, - peer_device, - intf, - neighbor_interface, - po_interfaces, - po_flap, - deselect_time, - wait_timeout = 30): - - delay = 5 - retries = wait_timeout / delay - try: - set_neighbor_interface(vm_host, neighbor_interface, shut=True) - - # Let PortalChannel react to neighbor interface shutdown - time.sleep(deselect_time) - - # Verify PortChannel interfaces are up correctly - for po_intf in po_interfaces.keys(): - if po_intf != intf: - command = 'bash -c "teamdctl %s state dump" | python -c "import sys, json; print json.load(sys.stdin)[\'ports\'][\'%s\'][\'runner\'][\'selected\']"' % (lag_name, po_intf) - wait_until(wait_timeout, delay, check_shell_output, duthost, command) - - # Refresh lag facts - lag_facts = duthost.lag_facts(host = duthost.hostname)['ansible_facts']['lag_facts'] - - # Verify lag member is marked deselected for the shutdown porta and all other lag member interfaces are marked selected - for po_intf in po_interfaces.keys(): - if po_intf != intf: - assert 
lag_facts['lags'][lag_name]['po_stats']['ports'][po_intf]['runner']['selected'] - else: - assert not lag_facts['lags'][lag_name]['po_stats']['ports'][po_intf]['runner']['selected'] - - # Verify PortChannel's interface are marked down/up correctly if it should down/up - if po_flap == True: - assert lag_facts['lags'][lag_name]['po_intf_stat'] == 'Down' - else: - assert lag_facts['lags'][lag_name]['po_intf_stat'] == 'Up' - finally: - # Bring back port in case test error and left testbed in unknow stage - # Bring up neighbor interface - set_neighbor_interface(vm_host, neighbor_interface, shut=False) - - # Verify PortChannel interfaces are up correctly - for po_intf in po_interfaces.keys(): - if po_intf != intf: - command = 'bash -c "teamdctl %s state dump" | python -c "import sys, json; print json.load(sys.stdin)[\'ports\'][\'%s\'][\'link\'][\'up\']"' % (lag_name, po_intf) - wait_until(wait_timeout, delay, check_shell_output, duthost, command) - - # Refresh lag facts - lag_facts = duthost.lag_facts(host = duthost.hostname)['ansible_facts']['lag_facts'] - for po_intf in po_interfaces.keys(): - assert lag_facts['lags'][lag_name]['po_stats']['ports'][po_intf]['runner']['selected'] == True - - assert lag_facts['lags'][lag_name]['po_intf_stat'] == 'Up' - -def get_lag_intfs(lag_facts, lag_name): - # Figure out interface informations - po_interfaces = lag_facts['lags'][lag_name]['po_config']['ports'] - intf = lag_facts['lags'][lag_name]['po_config']['ports'].keys()[0] - return intf, po_interfaces - -def check_flap(lag_facts, lag_name): - po_intf_num = len(lag_facts['lags'][lag_name]['po_config']['ports']) - po_min_links = lag_facts['lags'][lag_name]['po_config']['runner']['min_ports'] - return ((po_intf_num - 1) * 100 / po_min_links) < 75 - -def set_interface_lacp_rate(vm_host, intf, mode): - vm_host.eos_config( - lines=['lacp rate %s' % mode], - parents='interface %s' % intf) - logging.info("Set interface [%s] lacp rate to [%s]" % (intf, mode)) - -def set_neighbor_interface(vm_host, neighbor_interface, shut): - vm_host.eos_config( - lines=['%sshutdown' % ('' if shut else 'no ')], - parents='interface %s' % neighbor_interface) - logging.info('%s interface [%s]' % ('Shut' if shut else 'No shut', neighbor_interface)) - -def check_shell_output(host, command): - out = host.shell(command) - return out['stdout'] == 'True' \ No newline at end of file diff --git a/tests/test_mgmtvrf.py b/tests/test_mgmtvrf.py index 8dada6c209..cf6b7c5262 100644 --- a/tests/test_mgmtvrf.py +++ b/tests/test_mgmtvrf.py @@ -4,21 +4,20 @@ from common import reboot from common.utilities import wait_until import re -from ansible_host import AnsibleModuleException import logging logger = logging.getLogger(__name__) @pytest.fixture(scope='module',autouse=True) -def setup_mvrf(duthost, testbed_devices, testbed, localhost): +def setup_mvrf(duthost, testbed, localhost): ''' Setup Management vrf configs before the start of testsuite ''' logging.info(' Configure mgmt vrf') global var - global mvrf - mvrf = True + global mvrf + mvrf = True var = {} var['dut_ip'] = duthost.setup()['ansible_facts']['ansible_eth0']['ipv4']['address'] var['ptf_ip'] = testbed['ptf_ip'] @@ -34,8 +33,8 @@ def setup_mvrf(duthost, testbed_devices, testbed, localhost): timeout=90) time.sleep(5) verify_show_command(duthost) - yield - mvrf = False + yield + mvrf = False logging.info(' Unconfigure mgmt vrf') duthost.copy(src="mvrf/config_vrf_del.sh",dest="/tmp/config_vrf_del.sh",mode=0755) duthost.shell("nohup /tmp/config_vrf_del.sh < /dev/null > /dev/null 2>&1 &") @@ 
-56,39 +55,37 @@ def verify_show_command(duthost, mvrf=True): show_mgmt_vrf=duthost.shell('show mgmt-vrf')['stdout'] mvrf_interfaces = {} logging.debug("show mgmt vrf \n {}".format(show_mgmt_vrf)) - if mvrf: + if mvrf: mvrf_interfaces['mgmt'] = "\d+:\s+mgmt:\s+ mtu\s+\d+\s+qdisc\s+noqueue\s+state\s+UP" mvrf_interfaces['vrf_table'] = "vrf table 5000" mvrf_interfaces['eth0'] = "\d+:\s+eth0+:\s+.*master mgmt\s+state\s+UP " - mvrf_interfaces['lo'] = "\d+:\s+lo-m:\s+.*master mgmt" + mvrf_interfaces['lo'] = "\d+:\s+lo-m:\s+.*master mgmt" assert "ManagementVRF : Enabled" in show_mgmt_vrf for intf , pattern in mvrf_interfaces.items(): assert re.search(pattern,show_mgmt_vrf) is not None - else: + else: assert "ManagementVRF : Disabled" in show_mgmt_vrf def execute_dut_command(duthost, command, mvrf = True, ignore_errors=False): - result = {} + result = {} prefix = "" - if mvrf: + if mvrf: prefix = "sudo cgexec -g l3mdev:mgmt " result=duthost.command(prefix+command, module_ignore_errors=ignore_errors) return result - + class TestMvrfInbound(): - def test_ping(self,duthost, localhost): + def test_ping(self, duthost, localhost): res = duthost.ping() - def test_snmp_fact(self,testbed_devices): - localhost = testbed_devices['localhost'] - duthost = testbed_devices['dut'] - snmp_res = localhost.snmp_facts(host=var['dut_ip'],version='v2c',community='public' ) - -class TestMvrfOutbound(): + def test_snmp_fact(self, localhost): + snmp_res = localhost.snmp_facts(host=var['dut_ip'],version='v2c',community='public' ) + +class TestMvrfOutbound(): - @pytest.fixture + @pytest.fixture def apt_install_wget(self, duthost): logging.info("apt-get update , apt-get install wget") apt_update_cmd = ' apt-get update -y' @@ -96,16 +93,16 @@ def apt_install_wget(self, duthost): apt_remove = ' apt-get remove wget -y' execute_dut_command(duthost, apt_update_cmd, mvrf=True) execute_dut_command(duthost, apt_install_wget, mvrf=True) - yield + yield logging.info(" remove wget") duthost.file(path=var['filename'], state='absent') execute_dut_command(duthost, apt_remove, mvrf = True) def test_ping(self, testbed, duthost): logging.info("Test OutBound Ping") - command = "ping -c 3 " + var['ptf_ip'] + command = "ping -c 3 " + var['ptf_ip'] result = execute_dut_command(duthost, command, mvrf=True) - + def test_wget(self, duthost, apt_install_wget): logging.info("Test Wget") wget_command=" wget https://github.com/raw/Azure/SONiC/master/README.md" @@ -125,16 +122,16 @@ def test_curl(self, duthost): class TestServices(): - def check_ntp_status(self, duthost): + def check_ntp_status(self, duthost): ntpstat_cmd = "ntpstat" ntp_stat = execute_dut_command(duthost,ntpstat_cmd,mvrf=True,ignore_errors=True) if ntp_stat['rc'] != 0 : - return False - return True + return False + return True def test_ntp(self, duthost): force_ntp=" ntpd -gq" - duthost.service(name='ntp' , state='stopped') + duthost.service(name='ntp' , state='stopped') logging.info("Ntp restart in mgmt vrf") execute_dut_command(duthost, force_ntp) duthost.service(name='ntp' , state='restarted') @@ -146,8 +143,8 @@ def test_service_acl(self, duthost, localhost): SONIC_SSH_PORT = 22 SONIC_SSH_REGEX = 'OpenSSH_[\\w\\.]+ Debian' dut_ip = duthost.setup()['ansible_facts']['ansible_eth0']['ipv4']['address'] - duthost.copy(src="mvrf/config_service_acls.sh",dest="/tmp/config_service_acls.sh",mode=0755) - duthost.shell("nohup /tmp/config_service_acls.sh < /dev/null > /dev/null 2>&1 &") + duthost.copy(src="mvrf/config_service_acls.sh",dest="/tmp/config_service_acls.sh",mode=0755) + 
duthost.shell("nohup /tmp/config_service_acls.sh < /dev/null > /dev/null 2>&1 &") time.sleep(5) logger.info('waiting for ssh to drop') res = localhost.wait_for(host=dut_ip, @@ -161,42 +158,39 @@ def test_service_acl(self, duthost, localhost): state='started', search_regex=SONIC_SSH_REGEX, timeout=90) - time.sleep(20) + time.sleep(20) duthost.file(path="/tmp/config_service_acls.sh",state='absent') class TestReboot(): - def basic_check_after_reboot(self, duthost, localhost, testbed_devices, testbed): + def basic_check_after_reboot(self, duthost, localhost, testbed): verify_show_command(duthost) inbound_test = TestMvrfInbound() outbound_test = TestMvrfOutbound() outbound_test.test_ping(testbed,duthost) inbound_test.test_ping(duthost,localhost) - inbound_test.test_snmp_fact(testbed_devices) + inbound_test.test_snmp_fact(localhost) - def test_warmboot(self, localhost, testbed_devices, testbed): + def test_warmboot(self, duthost, localhost, testbed): - duthost = testbed_devices["dut"] duthost.command('sudo config save -y') reboot(duthost, localhost, reboot_type='warm') assert wait_until(120, 20, duthost.critical_services_fully_started), "Not all critical services are fully started" - self.basic_check_after_reboot(duthost, localhost, testbed_devices, testbed) + self.basic_check_after_reboot(duthost, localhost, testbed) - def test_reboot(self, localhost, testbed_devices, testbed): - duthost = testbed_devices["dut"] + def test_reboot(self, duthost, localhost, testbed): duthost.command('sudo config save -y') reboot(duthost, localhost) assert wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started" - self.basic_check_after_reboot(duthost, localhost, testbed_devices, testbed) + self.basic_check_after_reboot(duthost, localhost, testbed) + + def test_fastboot(self, duthost, localhost, testbed): - def test_fastboot(self, localhost, testbed_devices, testbed): - - duthost = testbed_devices["dut"] duthost.command('sudo config save -y') reboot(duthost, localhost,reboot_type='fast') assert wait_until(300, 20, duthost.critical_services_fully_started), "Not all critical services are fully started" - self.basic_check_after_reboot(duthost, localhost, testbed_devices, testbed) + self.basic_check_after_reboot(duthost, localhost, testbed) diff --git a/tests/test_nbr_health.py b/tests/test_nbr_health.py index 1a875e19d6..1fe0ea3329 100644 --- a/tests/test_nbr_health.py +++ b/tests/test_nbr_health.py @@ -47,11 +47,10 @@ def check_bgp_facts(hostname, host): if not res.has_key('stdout_lines') or u'BGP summary' not in res['stdout_lines'][0][0]: return "neighbor {} bgp not configured correctly".format(hostname) -def test_neighbors_health(duthost, testbed_devices, nbrhosts, eos): +def test_neighbors_health(duthost, localhost, nbrhosts, eos): """Check each neighbor device health""" fails = [] - localhost = testbed_devices['localhost'] config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] nei_meta = config_facts.get('DEVICE_NEIGHBOR_METADATA', {}) for k, v in nei_meta.items(): @@ -59,7 +58,7 @@ def test_neighbors_health(duthost, testbed_devices, nbrhosts, eos): if failmsg: fails.append(failmsg) - eoshost = nbrhosts[k] + eoshost = nbrhosts[k]['host'] failmsg = check_eos_facts(k, v['mgmt_addr'], eoshost) if failmsg: fails.append(failmsg) diff --git a/tests/test_procdockerstatsd.py b/tests/test_procdockerstatsd.py new file mode 100644 index 0000000000..3e5503608f --- /dev/null +++ b/tests/test_procdockerstatsd.py @@ -0,0 +1,28 @@ +from 
common.helpers.assertions import pytest_assert
+
+# Helper Functions
+def get_count_fromredisout(keys_out):
+    """Extract keys count from redis output
+    """
+    count = ""
+    for s in keys_out:
+        count = s.encode('UTF-8')
+    return count
+
+# Test Functions
+def test_verify_status(duthost):
+    """Verify procdockerstatsd is active and running
+    """
+    status = duthost.get_service_props('procdockerstatsd')
+    pytest_assert(status["ActiveState"] == "active" and status["SubState"] == "running", "Procdockerstatsd either not active or not running")
+
+def test_verify_redisexport(duthost):
+    """Verify procdockerstatsd is exporting values to redis.
+    """
+    docker_stdout = duthost.shell('/usr/bin/redis-cli -n 6 KEYS "DOCKER_STATS|*" | wc -l', module_ignore_errors=False)['stdout_lines']
+    docker_keys_count = get_count_fromredisout(docker_stdout)
+    process_stdout = duthost.shell('/usr/bin/redis-cli -n 6 KEYS "PROCESS_STATS|*" | wc -l', module_ignore_errors=False)['stdout_lines']
+    process_keys_count = get_count_fromredisout(process_stdout)
+    # If entries for process or docker data are found, the daemon upload is successful
+    pytest_assert(int(docker_keys_count) > 1, "No docker data upload found from procdockerstatsd daemon in state_db")
+    pytest_assert(int(process_keys_count) > 1, "No process data upload found from procdockerstatsd daemon in state_db")
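The same export can be spot-checked by hand on the DUT; a sketch, not part of the patch, mirroring the exact commands the test runs:

systemctl is-active procdockerstatsd              # expect 'active'
redis-cli -n 6 KEYS "DOCKER_STATS|*" | wc -l      # > 1 once docker stats are exported
redis-cli -n 6 KEYS "PROCESS_STATS|*" | wc -l     # > 1 once process stats are exported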
diff --git a/tests/test_vrf.py b/tests/test_vrf.py
index 6b9384d971..cab28000d4 100644
--- a/tests/test_vrf.py
+++ b/tests/test_vrf.py
@@ -220,31 +220,31 @@ def check_bgp_facts(duthost, cfg_facts):
 
 # FIXME later may move to "common.reboot"
 #
-# The reason to introduce a new 'reboot' here is due to 
+# The reason to introduce a new 'reboot' here is due to
 # the difference of fixture 'localhost' between the two 'reboot' functions.
-#
-# 'common.reboot' request *ansible_fixtures.localhost*, 
+#
+# 'common.reboot' requests *ansible_fixtures.localhost*,
 # but here it requests *common.devices.Localhost*.
 def reboot(duthost, localhost, timeout=120, basic_check=True):
     duthost.shell("nohup reboot &")
 
     dut_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).address
-
+
     logging.info('waiting for dut to go down')
-    res = localhost.wait_for(host=dut_ip, 
-                             port=22, 
-                             state="stopped", 
-                             delay=10, 
+    res = localhost.wait_for(host=dut_ip,
+                             port=22,
+                             state="stopped",
+                             delay=10,
                              timeout=timeout,
                              module_ignore_errors=True)
     if res.is_failed:
         raise Exception('DUT did not shutdown in {}s'.format(timeout))
 
     logging.info('waiting for dut to startup')
-    res = localhost.wait_for(host=dut_ip, 
-                             port=22, 
-                             state="started", 
-                             delay=10, 
+    res = localhost.wait_for(host=dut_ip,
+                             port=22,
+                             state="started",
+                             delay=10,
                              timeout=timeout,
                              module_ignore_errors=True)
     if res.is_failed:
@@ -382,10 +382,6 @@ def gen_vrf_neigh_file(vrf, ptfhost, render_file):
     ptfhost.template(src="vrf/vrf_neigh.j2", dest=render_file)
 
 # fixtures
-@pytest.fixture(scope="module")
-def localhost(testbed_devices):
-    return testbed_devices['localhost']
-
 @pytest.fixture(scope="module")
 def host_facts(duthost):
     return get_host_facts(duthost)
@@ -771,7 +767,7 @@ def test_ping_vrf1_loopback(self, ptfhost, duthost):
         for ip in ips:
             if ip.version == 4:
                 # FIXME Within a vrf, currently ping(4) does not support using
-                # an ip of loopback intface as source(it complains 'Cannot assign 
+                # an ip of loopback intface as source(it complains 'Cannot assign
                 # requested address'). An alternative is ping the loopback address
                 # from ptf
                 ptfhost.shell("ip netns exec {} ping {} -c 3 -f -W2".format(g_vars['vlan_peer_vrf2ns_map']['Vrf1'], ip.ip))
@@ -784,7 +780,7 @@ def test_ping_vrf2_loopback(self, ptfhost, duthost):
         for ip in ips:
             if ip.version == 4:
                 # FIXME Within a vrf, currently ping(4) does not support using
-                # an ip of loopback intface as source(it complains 'Cannot assign 
+                # an ip of loopback intface as source(it complains 'Cannot assign
                 # requested address'). An alternative is ping the loopback address
                 # from ptf
                 ptfhost.shell("ip netns exec {} ping {} -c 3 -f -W2".format(g_vars['vlan_peer_vrf2ns_map']['Vrf2'], ip.ip))
@@ -1391,7 +1387,7 @@ def setup_vrf_deletion(self, duthost, ptfhost, testbed, cfg_facts):
     def setup_vrf_restore(self, duthost, cfg_facts):
         self.restore_vrf(duthost)
         self.c_vars['restore_vrf'] = False  # Mark to skip restore vrf during teardown
-
+
         # check bgp session state after restore
         assert wait_until(120, 10, check_bgp_facts, duthost, cfg_facts), \
             "Bgp sessions should be re-established after restore Vrf1"
diff --git a/tests/testbed_setup/conftest.py b/tests/testbed_setup/conftest.py
new file mode 100644
index 0000000000..a9cd374667
--- /dev/null
+++ b/tests/testbed_setup/conftest.py
@@ -0,0 +1,15 @@
+from setup_args.populate_fdb_args import add_populate_fdb_args
+from common.fixtures.populate_fdb import populate_fdb
+
+# FDB pytest arguments
+def pytest_addoption(parser):
+    """
+    Adds option to FDB pytest
+
+    Args:
+        parser: pytest parser object
+
+    Returns:
+        None
+    """
+    add_populate_fdb_args(parser)
diff --git a/tests/testbed_setup/setup_args/__init__.py b/tests/testbed_setup/setup_args/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/testbed_setup/setup_args/populate_fdb_args.py b/tests/testbed_setup/setup_args/populate_fdb_args.py
new file mode 100644
index 0000000000..7ca658505d
--- /dev/null
+++ b/tests/testbed_setup/setup_args/populate_fdb_args.py
@@ -0,0 +1,35 @@
+# Populate FDB Args file
+
+def add_populate_fdb_args(parser):
+    '''
+    Adding arguments required for populate fdb test cases
+
+    Args:
+        parser: pytest parser object
+
+    Returns:
+        None
+    '''
+    parser.addoption(
+        '--mac_to_ip_ratio',
+        action='store',
+        type=str,
+        default='100:1',
+        help='Ratio of distinct MAC addresses to distinct IP addresses assigned to VM',
+    )
+
+    parser.addoption(
+        '--start_mac',
+        action='store',
+        type=str,
+        default='00:25:ae:22:11:00',
+        help='VM start MAC address. Subsequent MAC addresses are incremented by 1 on top of the start MAC',
+    )
+
+    parser.addoption(
+        '--packet_count',
+        action='store',
+        type=int,
+        default=2000,
+        help='Number of packets to be created and sent to DUT',
+    )
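Since these are ordinary pytest options, a populate-fdb run can be parameterized straight from the command line; a usage sketch, not part of the patch (testbed and inventory arguments omitted for brevity):

pytest testbed_setup/test_populate_fdb.py \
    --mac_to_ip_ratio 100:1 \
    --start_mac 00:25:ae:22:11:00 \
    --packet_count 2000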
diff --git a/tests/testbed_setup/test_populate_fdb.py b/tests/testbed_setup/test_populate_fdb.py
new file mode 100644
index 0000000000..961aabd84a
--- /dev/null
+++ b/tests/testbed_setup/test_populate_fdb.py
@@ -0,0 +1,15 @@
+import pytest
+
+def test_populate_fdb(populate_fdb):
+    """
+    Populates DUT FDB entries
+
+    Args:
+        populate_fdb: populate_fdb fixture (imported in conftest.py) that performs the actual FDB population
+
+    Returns:
+        None
+    """
+    pass
diff --git a/tests/veos.vtb b/tests/veos.vtb
index a7ffee0e00..31f4304aee 100644
--- a/tests/veos.vtb
+++ b/tests/veos.vtb
@@ -55,6 +55,11 @@ server_1
 [servers:vars]
 topologies=['t1', 't1-lag', 't1-64-lag', 't1-64-lag-clet', 't0', 't0-16', 't0-56', 't0-52', 'ptf32', 'ptf64', 't0-64', 't0-64-32', 't0-116']
 
+[lab]
+vlab-01
+vlab-02
+vlab-03
+
 [sonic]
 vlab-01 ansible_host=10.250.0.101 type=kvm hwsku=Force10-S6000 ansible_password=password ansible_user=admin
 vlab-02 ansible_host=10.250.0.102 type=kvm hwsku=Force10-S6100 ansible_password=password
diff --git a/tests/vlan/test_vlan.py b/tests/vlan/test_vlan.py
index 00be8e39fe..022c9e7221 100644
--- a/tests/vlan/test_vlan.py
+++ b/tests/vlan/test_vlan.py
@@ -1,4 +1,3 @@
-from ansible_host import AnsibleHost
 import pytest
 
 import ptf.packet as scapy
@@ -34,7 +33,7 @@ def vlan_ports_list(cfg_facts, ptfhost):
     config_portchannels = cfg_facts.get('PORTCHANNEL', {})
     config_port_indices = cfg_facts['port_index_map']
     ptf_ports_available_in_topo = ptfhost.host.options['variable_manager'].extra_vars.get("ifaces_map")
-
+
     pvid_cycle = itertools.cycle(vlan_id_list)
 
     # when running on t0 we can use the portchannel members
@@ -80,9 +79,9 @@ def create_vlan_interfaces(vlan_ports_list, vlan_intfs_list, duthost, ptfhost):
 
             ptfhost.command("ip link set eth{idx}.{pvid} up".format(
                 idx=vlan_port["port_index"],
-                pvid=permit_vlanid 
+                pvid=permit_vlanid
             ))
-
+
     for vlan in vlan_intfs_list:
         duthost.command("config interface ip add Vlan{} {}".format(vlan['vlan_id'], vlan['ip']))
@@ -154,7 +153,7 @@ def setup_vlan(ptfadapter, duthost, ptfhost, vlan_ports_list, vlan_intfs_list, c
     ptfhost.command('supervisorctl reread')
     ptfhost.command('supervisorctl update')
-
+
     logger.info("Start arp_responder")
     ptfhost.command('supervisorctl start arp_responder')
@@ -180,7 +179,7 @@ def tearDown(vlan_ports_list, duthost, ptfhost, vlan_intfs_list, portchannel_int
             if int(permit_vlanid) != vlan_port["pvid"]:
                 ptfhost.command("ip link delete eth{idx}.{pvid}".format(
                     idx=vlan_port["port_index"],
-                    pvid=permit_vlanid 
+                    pvid=permit_vlanid
                 ))
     except RunAnsibleModuleFail as e:
         logger.error(e)
@@ -200,14 +199,14 @@ def setUpArpResponder(vlan_ports_list, ptfhost):
         else:
             iface = "eth{}.{}".format(vlan_port["port_index"], permit_vlanid)
         d[iface].append(vlan_port["permit_vlanid"][permit_vlanid]["peer_ip"])
-
+
     with open('/tmp/from_t1.json', 'w') as file:
         json.dump(d, file)
     ptfhost.copy(src='/tmp/from_t1.json', dest='/tmp/from_t1.json')
 
 def build_icmp_packet(vlan_id, src_mac="00:22:00:00:00:02", dst_mac="ff:ff:ff:ff:ff:ff", src_ip="192.168.0.1", dst_ip="192.168.0.2", ttl=64):
-
+
     pkt = testutils.simple_icmp_packet(pktlen=100 if vlan_id == 0 else 104,
                                        eth_dst=dst_mac,
                                        eth_src=src_mac,
@@ -243,9 +242,9 @@ def verify_icmp_packets(ptfadapter, vlan_ports_list, vlan_port, vlan_id):
 
 @pytest.mark.bsl
 def test_vlan_tc1_send_untagged(ptfadapter, vlan_ports_list):
     """
-    Test case #1 
-    Verify packets egress without tag from ports whose PVID same with ingress port 
-    Verify packets egress with tag from ports who include VLAN ID but PVID different from ingress port. 
+    Test case #1
+    Verify packets egress without tag from ports whose PVID same with ingress port
+    Verify packets egress with tag from ports who include VLAN ID but PVID different from ingress port.
     """
 
     logger.info("Test case #1 starting ...")
@@ -262,8 +261,8 @@ def test_vlan_tc1_send_untagged(ptfadapter, vlan_ports_list):
 def test_vlan_tc2_send_tagged(ptfadapter, vlan_ports_list):
     """
     Test case #2
-    Send tagged packets from each port. 
-    Verify packets egress without tag from ports whose PVID same with ingress port 
+    Send tagged packets from each port.
+    Verify packets egress without tag from ports whose PVID same with ingress port
     Verify packets egress with tag from ports who include VLAN ID but PVID different from ingress port.
     """
 
@@ -282,7 +281,7 @@ def test_vlan_tc3_send_invalid_vid(ptfadapter, vlan_ports_list):
     """
     Test case #3
     Send packets with invalid VLAN ID
-    Verify no port can receive these pacekts 
+    Verify no port can receive these packets
     """
 
     logger.info("Test case #3 starting ...")
@@ -293,7 +292,7 @@ def test_vlan_tc3_send_invalid_vid(ptfadapter, vlan_ports_list):
 
     for vlan_port in vlan_ports_list:
         src_port = vlan_port["port_index"]
-        dst_ports = [port["port_index"] for port in vlan_ports_list 
+        dst_ports = [port["port_index"] for port in vlan_ports_list
                       if port != vlan_port ]
         logger.info("Send invalid tagged packet " + " from " + str(src_port) + "...")
         logger.info(invalid_tagged_pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))
diff --git a/tests/vtestbed.csv b/tests/vtestbed.csv
index 721352f6ca..0e95fcfb32 100644
--- a/tests/vtestbed.csv
+++ b/tests/vtestbed.csv
@@ -1,4 +1,4 @@
 # conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,server,vm_base,dut,comment
-vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-01,10.250.0.102/24,server_1,VM0100,vlab-01,Tests virtual switch vm
-vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-01,10.250.0.102/24,server_1,VM0100,vlab-02,Tests virtual switch vm
-vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-02,10.250.0.106/24,server_1,VM0104,vlab-03,Tests virtual switch vm
+vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-01,10.250.0.102/24,server_1,VM0100,[vlab-01],Tests virtual switch vm
+vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-01,10.250.0.102/24,server_1,VM0100,[vlab-02],Tests virtual switch vm
+vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-02,10.250.0.106/24,server_1,VM0104,[vlab-03],Tests virtual switch vm
diff --git a/tests/vxlan/test_vxlan_decap.py b/tests/vxlan/test_vxlan_decap.py
new file mode 100644
index 0000000000..7a486cc7ef
--- /dev/null
+++ b/tests/vxlan/test_vxlan_decap.py
@@ -0,0 +1,149 @@
+import json
+import logging
+from datetime import datetime
+
+import pytest
+from jinja2 import Template
+from netaddr import IPAddress
+
+from ptf_runner import ptf_runner
+
+logger = logging.getLogger(__name__)
+
+VTEP2_IP = "8.8.8.8"
+VNI_BASE = 336
+COUNT = 10
+
+
+def prepare_ptf(ptfhost, mg_facts, dut_facts):
+    """
+    @summary: Prepare the PTF docker container for testing
+    @param mg_facts: Minigraph facts
+    @param dut_facts: Host facts of DUT
+    """
+    logger.info("Remove IP and change MAC")
+    ptfhost.script("./scripts/remove_ip.sh")
+    ptfhost.script("./scripts/change_mac.sh")
+
+    logger.info("Prepare arp_responder")
+    ptfhost.copy(src="../ansible/roles/test/files/helpers/arp_responder.py", dest="/opt")
+
+
diff --git a/tests/vtestbed.csv b/tests/vtestbed.csv
index 721352f6ca..0e95fcfb32 100644
--- a/tests/vtestbed.csv
+++ b/tests/vtestbed.csv
@@ -1,4 +1,4 @@
 # conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,server,vm_base,dut,comment
-vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-01,10.250.0.102/24,server_1,VM0100,vlab-01,Tests virtual switch vm
-vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-01,10.250.0.102/24,server_1,VM0100,vlab-02,Tests virtual switch vm
-vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-02,10.250.0.106/24,server_1,VM0104,vlab-03,Tests virtual switch vm
+vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-01,10.250.0.102/24,server_1,VM0100,[vlab-01],Tests virtual switch vm
+vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-01,10.250.0.102/24,server_1,VM0100,[vlab-02],Tests virtual switch vm
+vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-02,10.250.0.106/24,server_1,VM0104,[vlab-03],Tests virtual switch vm
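[editor's note] The dut column changes here from a bare hostname to a bracketed list, so one testbed line can later carry several DUTs. A rough sketch of how such a field could be parsed; parse_dut_field and the ';' separator inside the brackets are assumptions for illustration, not code from this patch:

    def parse_dut_field(field):
        # '[vlab-01]' -> ['vlab-01']; tolerate the old bare 'vlab-01' form too.
        field = field.strip()
        if field.startswith('[') and field.endswith(']'):
            return [d.strip() for d in field[1:-1].split(';') if d.strip()]
        return [field]

    # e.g. parse_dut_field('[vlab-01]') == ['vlab-01']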
diff --git a/tests/vxlan/test_vxlan_decap.py b/tests/vxlan/test_vxlan_decap.py
new file mode 100644
index 0000000000..7a486cc7ef
--- /dev/null
+++ b/tests/vxlan/test_vxlan_decap.py
@@ -0,0 +1,149 @@
+import json
+import logging
+from datetime import datetime
+
+import pytest
+from jinja2 import Template
+from netaddr import IPAddress
+
+from ptf_runner import ptf_runner
+
+logger = logging.getLogger(__name__)
+
+VTEP2_IP = "8.8.8.8"
+VNI_BASE = 336
+COUNT = 10
+
+
+def prepare_ptf(ptfhost, mg_facts, dut_facts):
+    """
+    @summary: Prepare the PTF docker container for testing
+    @param mg_facts: Minigraph facts
+    @param dut_facts: Host facts of DUT
+    """
+    logger.info("Remove IP and change MAC")
+    ptfhost.script("./scripts/remove_ip.sh")
+    ptfhost.script("./scripts/change_mac.sh")
+
+    logger.info("Prepare arp_responder")
+    ptfhost.copy(src="../ansible/roles/test/files/helpers/arp_responder.py", dest="/opt")
+
+    arp_responder_conf = Template(open("../ansible/roles/test/templates/arp_responder.conf.j2").read())
+    ptfhost.copy(content=arp_responder_conf.render(arp_responder_args="--conf /tmp/vxlan_arpresponder.conf"),
+                 dest="/etc/supervisor/conf.d/arp_responder.conf")
+
+    ptfhost.shell("supervisorctl reread")
+    ptfhost.shell("supervisorctl update")
+
+    logger.info("Put information needed by the PTF script to the PTF container.")
+    vxlan_decap = {
+        "minigraph_port_indices": mg_facts["minigraph_port_indices"],
+        "minigraph_portchannel_interfaces": mg_facts["minigraph_portchannel_interfaces"],
+        "minigraph_portchannels": mg_facts["minigraph_portchannels"],
+        "minigraph_lo_interfaces": mg_facts["minigraph_lo_interfaces"],
+        "minigraph_vlans": mg_facts["minigraph_vlans"],
+        "minigraph_vlan_interfaces": mg_facts["minigraph_vlan_interfaces"],
+        "dut_mac": dut_facts["ansible_Ethernet0"]["macaddress"]
+    }
+    ptfhost.copy(content=json.dumps(vxlan_decap, indent=2), dest="/tmp/vxlan_decap.json")
+
+    logger.info("Copy PTF scripts to PTF container")
+    ptfhost.copy(src="ptftests", dest="/root")
+
+
+def generate_vxlan_config_files(duthost, mg_facts):
+    """
+    @summary: Generate VXLAN tunnel and VXLAN map configuration files to DUT.
+    @param duthost: DUT host object
+    @mg_facts: Minigraph facts
+    """
+    loopback_ip = None
+    for intf in mg_facts["minigraph_lo_interfaces"]:
+        if IPAddress(intf["addr"]).version == 4:
+            loopback_ip = intf["addr"]
+            break
+    if not loopback_ip:
+        pytest.fail("ipv4 lo interface not found")
+
+    # Generate vxlan tunnel config json file on DUT
+    vxlan_tunnel_cfg = {
+        "VXLAN_TUNNEL": {
+            "tunnelVxlan": {
+                "src_ip": loopback_ip,
+                "dst_ip": VTEP2_IP
+            }
+        }
+    }
+    duthost.copy(content=json.dumps(vxlan_tunnel_cfg, indent=2), dest="/tmp/vxlan_db.tunnel.json")
+
+    # Generate vxlan maps config json file on DUT
+    vxlan_maps_cfg = {
+        "VXLAN_TUNNEL_MAP": {}
+    }
+    for vlan in mg_facts["minigraph_vlans"]:
+        vxlan_maps_cfg["VXLAN_TUNNEL_MAP"]["tunnelVxlan|map%s" % vlan] = {
+            "vni": int(vlan.replace("Vlan", "")) + VNI_BASE,
+            "vlan": vlan
+        }
+    duthost.copy(content=json.dumps(vxlan_maps_cfg, indent=2), dest="/tmp/vxlan_db.maps.json")
+
+
+@pytest.fixture(scope="module")
+def setup(duthost, ptfhost):
+
+    logger.info("Gather some facts")
+    mg_facts = duthost.minigraph_facts(host=duthost.hostname)["ansible_facts"]
+    dut_facts = duthost.setup(gather_subset="!all,!any,network", filter="ansible_Ethernet*")["ansible_facts"]
+    ptf_facts = ptfhost.setup(gather_subset="!all,!any,network")["ansible_facts"]
+
+    logger.info("Prepare PTF")
+    prepare_ptf(ptfhost, mg_facts, dut_facts)
+
+    logger.info("Generate VxLAN config files")
+    generate_vxlan_config_files(duthost, mg_facts)
+
+    setup_info = {
+        "mg_facts": mg_facts
+    }
+
+    yield setup_info
+
+    logger.info("Stop arp_responder on PTF")
+    ptfhost.shell("supervisorctl stop arp_responder")
+
+    logger.info("Always try to remove any possible VxLAN tunnel and map configuration")
+    for vlan in mg_facts["minigraph_vlans"]:
+        duthost.shell('docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL_MAP|tunnelVxlan|map%s"' % vlan)
+    duthost.shell('docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL|tunnelVxlan"')
+
+
+@pytest.fixture(params=["NoVxLAN", "Enabled", "Removed"])
+def vxlan_status(setup, request, duthost):
+    if request.param == "Enabled":
+        duthost.shell("sonic-cfggen -j /tmp/vxlan_db.tunnel.json --write-to-db")
+        duthost.shell("sonic-cfggen -j /tmp/vxlan_db.maps.json --write-to-db")
+        return True, request.param
+    elif request.param == "Removed":
+        for vlan in setup["mg_facts"]["minigraph_vlans"]:
+            duthost.shell('docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL_MAP|tunnelVxlan|map%s"' % vlan)
+        duthost.shell('docker exec -i database redis-cli -n 4 -c DEL "VXLAN_TUNNEL|tunnelVxlan"')
+        return False, request.param
+    else:
+        return False, request.param
+
+
+def test_vxlan_decap(setup, vxlan_status, duthost, ptfhost):
+
+    vxlan_enabled, scenario = vxlan_status
+
+    logger.info("vxlan_enabled=%s, scenario=%s" % (vxlan_enabled, scenario))
+    log_file = "/tmp/vxlan-decap.Vxlan.{}.{}.log".format(scenario, datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
+    ptf_runner(ptfhost,
+               "ptftests",
+               "vxlan-decap.Vxlan",
+               platform_dir="ptftests",
+               params={"vxlan_enabled": vxlan_enabled,
+                       "config_file": '/tmp/vxlan_decap.json',
+                       "count": COUNT},
+               qlen=1000,
+               log_file=log_file)
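[editor's note] A worked example of the VLAN-to-VNI mapping that generate_vxlan_config_files writes: with VNI_BASE = 336, Vlan1000 maps to VNI 1336. The snippet below is standalone; the minigraph_vlans value is a made-up stand-in for mg_facts["minigraph_vlans"]:

    import json

    VNI_BASE = 336
    minigraph_vlans = ["Vlan1000"]  # illustrative stand-in

    vxlan_maps_cfg = {"VXLAN_TUNNEL_MAP": {}}
    for vlan in minigraph_vlans:
        vxlan_maps_cfg["VXLAN_TUNNEL_MAP"]["tunnelVxlan|map%s" % vlan] = {
            "vni": int(vlan.replace("Vlan", "")) + VNI_BASE,  # 1000 + 336 = 1336
            "vlan": vlan
        }

    print(json.dumps(vxlan_maps_cfg, indent=2))
    # The "Enabled" scenario then loads the generated file with:
    #   sonic-cfggen -j /tmp/vxlan_db.maps.json --write-to-db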