Add support for headroom pool watermark (sonic-net#1453)
What I did
Added 'SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES' to the list of buffer pool counters to be queried
Updated the buffer pool Lua plugin to maintain the headroom pool watermark counters
Updated the watermark orch to act on headroom pool clear requests

Why I did it
To expose the headroom pool watermark counters in SONiC

How I verified it
On platforms with headroom pool support, verified that headroom pool watermark counters are getting updated as expected
On platforms without headroom pool support, headroom pool watermark counters show as N/A
Added a new sonic-mgmt test, which passed: sonic-net/sonic-mgmt#2614
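For context, watermark clearing is driven over a notification channel that the watermark orch subscribes to. A minimal Python sketch of issuing the new request, assuming the WATERMARK_CLEAR_REQUEST channel in COUNTERS_DB and the op/data convention used by watermarkorch.cpp (op picks the USER or PERSISTENT table, data names the resource):

    from swsscommon import swsscommon

    # Connect to COUNTERS_DB, which hosts the clear-request channel.
    counters_db = swsscommon.DBConnector("COUNTERS_DB", 0)
    notifier = swsscommon.NotificationProducer(counters_db, "WATERMARK_CLEAR_REQUEST")

    # op selects the watermark table to clear; data selects the resource.
    # "HEADROOM_POOL" is the request this change teaches the orch to handle.
    notifier.send("USER", "HEADROOM_POOL", swsscommon.FieldValuePairs())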

Signed-off-by: Neetha John <nejo@microsoft.com>
neethajohn authored Dec 9, 2020
1 parent 64576f0 commit 599ed41
Showing 4 changed files with 73 additions and 55 deletions.
1 change: 1 addition & 0 deletions orchagent/bufferorch.cpp
@@ -23,6 +23,7 @@ extern sai_object_id_t gSwitchId;
static const vector<sai_buffer_pool_stat_t> bufferPoolWatermarkStatIds =
{
    SAI_BUFFER_POOL_STAT_WATERMARK_BYTES,
+   SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES
};

type_map BufferOrch::m_buffer_type_maps = {
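For context on how this list is consumed: the flex counter infrastructure polls whatever stat IDs appear in BUFFER_POOL_COUNTER_ID_LIST, so the headroom stat becomes pollable once listed. A sketch of the corresponding FLEX_COUNTER_DB entry, with table and group names mirroring tests/test_watermark.py below and a made-up pool OID:

    from swsscommon import swsscommon

    flex_db = swsscommon.DBConnector("FLEX_COUNTER_DB", 0)
    tbl = swsscommon.Table(flex_db, "FLEX_COUNTER_TABLE")
    pool_oid = "oid:0x18000000000b10"  # hypothetical buffer pool OID

    # Both watermark stats are polled once they appear in the counter ID list.
    tbl.set("BUFFER_POOL_WATERMARK_STAT_COUNTER:" + pool_oid,
            swsscommon.FieldValuePairs([
                ("BUFFER_POOL_COUNTER_ID_LIST",
                 "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES,SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES")]))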
73 changes: 34 additions & 39 deletions orchagent/watermark_bufferpool.lua
@@ -12,51 +12,46 @@ local persistent_table_name = 'PERSISTENT_WATERMARKS'
local periodic_table_name = 'PERIODIC_WATERMARKS'

local sai_buffer_pool_watermark_stat_name = 'SAI_BUFFER_POOL_STAT_WATERMARK_BYTES'
+local sai_hdrm_pool_watermark_stat_name = 'SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES'

-local rets = {}
-
-redis.call('SELECT', counters_db)
+local rets = redis.call('SELECT', counters_db)

-- Iterate through each buffer pool oid
local n = table.getn(KEYS)
for i = n, 1, -1 do
    -- Get new watermark value from COUNTERS
-   local wm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
-   if wm then
-       wm = tonumber(wm)
-
-       -- Get last value from *_WATERMARKS
-       local user_wm_last = redis.call('HGET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
-
-       -- Set higher value to *_WATERMARKS
-       if user_wm_last then
-           user_wm_last = tonumber(user_wm_last)
-           if wm > user_wm_last then
-               redis.call('HSET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
-           end
-       else
-           redis.call('HSET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
-       end
-
-       local persistent_wm_last = redis.call('HGET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
-       if persistent_wm_last then
-           persistent_wm_last = tonumber(persistent_wm_last)
-           if wm > persistent_wm_last then
-               redis.call('HSET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
-           end
-       else
-           redis.call('HSET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
-       end
-
-       local periodic_wm_last = redis.call('HGET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
-       if periodic_wm_last then
-           periodic_wm_last = tonumber(periodic_wm_last)
-           if wm > periodic_wm_last then
-               redis.call('HSET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
-           end
-       else
-           redis.call('HSET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name, wm)
-       end
-   end
+   local buffer_pool_wm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
+   local hdrm_pool_wm = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name)
+
+   -- Get last value from *_WATERMARKS
+   local user_buffer_pool_wm = redis.call('HGET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
+   local persistent_buffer_pool_wm = redis.call('HGET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
+   local periodic_buffer_pool_wm = redis.call('HGET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name)
+
+   local user_hdrm_pool_wm = redis.call('HGET', user_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name)
+   local persistent_hdrm_pool_wm = redis.call('HGET', persistent_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name)
+   local periodic_hdrm_pool_wm = redis.call('HGET', periodic_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name)
+
+   if buffer_pool_wm then
+       buffer_pool_wm = tonumber(buffer_pool_wm)
+
+       redis.call('HSET', user_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name,
+                  user_buffer_pool_wm and math.max(buffer_pool_wm, user_buffer_pool_wm) or buffer_pool_wm)
+       redis.call('HSET', persistent_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name,
+                  persistent_buffer_pool_wm and math.max(buffer_pool_wm, persistent_buffer_pool_wm) or buffer_pool_wm)
+       redis.call('HSET', periodic_table_name .. ':' .. KEYS[i], sai_buffer_pool_watermark_stat_name,
+                  periodic_buffer_pool_wm and math.max(buffer_pool_wm, periodic_buffer_pool_wm) or buffer_pool_wm)
+   end
+
+   if hdrm_pool_wm then
+       hdrm_pool_wm = tonumber(hdrm_pool_wm)
+
+       redis.call('HSET', user_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name,
+                  user_hdrm_pool_wm and math.max(hdrm_pool_wm, user_hdrm_pool_wm) or hdrm_pool_wm)
+       redis.call('HSET', persistent_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name,
+                  persistent_hdrm_pool_wm and math.max(hdrm_pool_wm, persistent_hdrm_pool_wm) or hdrm_pool_wm)
+       redis.call('HSET', periodic_table_name .. ':' .. KEYS[i], sai_hdrm_pool_watermark_stat_name,
+                  periodic_hdrm_pool_wm and math.max(hdrm_pool_wm, periodic_hdrm_pool_wm) or hdrm_pool_wm)
+   end
end

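The rewritten plugin folds each old per-table if/else chain into a single `last and math.max(new, last) or new` expression per HSET; the idiom is safe here because math.max always returns a number, never false or nil. The merge rule, rendered as a hypothetical Python helper for readability:

    def merge_watermark(last, new):
        """Max-merge applied per *_WATERMARKS table: seed with the first
        reading, then keep the running maximum."""
        if last is None:  # no previous entry for this buffer pool
            return int(new)
        return max(int(last), int(new))

    # merge_watermark(None, "96") -> 96; merge_watermark("192", "96") -> 192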
45 changes: 30 additions & 15 deletions orchagent/watermarkorch.cpp
@@ -13,6 +13,7 @@
#define CLEAR_QUEUE_SHARED_UNI_REQUEST "Q_SHARED_UNI"
#define CLEAR_QUEUE_SHARED_MULTI_REQUEST "Q_SHARED_MULTI"
#define CLEAR_BUFFER_POOL_REQUEST "BUFFER_POOL"
+#define CLEAR_HEADROOM_POOL_REQUEST "HEADROOM_POOL"

extern PortsOrch *gPortsOrch;
extern BufferOrch *gBufferOrch;
@@ -182,32 +183,38 @@ void WatermarkOrch::doTask(NotificationConsumer &consumer)
    if (data == CLEAR_PG_HEADROOM_REQUEST)
    {
        clearSingleWm(table,
                      "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES",
                      m_pg_ids);
    }
    else if (data == CLEAR_PG_SHARED_REQUEST)
    {
        clearSingleWm(table,
                      "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES",
                      m_pg_ids);
    }
    else if (data == CLEAR_QUEUE_SHARED_UNI_REQUEST)
    {
        clearSingleWm(table,
                      "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
                      m_unicast_queue_ids);
    }
    else if (data == CLEAR_QUEUE_SHARED_MULTI_REQUEST)
    {
        clearSingleWm(table,
                      "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
                      m_multicast_queue_ids);
    }
    else if (data == CLEAR_BUFFER_POOL_REQUEST)
    {
        clearSingleWm(table,
                      "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES",
                      gBufferOrch->getBufferPoolNameOidMap());
    }
+   else if (data == CLEAR_HEADROOM_POOL_REQUEST)
+   {
+       clearSingleWm(table,
+                     "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES",
+                     gBufferOrch->getBufferPoolNameOidMap());
+   }
    else
    {
@@ -243,15 +250,23 @@ void WatermarkOrch::doTask(SelectableTimer &timer)
        }

        clearSingleWm(m_periodicWatermarkTable.get(),
                      "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES",
                      m_pg_ids);
        clearSingleWm(m_periodicWatermarkTable.get(),
                      "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES",
                      m_pg_ids);
        clearSingleWm(m_periodicWatermarkTable.get(),
                      "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
                      m_unicast_queue_ids);
        clearSingleWm(m_periodicWatermarkTable.get(),
                      "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES",
                      m_multicast_queue_ids);
        clearSingleWm(m_periodicWatermarkTable.get(),
                      "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES",
                      gBufferOrch->getBufferPoolNameOidMap());
+       clearSingleWm(m_periodicWatermarkTable.get(),
+                     "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES",
+                     gBufferOrch->getBufferPoolNameOidMap());
        SWSS_LOG_DEBUG("Periodic watermark cleared by timer!");
    }
}
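clearSingleWm itself is unchanged by this diff; roughly, it resets one stat to 0 for every tracked object, which is why the same helper serves PGs, queues, and both buffer pool stats. A hedged Python sketch of that behavior (the Table interface and the name-to-OID map shape are assumptions):

    from swsscommon import swsscommon

    def clear_single_wm(table, stat_name, items):
        # items may be a list of OIDs (PGs, queues) or a name->OID map
        # (as returned by getBufferPoolNameOidMap()).
        oids = items.values() if isinstance(items, dict) else items
        for oid in oids:
            # e.g. writes PERIODIC_WATERMARKS:<oid> { stat_name: "0" }
            table.set(str(oid), swsscommon.FieldValuePairs([(stat_name, "0")]))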
9 changes: 8 additions & 1 deletion tests/test_watermark.py
@@ -12,6 +12,7 @@ class SaiWmStats:
    pg_shared = "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES"
    pg_headroom = "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES"
    buffer_pool = "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES"
+   hdrm_pool = "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES"


class WmTables:
@@ -23,7 +24,7 @@ class WmTables:
class WmFCEntry:
    queue_stats_entry = {"QUEUE_COUNTER_ID_LIST": SaiWmStats.queue_shared}
    pg_stats_entry = {"PG_COUNTER_ID_LIST": "{},{}".format(SaiWmStats.pg_shared, SaiWmStats.pg_headroom)}
-   buffer_stats_entry = {"BUFFER_POOL_COUNTER_ID_LIST": SaiWmStats.buffer_pool}
+   buffer_stats_entry = {"BUFFER_POOL_COUNTER_ID_LIST": "{},{}".format(SaiWmStats.buffer_pool, SaiWmStats.hdrm_pool)}


class TestWatermark(object):
@@ -80,6 +81,7 @@ def populate_asic_all(self, dvs, val):
        self.populate_asic(dvs, "SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", SaiWmStats.pg_shared, val)
        self.populate_asic(dvs, "SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", SaiWmStats.pg_headroom, val)
        self.populate_asic(dvs, "SAI_OBJECT_TYPE_BUFFER_POOL", SaiWmStats.buffer_pool, val)
+       self.populate_asic(dvs, "SAI_OBJECT_TYPE_BUFFER_POOL", SaiWmStats.hdrm_pool, val)
        time.sleep(self.DEFAULT_POLL_INTERVAL)

    def verify_value(self, dvs, obj_ids, table_name, watermark_name, expected_value):
@@ -181,6 +183,7 @@ def test_telemetry_period(self, dvs):
        self.verify_value(dvs, self.pgs, WmTables.periodic, SaiWmStats.pg_headroom, "0")
        self.verify_value(dvs, self.qs, WmTables.periodic, SaiWmStats.queue_shared, "0")
        self.verify_value(dvs, self.buffers, WmTables.periodic, SaiWmStats.buffer_pool, "0")
+       self.verify_value(dvs, self.buffers, WmTables.periodic, SaiWmStats.hdrm_pool, "0")

        self.populate_asic_all(dvs, "123")

@@ -193,6 +196,7 @@ def test_telemetry_period(self, dvs):
        self.verify_value(dvs, self.pgs, WmTables.periodic, SaiWmStats.pg_headroom, "0")
        self.verify_value(dvs, self.qs, WmTables.periodic, SaiWmStats.queue_shared, "0")
        self.verify_value(dvs, self.buffers, WmTables.periodic, SaiWmStats.buffer_pool, "0")
+       self.verify_value(dvs, self.buffers, WmTables.periodic, SaiWmStats.hdrm_pool, "0")

finally:
self.clear_flex_counter(dvs)
@@ -214,6 +218,7 @@ def test_lua_plugins(self, dvs):
        self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_headroom, "192")
        self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_shared, "192")
        self.verify_value(dvs, self.buffers, table_name, SaiWmStats.buffer_pool, "192")
+       self.verify_value(dvs, self.buffers, table_name, SaiWmStats.hdrm_pool, "192")

self.populate_asic_all(dvs, "96")

@@ -222,6 +227,7 @@ def test_lua_plugins(self, dvs):
        self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_headroom, "192")
        self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_shared, "192")
        self.verify_value(dvs, self.buffers, table_name, SaiWmStats.buffer_pool, "192")
+       self.verify_value(dvs, self.buffers, table_name, SaiWmStats.hdrm_pool, "192")

self.populate_asic_all(dvs, "288")

@@ -230,6 +236,7 @@ def test_lua_plugins(self, dvs):
        self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_headroom, "288")
        self.verify_value(dvs, self.selected_pgs, table_name, SaiWmStats.pg_shared, "288")
        self.verify_value(dvs, self.buffers, table_name, SaiWmStats.buffer_pool, "288")
+       self.verify_value(dvs, self.buffers, table_name, SaiWmStats.hdrm_pool, "288")

finally:
self.clear_flex_counter(dvs)
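A hypothetical on-device spot-check mirroring what the tests assert: read the headroom pool watermark straight from COUNTERS_DB. The OID is a made-up example; on platforms without headroom pool support the field is simply absent, which the CLI renders as N/A:

    from swsscommon import swsscommon

    db = swsscommon.DBConnector("COUNTERS_DB", 0)
    tbl = swsscommon.Table(db, "PERIODIC_WATERMARKS")
    pool_oid = "oid:0x18000000000b10"  # hypothetical buffer pool OID

    status, fvs = tbl.get(pool_oid)
    wm = dict(fvs).get("SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES") if status else None
    print(wm if wm is not None else "N/A")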
