From 66aad1d13aa26a6f3f409bbe0170780707268796 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Tue, 10 Apr 2018 13:22:28 +0200
Subject: [PATCH 001/219] feat(influxdb-metrics): First release of a Lua
 connector

---
 stream-connectors/README.md                   |   6 +
 .../influxdb/influxdb-metrics.lua             | 174 ++++++++++++++++++
 2 files changed, 180 insertions(+)
 create mode 100644 stream-connectors/README.md
 create mode 100644 stream-connectors/influxdb/influxdb-metrics.lua

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
new file mode 100644
index 00000000000..8074e07a9c2
--- /dev/null
+++ b/stream-connectors/README.md
@@ -0,0 +1,6 @@
+# Centreon Stream Connectors #
+
+Here are several stream connectors for the
+[Centreon Broker](https://github.com/centreon/centreon-broker).
+
+The goal is to provide useful scripts to the community.
diff --git a/stream-connectors/influxdb/influxdb-metrics.lua b/stream-connectors/influxdb/influxdb-metrics.lua
new file mode 100644
index 00000000000..9d7a3d79ed1
--- /dev/null
+++ b/stream-connectors/influxdb/influxdb-metrics.lua
@@ -0,0 +1,174 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker InfluxDB Connector
+-- Tested with versions
+-- 1.4.3
+--
+-- References:
+-- https://docs.influxdata.com/influxdb/v1.4/write_protocols/line_protocol_tutorial/
+-- https://docs.influxdata.com/influxdb/v1.4/guides/writing_data/
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Prerequisites:
+-- You need an influxdb server
+-- You can install one with docker and these commands:
+-- docker pull influxdb
+-- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb
+-- You need to create a database
+-- curl http://<influxdb-server>:8086/query --data-urlencode "q=CREATE DATABASE mydb"
+--
+-- The Lua-socket library is required by this script.
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Access to the data:
+-- curl -G 'http://<influxdb-server>:8086/query?pretty=true' --data-urlencode "db=mydb" --data-urlencode "q=SELECT * from Cpu"
+--------------------------------------------------------------------------------
+
+
+local http = require("socket.http")
+local ltn12 = require("ltn12")
+
+--------------------------------------------------------------------------------
+-- event_queue class
+--------------------------------------------------------------------------------
+
+local event_queue = {
+  __internal_ts_last_flush = nil,
+  http_server_address = "",
+  http_server_port = 8086,
+  http_server_protocol = "http",
+  events = {},
+  influx_database = "mydb",
+  influx_database = "mydb",
+  max_buffer_size = 5000,
+  max_buffer_age = 5
+}
+
+-- Constructor: event_queue:new
+function event_queue:new(o, conf)
+  o = o or {}
+  setmetatable(o, self)
+  self.__index = self
+  for i,v in pairs(conf) do
+    if self[i] and i ~= "events" and string.sub(i, 1, 11) ~= "__internal_" then
+      broker_log:info(1, "event_queue:new: getting parameter " .. i .. " => " .. v)
+      self[i] = v
+    else
+      broker_log:warning(1, "event_queue:new: ignoring parameter " .. i .. " => " .. v)
+    end
+  end
+  self.__internal_ts_last_flush = os.time()
+  broker_log:info(2, "event_queue:new: setting the internal timestamp to " ..
+    self.__internal_ts_last_flush)
+  return o
+end
+
+-- Method: event_queue:flush
+-- Called when the max number of events or the max buffer age is reached
+function event_queue:flush()
+  broker_log:info(2, "event_queue:flush: Concatenating all the events as one string")
+  -- we concatenate all the events
+  local http_post_data = ""
+  local http_result_body = {}
+  for i, raw_event in ipairs(self.events) do
+    http_post_data = http_post_data .. raw_event
+  end
+  broker_log:info(2, "event_queue:flush: HTTP POST request \"" .. self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. "/write?db=" .. self.influx_database .. "\"")
+  broker_log:info(3, "event_queue:flush: HTTP POST data are: '" .. http_post_data .. "'")
+  local hr_result, hr_code, hr_header, hr_s = http.request{
+    url = self.http_server_protocol.."://"..self.http_server_address..":"..self.http_server_port.."/write?db="..self.influx_database,
+    method = "POST",
+    -- sink is where the request result's body will go
+    sink = ltn12.sink.table(http_result_body),
+    -- request body needs to be formatted as a LTN12 source
+    source = ltn12.source.string(http_post_data),
+    headers = {
+      -- mandatory for POST request with body
+      ["content-length"] = string.len(http_post_data)
+    }
+  }
+  -- Handling the return code
+  if hr_code == 204 then
+    broker_log:info(2, "event_queue:flush: HTTP POST request successful: return code is " .. hr_code)
+  else
+    broker_log:error(1, "event_queue:flush: HTTP POST request FAILED: return code is " .. hr_code)
+    for i, v in ipairs(http_result_body) do
+      broker_log:error(1, "event_queue:flush: HTTP POST request FAILED: message line " .. i .. " is \"" .. v .. "\"")
+    end
+  end
+
+  -- now that the data has been sent, we empty the events array
+  self.events = {}
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+end
+
+-- Method: event_queue:add
+function event_queue:add(e)
+  local metric = e.name
+  -- time is a reserved word in influxDB so I rename it
+  if metric == "time" then
+    metric = "_"..metric
+  end
+  -- retrieve object names instead of IDs
+  local host_name = broker_cache:get_hostname(e.host_id)
+  local service_description = broker_cache:get_service_description(e.host_id, e.service_id)
+  -- what if we could not get them from cache
+  if not host_name then
+    broker_log:warning(1, "event_queue:add: host_name for id " .. e.host_id .. " not found. Restarting centengine should fix this.")
+    host_name = e.host_id
+  end
+  if not service_description then
+    broker_log:warning(1, "event_queue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found. Restarting centengine should fix this.")
+    service_description = e.service_id
+  end
+  -- we finally append the event to the events table
+  broker_log:info(3, "event_queue:add: adding \"" .. service_description..",host="..host_name.." "..metric.."="..e.value.." "..e.ctime.."000000000\" to event list.")
+  self.events[#self.events + 1] = service_description..",host="..host_name.." "..metric.."="..e.value.." "..e.ctime.."000000000\n"
+
+  -- then we check whether it is time to send the events to the receiver and flush
+  if #self.events >= self.max_buffer_size then
+    broker_log:info(2, "event_queue:add: flushing because buffer size reached " .. self.max_buffer_size .. " elements.")
+    self:flush()
+    return true
+  elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age then
+    broker_log:info(2, "event_queue:add: flushing " .. #self.events ..
+      " elements because buffer age reached " .. (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.")
+    self:flush()
+    return true
+  else
+    return false
+  end
+end
+--------------------------------------------------------------------------------
+
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+-- Function init()
+function init(conf)
+  broker_log:set_parameters(1, "/var/log/centreon-broker/stream-connector-influxdb.log")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = event_queue:new(nil, conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- Function write()
+function write(e)
+  broker_log:info(3, "write: Beginning write() function")
+  queue:add(e)
+  broker_log:info(3, "write: Ending write() function\n")
+  return true
+end
+
+-- Function filter()
+-- return true if you want to handle this type of event (category, element)
+-- return false if you want to ignore them
+function filter(category, element)
+  if category == 3 and element == 1 then
+    return true
+  end
+  return false
+end

From e8dd7093226c6f3d4657817855a25ac21eef5726 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Thu, 28 Jun 2018 11:58:47 +0200
Subject: [PATCH 002/219] feat(influxdb-neb): New script to send data to
 influxdb

This script uses neb events and not metrics.
---
 stream-connectors/README.md                 |   7 +
 stream-connectors/influxdb/influxdb-neb.lua | 215 ++++++++++++++++++++
 2 files changed, 222 insertions(+)
 create mode 100644 stream-connectors/influxdb/influxdb-neb.lua

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 8074e07a9c2..17c12de4c7b 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -4,3 +4,10 @@ Here are several stream connectors for the
 [Centreon Broker](https://github.com/centreon/centreon-broker).
 
 The goal is to provide useful scripts to the community.
+
+# Influxdb
+
+## Influxdb from metrics events
+
+## Influxdb from neb events
+
diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua
new file mode 100644
index 00000000000..13ceebe4264
--- /dev/null
+++ b/stream-connectors/influxdb/influxdb-neb.lua
@@ -0,0 +1,215 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker InfluxDB Connector
+-- Tested with versions
+-- 1.4.3
+--
+-- References:
+-- https://docs.influxdata.com/influxdb/v1.4/write_protocols/line_protocol_tutorial/
+-- https://docs.influxdata.com/influxdb/v1.4/guides/writing_data/
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Prerequisites:
+-- You need an influxdb server
+-- You can install one with docker and these commands:
+-- docker pull influxdb
+-- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb
+-- You need to create a database
+-- curl http://<influxdb-server>:8086/query --data-urlencode "q=CREATE DATABASE mydb"
+--
+-- The Lua-socket library is required by this script.
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Access to the data:
+-- curl -G 'http://<influxdb-server>:8086/query?pretty=true' --data-urlencode "db=mydb" --data-urlencode "q=SELECT * from Cpu"
+--------------------------------------------------------------------------------
+
+
+local http = require("socket.http")
+local ltn12 = require("ltn12")
+
+local function parse_perfdata(perfdata)
+  local retval = {}
+  for i in string.gmatch(perfdata, "%S+") do
+    local it = string.gmatch(i, "[^=]+")
+    local field = it()
+    local value = it()
+    if field and value then
+      for v in string.gmatch(value, "[0-9.]+") do
+        retval[field] = v
+        break
+      end
+    end
+  end
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+-- flush() method
+-- Called when the max number of events or the max buffer age is reached
+--------------------------------------------------------------------------------
+function EventQueue:flush()
+  broker_log:info(2, "EventQueue:flush: Concatenating all the events as one string")
+  -- we concatenate all the events
+  local http_post_data = ""
+  local http_result_body = {}
+  for _, raw_event in ipairs(self.events) do
+    http_post_data = http_post_data .. raw_event
+  end
+  broker_log:info(2, "EventQueue:flush: HTTP POST request \"" .. self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. "/write?db=" .. self.influx_database .. "\"")
+  broker_log:info(3, "EventQueue:flush: HTTP POST data are: '" .. http_post_data .. "'")
+  local hr_result, hr_code, hr_header, hr_s = http.request{
+    url = self.http_server_protocol.."://"..self.http_server_address..":"..self.http_server_port.."/write?db="..self.influx_database,
+    method = "POST",
+    -- sink is where the request result's body will go
+    sink = ltn12.sink.table(http_result_body),
+    -- request body needs to be formatted as a LTN12 source
+    source = ltn12.source.string(http_post_data),
+    headers = {
+      -- mandatory for POST request with body
+      ["content-length"] = string.len(http_post_data)
+    }
+  }
+  -- Handling the return code
+  if hr_code == 204 then
+    broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. hr_code)
+  else
+    broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: return code is " .. hr_code)
+    for i, v in ipairs(http_result_body) do
+      broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: message line " .. i .. " is \"" .. v .. "\"")
+    end
+  end
+
+  -- now that the data has been sent, we empty the events array
+  self.events = {}
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add method
+-- @param e An event
+--
+--------------------------------------------------------------------------------
+function EventQueue:add(e)
+  broker_log:info(2, "EventQueue:add: " ..
+    broker.json_encode(e))
+  local metric = e.name
+  -- time is a reserved word in influxDB so I rename it
+  if metric == "time" then
+    metric = "_"..metric
+  end
+  -- retrieve object names instead of IDs
+  local host_name = broker_cache:get_hostname(e.host_id)
+  local service_description = broker_cache:get_service_description(e.host_id, e.service_id)
+  -- what if we could not get them from cache
+  if not host_name then
+    broker_log:warning(1, "EventQueue:add: host_name for id " .. e.host_id .. " not found. Restarting centengine should fix this.")
+    host_name = e.host_id
+  end
+  if not service_description then
+    broker_log:warning(1, "EventQueue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found. Restarting centengine should fix this.")
+    service_description = e.service_id
+  end
+  -- we finally append the event to the events table
+  local perfdata = parse_perfdata(e.perfdata)
+  if not next(perfdata) then
+    broker_log:info(3, "EventQueue:add: No metric")
+    return true
+  end
+
+  -- <measurement>[,<tag-key>=<tag-value>...] <field-key>=<field-value>[,<field-key>=<field-value>...] [unix-nano-timestamp]
+  local mess = self.measurement .. ",host=" .. host_name .. ",service=" .. service_description
+  local sep = " "
+  for m,v in pairs(perfdata) do
+    mess = mess .. sep .. m .. "=" .. v
+    sep = ","
+  end
+  mess = mess .. " " .. e.last_check .. "000000000\n"
+  self.events[#self.events + 1] = mess
+  broker_log:info(3, "EventQueue:add: adding " .. mess)
+
+  -- then we check whether it is time to send the events to the receiver and flush
+  if #self.events >= self.max_buffer_size then
+    broker_log:info(2, "EventQueue:add: flushing because buffer size reached " .. self.max_buffer_size .. " elements.")
+    self:flush()
+    return true
+  elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age then
+    broker_log:info(2, "EventQueue:add: flushing " .. #self.events .. " elements because buffer age reached " .. (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.")
+    self:flush()
+    return true
+  else
+    return false
+  end
+end
+
+--------------------------------------------------------------------------------
+-- Constructor
+-- @param conf The table given by the init() function and returned from the GUI
+-- @return the new EventQueue
+--------------------------------------------------------------------------------
+function EventQueue.new(conf)
+  local retval = {
+    measurement = "centreon",
+    http_server_address = "",
+    http_server_port = 8086,
+    http_server_protocol = "http",
+    influx_database = "mydb",
+    max_buffer_size = 5000,
+    max_buffer_age = 5
+  }
+  for i,v in pairs(conf) do
+    broker_log:warning(1, "Conf parameter " .. i .. " => " .. v)
+    if retval[i] then
+      broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+      retval[i] = v
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring parameter " .. i .. " => " .. v)
+    end
+  end
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  -- Internal data initialization
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " ..
+    retval.__internal_ts_last_flush)
+  return retval
+end
+
+--------------------------------------------------------------------------------
+
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+-- Function init()
+function init(conf)
+  broker_log:set_parameters(3, "/var/log/centreon-broker/stream-connector-influxdb-neb.log")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = EventQueue.new(conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- Function write()
+function write(e)
+  broker_log:info(3, "write: Beginning write() function")
+  queue:add(e)
+  broker_log:info(3, "write: Ending write() function")
+  return true
+end
+
+-- Function filter()
+-- return true if you want to handle this type of event (category, element); here category NEB and element Service Status
+-- return false otherwise
+function filter(category, element)
+  return category == 1 and element == 24
+end

From abb9e3cfeec6b82ba3373a87eee17580b4945d68 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Thu, 28 Jun 2018 16:32:42 +0200
Subject: [PATCH 003/219] doc(influxdb): README.md completed to explain
 influxdb stream connectors

---
 stream-connectors/README.md                   | 32 +++++++++++++++++++
 .../influxdb/influxdb-metrics.lua             |  1 -
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 17c12de4c7b..3fcddbafab6 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -9,5 +9,37 @@ The goal is to provide useful scripts to the community.
 
 ## Influxdb from metrics events
 
+This stream connector works with **metric events**. So you need them to be configured in Centreon broker.
+
+To use this script, one needs to install the lua-socket library.
+
+Parameters to specify in the stream connector configuration are:
+
+* http\_server\_address as **string**: it is the *ip address* of the Influxdb server
+* http\_server\_port as **number**: it is the port, if not provided, this value is *8086*
+* http\_server\_protocol as **string**: by default, this value is *http*
+* influx\_database as **string**: The database name, *mydb* is the default value
+* max\_buffer\_size as **number**: The number of events to buffer before they are sent to influxdb
+* max\_buffer\_age as **number**: The delay in seconds to wait before the next flush.
+
+If one of max\_buffer\_size or max\_buffer\_age is reached, events are sent.
+
 ## Influxdb from neb events
 
+This stream connector is an alternative to the previous one, but works with **neb service\_status events**.
+As those events are always available on a Centreon platform, this script should work more often.
+
+To use this script, one needs to install the lua-socket library.
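+(One common way to get it, assuming luarocks is available on the server: `luarocks install luasocket`.)
+
+For illustration only, the script builds InfluxDB line-protocol entries from the *measurement*
+(*centreon* by default), the host, the service and the parsed perfdata; with a hypothetical
+host *srv1*, service *Cpu* and metric *cpu\_user*, a flushed line would look like:
+
+    centreon,host=srv1,service=Cpu cpu_user=12.5 1530000000000000000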
+
+Parameters to specify in the stream connector configuration are:
+
+* measurement as **string**: it is the influxdb *measurement*
+* http\_server\_address as **string**: it is the *ip address* of the Influxdb server
+* http\_server\_port as **number**: it is the port, if not provided, this value is *8086*
+* http\_server\_protocol as **string**: by default, this value is *http*
+* influx\_database as **string**: The database name, *mydb* is the default value
+* max\_buffer\_size as **number**: The number of events to buffer before they are sent to influxdb
+* max\_buffer\_age as **number**: The delay in seconds to wait before the next flush.
+
+If one of max\_buffer\_size or max\_buffer\_age is reached, events are sent.
+
diff --git a/stream-connectors/influxdb/influxdb-metrics.lua b/stream-connectors/influxdb/influxdb-metrics.lua
index 9d7a3d79ed1..ccc86c6dae2 100644
--- a/stream-connectors/influxdb/influxdb-metrics.lua
+++ b/stream-connectors/influxdb/influxdb-metrics.lua
@@ -41,7 +41,6 @@ local event_queue = {
   http_server_protocol = "http",
   events = {},
   influx_database = "mydb",
-  influx_database = "mydb",
   max_buffer_size = 5000,
   max_buffer_age = 5
 }

From bcf2ff0ad008ec34c89a0d2951ac97a99ae4f92d Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Thu, 28 Jun 2018 16:38:11 +0200
Subject: [PATCH 004/219] Update README.md

fix(doc): precisions on script names.
---
 stream-connectors/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 3fcddbafab6..ed4ac1bfe4f 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -7,7 +7,7 @@ The goal is to provide useful scripts to the community.
 
 # Influxdb
 
-## Influxdb from metrics events
+## Influxdb from metrics events: *influxdb/influxdb-metrics.lua*
 
 This stream connector works with **metric events**. So you need them to be configured in Centreon broker.
 
@@ -24,7 +24,7 @@ Parameters to specify in the stream connector configuration are:
 
 If one of max\_buffer\_size or max\_buffer\_age is reached, events are sent.
 
-## Influxdb from neb events
+## Influxdb from neb events: *influxdb/influxdb-neb.lua*
 
 This stream connector is an alternative to the previous one, but works with **neb service\_status events**.

From 22e6ba4c50692c51429e03485dbbf16cf226663e Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 29 Jun 2018 14:03:25 +0200
Subject: [PATCH 005/219] doc(elastic): New doc about an elasticsearch stream
 connector

This version works with metrics events.
---
 stream-connectors/README.md | 28 +++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index ed4ac1bfe4f..21f72dafaa1 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -3,7 +3,33 @@
 Here are several stream connectors for the
 [Centreon Broker](https://github.com/centreon/centreon-broker).
 
-The goal is to provide useful scripts to the community.
+# Stream connectors
+
+The goal is to provide useful scripts to the community to extend the open source solution Centreon.
+
+You can find Lua scripts written to export Centreon data to several outputs.
+
+If one of these scripts suits your needs, it is recommended to copy it onto the Centreon central server
+into the **/usr/share/centreon-broker/lua** directory. If it does not exist, you can create it.
+This directory must be readable by the *centreon-broker* user.
+
+When the script is copied, you have to configure it through the Centreon web interface.
+
+Stream connector documentation is provided here:
+* https://documentation.centreon.com/docs/centreon/en/2.8.x/developer/writestreamconnector.html
+* https://documentation.centreon.com/docs/centreon-broker/en/latest/exploit/stream\_connectors.html
+
+# Elasticsearch
+
+## Elasticsearch from metrics events
+
+This stream connector works with **metric events**. So you need them to be configured in Centreon broker.
+
+Parameters to specify in the stream connector configuration are:
+
+* elastic-address as **string**: it is the *ip address* of the Elasticsearch server
+* elastic-port as **number**: it is the port, if not provided, this value is *9200*.
+* max-row as **number**: it is the max number of events before sending them to the elastic server. If not specified, its value is 10
 
 # Influxdb

From cafcfd378f6d3a45c7da082a1a33380f670b7498 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 29 Jun 2018 14:18:20 +0200
Subject: [PATCH 006/219] feat(elasticsearch): A new stream connector working
 with elasticsearch

---
 .../elasticsearch/elastic-metrics.lua         | 192 ++++++++++++++++++
 1 file changed, 192 insertions(+)
 create mode 100644 stream-connectors/elasticsearch/elastic-metrics.lua

diff --git a/stream-connectors/elasticsearch/elastic-metrics.lua b/stream-connectors/elasticsearch/elastic-metrics.lua
new file mode 100644
index 00000000000..2cd3bec4b21
--- /dev/null
+++ b/stream-connectors/elasticsearch/elastic-metrics.lua
@@ -0,0 +1,192 @@
+local elastic = {
+  rows = {}
+}
+
+--------------------------------------------------------------------------------
+-- Check if the desired index exists on the elasticsearch server
+-- @param socket the socket connected to the elasticsearch server
+--
+-- @return a boolean true on success, false otherwise.
+--------------------------------------------------------------------------------
+local function check_index(socket)
+  -- Ask for the index
+  socket:write('GET /centreon/_mapping?pretty HTTP/1.1\r\nHost: '
+    .. elastic.address .. ':' .. elastic.port
+    .. '\r\nAccept: */*\r\n\r\n')
+  local answer = socket:read()
+  if string.match(answer, "HTTP/1.1 200 OK") then
+    return true
+  end
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- Initializes the mapping on the elasticsearch server
+-- @param socket the socket connected to the elasticsearch server
+--
+-- @return true on success, false otherwise
+--------------------------------------------------------------------------------
+local function init_index(socket)
+  broker_log:info(1, "init_index")
+  -- Initialize the index
+  local header = 'PUT /centreon?pretty HTTP/1.1\r\nHost: '
+    .. elastic.address .. ':' .. elastic.port
+    .. '\r\nAccept: */*\r\nContent-Type: application/json\r\n'
+  local content = [[{
+  "mappings": {
+    "metrics": {
+      "_all": { "enabled": false },
+      "properties": {
+        "host": { "type": "keyword" },
+        "metric": { "type": "keyword" },
+        "value": { "type": "double" },
+        "timestamp": { "type": "date" }
+      }
+    }
+  }
+}
+]]
+
+  header = header .. 'Content-Length: '
+    .. content:len() .. "\r\n\r\n" ..
content + socket:write(header) + local answer = socket:read() + if answer:match("HTTP/1.1 200 OK") then + broker_log:info(1, "Index constructed") + return true + else + broker_log:info(1, "Index construction failed") + return false + end +end + +-------------------------------------------------------------------------------- +-- Initialization of the module +-- @param conf A table containing data entered by the user through the GUI +-------------------------------------------------------------------------------- +function init(conf) + if conf['log-file'] then + elastic.log_file = conf['log-file'] + else + elastic.log_file = '/tmp/elastic-centreon.log' + broker_log:info(2, "no 'log-file' value given, '/tmp/elastic-centreon.log' set by default") + end + broker_log:set_parameters(3, elastic.log_file) + + if conf['elastic-address'] and conf['elastic-address'] ~= "" then + elastic.address = conf['elastic-address'] + else + error("Unable to find the 'elastic-address' value of type 'string'") + end + + if conf['elastic-port'] and conf['elastic-port'] ~= "" then + elastic.port = conf['elastic-port'] + else + elastic.port = 9200 + broker_log:info(2, "no 'elastic-port' value given, 9200 set by default") + end + + if conf['max-row'] then + elastic.max_row = conf['max-row'] + else + elastic.max_row = 10 + broker_log:info(2, "no 'max-row' value given, 10 set by default") + end + + elastic.socket = broker_tcp_socket.new() + elastic.socket:connect(elastic.address, elastic.port) + + if not check_index(elastic.socket) then + broker_log:info(3, "Index missing") + if init_index(elastic.socket) then + broker_log:info(1, "Index constructed") + else + broker_log:error(1, "Index construction failed") + error("Index construction failed") + end + end + elastic.socket:close() +end + +-------------------------------------------------------------------------------- +-- Called when the data limit count is reached. +-------------------------------------------------------------------------------- +local function flush() + broker_log:info(2, "flush called with " .. #elastic.rows .. " data") + local retval = true + if #elastic.rows > 0 then + elastic.socket:connect(elastic.address, elastic.port) + local header = "POST /centreon/metrics/_bulk HTTP/1.1\r\nHost: " + .. elastic.address .. ":" .. elastic.port .. "\r\n" + .. "Accept: */*\r\n" + .. "Content-Type: application/json\r\n" + + local data = '' + for k,v in pairs(elastic.rows) do + data = data .. '{"index":{}}\n' .. broker.json_encode(v) .. '\n' + end + + header = header .. 'Content-Length: ' + .. data:len() .. "\r\n\r\n" .. data + broker_log:info(3, 'Data sent: ' .. header) + elastic.socket:write(header) + local answer = elastic.socket:read() + local ret + if answer:match("HTTP/1.1 200 OK") then + broker_log:info(2, "flush: " .. #elastic.rows .. " data successfully sent") + ret = true + else + broker_log:error(1, "Unable to write data on the server") + ret = false + end + if ret then + elastic.rows = {} + elastic.socket:close() + else + retval = false + end + end + return retval +end + +-------------------------------------------------------------------------------- +-- Function attached to the write event. +-------------------------------------------------------------------------------- +function write(d) + + local hostname = broker_cache:get_hostname(d.host_id) + if not hostname then + broker_log:error(1, "host name for id " .. d.host_id .. " unknown") + else + broker_log:info(3, tostring(d.ctime) + .. ' --- ' .. hostname .. ' ; ' + .. d.name .. ' ; ' .. 
+      tostring(d.value))
+
+    elastic.rows[#elastic.rows + 1] = {
+      timestamp = d.ctime * 1000,
+      host = hostname,
+      metric = d.name,
+      value = d.value
+    }
+  end
+
+  if #elastic.rows >= elastic.max_row then
+    return flush()
+  end
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- The filter function. When it returns false, the write function is not
+-- called.
+-- @param category The event category
+-- @param element The event sub-category.
+--
+-- @return a boolean true when the event is accepted, false otherwise.
+--------------------------------------------------------------------------------
+function filter(category, element)
+  if category == 3 and element == 1 then
+    return true
+  end
+  return false
+end

From 1cbaa6e0229c0b35779975c772ec41afa0d3d50a Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 29 Jun 2018 14:20:09 +0200
Subject: [PATCH 007/219] doc(elastic): Documentation completed

---
 stream-connectors/README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 21f72dafaa1..a0ec347c1ed 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -27,6 +27,7 @@ This stream connector works with **metric events**. So you need them to be configured in Centreon broker.
 
 Parameters to specify in the stream connector configuration are:
 
+* log-file as **string**: it is the *complete file name* of this script's log file.
 * elastic-address as **string**: it is the *ip address* of the Elasticsearch server
 * elastic-port as **number**: it is the port, if not provided, this value is *9200*.
 * max-row as **number**: it is the max number of events before sending them to the elastic server. If not specified, its value is 10

From 03818abfc11fae9468434656192bd954e970c936 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 29 Jun 2018 14:22:39 +0200
Subject: [PATCH 008/219] Update README.md

---
 stream-connectors/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index a0ec347c1ed..61eeea8e14d 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -21,7 +21,7 @@
 
 # Elasticsearch
 
-## Elasticsearch from metrics events
+## Elasticsearch from metrics events: *elasticsearch/elastic-metrics.lua*
 
 This stream connector works with **metric events**. So you need them to be configured in Centreon broker.
From a43ff7c5232a1932a9004f0ef3519eebe7ecf15c Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 29 Jun 2018 14:23:00 +0200
Subject: [PATCH 009/219] Update README.md

---
 stream-connectors/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 61eeea8e14d..828ba7ee7a7 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -17,7 +17,7 @@ When the script is copied, you have to configure it through the Centreon web interface.
 
 Stream connector documentation is provided here:
 * https://documentation.centreon.com/docs/centreon/en/2.8.x/developer/writestreamconnector.html
-* https://documentation.centreon.com/docs/centreon-broker/en/latest/exploit/stream\_connectors.html
+* https://documentation.centreon.com/docs/centreon-broker/en/latest/exploit/stream_connectors.html

From 077964a4e26b50df7a9a47b5ef2e788dcf988408 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 9 Nov 2018 16:43:59 +0100
Subject: [PATCH 010/219] feat(warp10): New stream connector to export data to
 warp10

This stream connector will only work with centreon-broker-18.10.1 that
contains the new parse_perfdata function.
---
 stream-connectors/warp10/export-warp10.lua | 99 ++++++++++++++++++++
 1 file changed, 99 insertions(+)
 create mode 100644 stream-connectors/warp10/export-warp10.lua

diff --git a/stream-connectors/warp10/export-warp10.lua b/stream-connectors/warp10/export-warp10.lua
new file mode 100644
index 00000000000..a7da343c3dd
--- /dev/null
+++ b/stream-connectors/warp10/export-warp10.lua
@@ -0,0 +1,99 @@
+--
+-- Copyright 2018 Centreon
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- For more information : contact@centreon.com
+--
+-- To make this script work, provide a Broker stream connector output
+-- configuration with the following information:
+-- ipaddr (string): the ip address of the Warp10 server
+-- logfile (string): the log file
+-- port (number): the Warp10 server port
+-- token (string): the Warp10 write token
+-- max_size (number): how many queries to store before sending them to the server.
+--
+local curl = require "cURL"
+
+local my_data = {
+  ipaddr = "172.17.0.1",
+  logfile = "/tmp/test-warp10.log",
+  port = 8080,
+  token = "",
+  max_size = 10,
+  data = {}
+}
+
+function init(conf)
+  if conf.logfile then
+    my_data.logfile = conf.logfile
+  end
+  broker_log:set_parameters(3, my_data.logfile)
+  if conf.ipaddr then
+    my_data.ipaddr = conf.ipaddr
+  end
+  if conf.port then
+    my_data.port = conf.port
+  end
+  if not conf.token then
+    broker_log:error(0, "You must provide a token to write into Warp10")
+  end
+  my_data.token = conf.token
+  if conf.max_size then
+    my_data.max_size = conf.max_size
+  end
+end
+
+local function flush()
+  local buf = table.concat(my_data.data, "\n")
+  local c = curl.easy{
+    url = "http://" .. my_data.ipaddr .. ":" .. my_data.port .. "/api/v0/update",
+    post = true,
+    httpheader = {
+      "Transfer-Encoding:chunked",
+      "X-Warp10-Token:" ..
+        my_data.token,
+    },
+    postfields = buf }
+
+  c:perform()
+  my_data.data = {}
+  return true
+end
+
+function write(d)
+  -- Service status
+  if d.category == 1 and d.element == 24 then
+    local pd = broker.parse_perfdata(d.perfdata)
+    local host = broker_cache:get_hostname(d.host_id)
+    local service = broker_cache:get_service_description(d.host_id, d.service_id)
+    if not host or not service then
+      broker_log:error(0, "You should restart engine to fill the cache")
+      return true
+    end
+    for metric,v in pairs(pd) do
+      local line = tostring(d.last_update) .. "000000// "
+        .. host .. ":" .. service .. ":" .. metric
+        .. " {" .. "host=" .. host
+        .. ", service=" .. service
+        .. ", metric=" .. metric .. "} "
+        .. tostring(v)
+      table.insert(my_data.data, line)
+      broker_log:info(0, "New line added to data: '" .. line .. "'")
+    end
+    if #my_data.data > my_data.max_size then
+      broker_log:info(0, "Flushing data")
+      return flush()
+    end
+  end
+  return false
+end

From 050bb11ed1300686f43bae4a21b38c6be8a1c282 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 9 Nov 2018 16:53:03 +0100
Subject: [PATCH 011/219] doc(README.md): Documentation updated.

---
 stream-connectors/README.md | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 828ba7ee7a7..88a8787f17e 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -70,3 +70,21 @@ Parameters to specify in the stream connector configuration are:
 
 If one of max\_buffer\_size or max\_buffer\_age is reached, events are sent.
 
+# Warp10
+
+## Warp10 from neb events: *warp10/export-warp10.lua*
+
+This stream connector works with **neb service\_status events**.
+
+This stream connector needs at least centreon-broker-18.10.1.
+
+To use this script, one needs to install the lua-curl library.
+
+Parameters to specify in the stream connector configuration are:
+
+* ipaddr as **string**: the ip address of the Warp10 server
+* logfile as **string**: the log file
+* port as **number**: the Warp10 server port
+* token as **string**: the Warp10 write token
+* max\_size as **number**: how many queries to store before sending them to the Warp10 server.
+

From a74b87e544aa1a951d3e9dcca5f9a8de490d867c Mon Sep 17 00:00:00 2001
From: quanghungb
Date: Thu, 6 Dec 2018 23:21:11 +0100
Subject: [PATCH 012/219] update the doc link: change to latest instead of
 2.8.X

---
 stream-connectors/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 88a8787f17e..eb741671608 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -16,7 +16,7 @@ When the script is copied, you have to configure it through the Centreon web interface.
 
 Stream connector documentation is provided here:
-* https://documentation.centreon.com/docs/centreon/en/2.8.x/developer/writestreamconnector.html
+* https://documentation.centreon.com/docs/centreon/en/latest/developer/writestreamconnector.html
 * https://documentation.centreon.com/docs/centreon-broker/en/latest/exploit/stream_connectors.html

From c7a53556be867cafd689d7353d47cb9bc76f9beb Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 7 Dec 2018 10:36:38 +0100
Subject: [PATCH 013/219] feat(splunk): New scripts to export to splunk

---
 stream-connectors/README.md                   |  33 +++++
 stream-connectors/pictures/splunk-conf1.png   | Bin 0 -> 48092 bytes
 stream-connectors/pictures/splunk-conf2.png   | Bin 0 -> 54000 bytes
 stream-connectors/pictures/splunk.png         | Bin 0 -> 14871 bytes
 .../splunk/splunk-metrics-http.lua            | 130 +++++++++++++++++
 .../splunk/splunk-states-http.lua             | 137 ++++++++++++++++++
 6 files changed, 300 insertions(+)
 create mode 100644 stream-connectors/pictures/splunk-conf1.png
 create mode 100644 stream-connectors/pictures/splunk-conf2.png
 create mode 100644 stream-connectors/pictures/splunk.png
 create mode 100644 stream-connectors/splunk/splunk-metrics-http.lua
 create mode 100644 stream-connectors/splunk/splunk-states-http.lua

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index eb741671608..6ef91727e11 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -88,3 +88,36 @@ Parameters to specify in the stream connector configuration are:
 * token as **string**: the Warp10 write token
 * max\_size as **number**: how many queries to store before sending them to the Warp10 server.
 
+# Splunk
+
+## The proposed stream connectors here
+
+There are two Lua scripts proposed here:
+1. *splunk-states-http.lua* that sends states to Splunk.
+2. *splunk-metrics-http.lua* that sends metrics to Splunk.
+
+In the first case, follow the instructions below:
+
+* Copy it into the */usr/share/centreon-broker/lua/* directory
+* Add a new broker output of type *stream connector*
+* Fill it as shown below
+
+![alt text](pictures/splunk-conf1.png "stream connector configuration")
+
+In the second case, follow these instructions:
+
+* Copy it into the */usr/share/centreon-broker/lua/* directory
+* Add a new broker output of type *stream connector*
+* Fill it as shown below
+
+![alt text](pictures/splunk-conf2.png "stream connector configuration")
+
+## The Splunk configuration
+
+An HTTP events collector has to be configured in data entries.
+
+![alt text](pictures/splunk.png "Splunk configuration")
+
+## Centreon4Splunk: A good alternative
+
+Here is the [link to centreon4Splunk](https://github.com/lkco/centreon4Splunk).
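+
+Whichever script you use, you can check that the HTTP events collector accepts
+data with a quick test; this is only a sketch where the server name and token
+are placeholders for your own Splunk setup (8088 is the usual HEC default port):
+
+    curl -k https://<splunk-server>:8088/services/collector \
+         -H "Authorization: Splunk <HEC-token>" \
+         -d '{"event": {"message": "test from centreon"}}'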
diff --git a/stream-connectors/pictures/splunk-conf1.png b/stream-connectors/pictures/splunk-conf1.png
new file mode 100644
index 0000000000000000000000000000000000000000..00b532cdfafcce71376b252b6cca14943bfedad8
GIT binary patch
literal 48092
[binary image data for pictures/splunk-conf1.png omitted; the patch series is truncated here]
zv0v;}%gDDFM7=vjm(Rh67_U7Rr8hGl0~C_l6l!-1m94J4wRyO2izNESF#qcf83)nf z#Z_pqPR&&GRG6X*g`9>>l$hIBuep3=Fh<*X)58Yl-yRRaENj5JCK4QN+nJ`yw1Gtg zNE4V~G(;bQpURVl18^017X6b~0AoQ+OpFZkz-LF(T8ADmi70YiyjqlTS#)oHeQ_WG z;iXVANT*$P@MBSkN(fmf=c#{K^OeswGRNxd?}`d2SzVEtC0e2>B7#>=Oa2x617v)n zxvboPOJTa|dF$HY1Tz6tqc|ZuVYj?4C}iWXBB7x%UvHmzLPEkaoZ+RHv&Gkp3E>B1 z3(MV9SqPB?AjkRkK3pT#{rWzvN>jigJ;3jT02^VBh{`arM|?Y8RDoo@R({|zG;!X) z+mP3oOV-#QCJQ2`W+-saK2P|*IK>=&;(*Vath0oHdQ%H?b4%XXbTfDNLBMFFr(ly} z7K2(YR?kxzHahTeqsF-!qs%^H3#C%3`%D8!Z(XYhM!ebzShItz$BWbA4zQ=+<0gi_ zSDuMvNOoh#To~9Fxc;umh%o+7BNH&FJC)(+r7k3cI$y)CKi17x{&4bI^Ha@__>U-k+c&%#-=+ z!y@}#1F@tr84z9YWaR>G*rhsA&|GeKbUv>$N)&l=tN|O79JguWPitL>YD+CqTOY}A zle_IMk%|r{xI$018`mbn{NMqdL8-a&I_fnBDlvJ*SLCL%xKR0UF!Bw6~~IUAAu*sbj{l#GG6@HjbH+(`_Vy#h*qLTQth;1 zsU)zks`b~55s;Q%IdFvl(Jxwo=(jE(862bvdakSt&^pPir>CbRzTynYmZqkxY;3f= zmZ~u>Hhje@9=xTr6l$xC)L>86fD; z>nkfOzsYk(Tu4@J;Uc@0sHn)u=YVg~*Ehf~-R!@KI6=>TaT7PT<_iEJc4w{;Zz3^^ z0W614Ond)KL@hg4H-fbTi&*}<;RIj;>vemr%khp*`P%>y=qR|qw|5(Q?SgvPcNLbU zG;hu@e0T4BoBhk7^HQmnqd}bXeQKr9xY0qCMeG+lkRtsoxRpuMvH7 zjrotYbx235akZjs$JnvpJ5kDb$IbWd1Qz(#K2W^K94?)>lx;Q+cw2a69r*78hn%6J zrUtm|cZa1S2N$}=;;GK?>h@ag8EoWJpbx72kyfSQs>4eP(eg!CCyUOXC*QyVBIhr* z!=g!%plL45ut?PT0eXWaVPel(ItllAOsB#@(K-}YG%|uwJuki-_06cJ1n%bl@h1lG zSW@3k%q=M3i|uv~Q@YK>#ib~58%h=mTyMGjZ6PxT;fgvtJ+2O!u6zjJ zA*+6s$iyWW{YFkCV;F`UtUl{TRpqTxEsQc*Bu5at<|nI`D$^Lay49`NqI9X zpI25J$>iaginm-O;6{aPge~{0_Y$=t900@*QmVfEU@Ypc#c9I({IMI?8R%8M9f}@pX z&A}B}d#QdaTv~@jSs8#f`pfbn9`Tbh5phmoKDVsdMlmG75{=JRL}o@$rRDo5Cxy!L z-3Lq~V$8QcZEeTqK0e7_(%fYziU)zF#Zn6Mq~~nH%u|n@{hTt(Be`n96`t>JGGTA@wxX{%H_bbE3B!Z=Y&68OET^U&USSIi9`D43(Y=-AL0ps<7ztVJQNrkx`hx zIh_bQlY)fIk=<(?%c01JdqXi#1lk!v>8T}lYyR)dr9Z+tzJxM@{=!YZi^A_uS+w%w z1O}iIXTKp{l zwu>|r%T285jDx5mBQ428+fH1E6gQ85ui_ut>dEeA8nK{-fQ^=uC8K0w`PK^qZ_^9l zSG`F`w|+o68XN>VlE@Am9Av^Ah8k0+pAT>+qD)HFf2l`leA<+UwHX`zuBjJD%j^-v zt7ejNh@QSNwQuv*wHpx#w;q;TO)*bOc2V1jq+QZV1WZ?t&roe+tKvhns*1|v$qw7% z7H>(2FC!;US3{(Frq7?N8%_`&dxZvb0>oEVx?kIt$Ijj9@$N$R)w0b!b(&arA!Xdv zsYGQLh;Lv}Pdn6?)KZJn$z*&B9w2ESzyzFqZ-M5zyRcpvensD0}CG%5!&Lf4c7tDNZ~+rc31W^P{qH zN%dXC*=+_);7}exWgKLj!7*Gemb-wqgknEE4^}*47$3r_m?-)Ts(3s`uq&Lmzr7sd z=PLJuK0M-fj$a)spVk{gUEpj0rGVvMES7~^kuTsI5A$H&718~d@uuRjTM?5B z5sjrj$6m)6z1PA@W5fmTbR7DBWx@GBG4I}L-V*`N?_U(u&Zg$n8v`hm=_h%m>6Gxp z?cT*6bpOa>CGKAOWO2CKcfWj`2^gw|W4yKlT<+BkJMX&fs863h*)nza#0famFD%mH zMIxa5runmw!%|nQO|A5WBYA}?QKGu2sEA&a=_iZp_fKx1zi7EwQSFdcoUAgys9*&J z1>MDzh?p)w&z>R7b88<^g#l19H#Y}LHd8k;T5QG>SrS9~DZTiM0NAc~)hGX>1$ZZb zGG*iA;66M6*j z6@>#Zi|qb$Dk4JJfxi%!Tm}~yyj_2lLFt#-mvOaK~9GlJR@9XdME z3$XyUXo_W(y&`8lb0FW}F=8L4d~NVG;Uj=}<#SnvQFGlp+UqPyeeAJiedB`2|1O)z zvcA3!yvr|v@eZ`TtLrwPok=iK5v^GP$ds7)&>cxOwbb6JOxNL6wNjX;mZB6|wU86( zh28H~Yk@pIJ^p4V4yU)#dcS?OPMdd`r1-e2w!4xAPS7-Ai271Ii}j+GAPCBC&h-b% zF;tb!fidXlD=OA`!KN!+^1;Q39r{#~#JP;%l7 zXem1*_*<8hBI=3-DlAMpKUoFr0tDmg7%+^o1J9m4i;0abNFS+Rc(M>clCbwtYHC?> zZOc$cQ+s}yNSll^89q zfSnmw?}T=oQEHQRjUA0>P4IS?dfE+pXvv)0hH_Fvw9E8R>2RMAkg!PihIUn@&?bJ>+mn;vjSjj@-4&= z41eq)yOkxjP+8T2rD8Q0P}v$kGR)ph$-A+XFQVX~Mz9~k&@NTwsjD0$S0(iH4%v{s zFVm}aVkz;yzU+GgEvga{vQ-+m9rW{m)v$P~02W0+2q_*ru#*CGAG3UbYclu@%*$Vt z%h+KiU>EXQAHY!~l$f!`newR)uZjGqpn;sX656ocuM7|c#pcopPh1v#uEj3ZdS*Iy zh{r>?KEzg!<%sKABHdFw`4rs`rKB&)8G7R_=n{#FoBtoK8 zF2hAi0hukn{fnnp97re6U3(^TRxR4ZJL%P`o$Cu2C&O$dA9a4mKiU;<$4%0XvXhga z4AM+2N|qWm18=+p>SUPHjczKChFh0cXJ>a}-VHa5`shgqrHG6zx8mS?%g)?gz9ei-M zh+mhedRdUF!uXLQs%0^fJ^Jo5Opr^Vx;U;#sht>X%6|@kc;6P8P7M4wS0`nqi$u5wfmHZQb73 zY25H`spEn}`O(OoOcC^6Ho@m)!pm<*f%i%UU0Y_s=c%)p1Qw_Cr{1#OnQKldByNl* zBlj=F95IM)5}p_Q>m}lUcO6(iAz=N@al00?_3;)uM5kKoqLs|!3 
zX$)w9x3OMJ;NcpTp*`MLH5PpX3`4tDF5pggEDP|C*}T^|^}07h0YfQSw4hUVy>44) zCl?h_c6Lh>KrcrejE8V`ygp~}5UsxRB-I1gHOO=Yn={D-Zywt0=eZy*Y~UAxym#&Y zxe653)ZP#uYtQf{j>0DSqFOV*9Z;%=M@C}1jYXvWrT(cHb+fd*!gnd;)VvitFNR@G ztlqs^r(U?)>=gStQ1m#;#h*Z*+nIh^OFH&rlHX9H1iB6zqaT4QYNUey?ykdW|sogApASyf|;H0(|g8JPQmob$<0 zG#Ck94JOVwBzrb8y}D{4hdKP();h9MoeI;j8=Z2CIx{`FS^t+8pp}k`&xtd8MW9fx z3^1XjW(d-w*!Nt97emjXVk(>Ect!KQZwfa(yV-l~XPCi8IiqON;a}w_H$SUI9IOQ{58mLJ39-?d4 z%&e`qkl=k%h6J92XviYYa40j9UGIqXIJg^CW2TfgzA1a~k*e7gBOmvW+%Gt|jA^_J z^!A&nLki!kO>}lu#YwY~92uETQlBd2r}~-QKhiLx!v@l{@#~5*ATKhzm;b;vtmcIS zGK*P&14=3!cR<*xh-WI~`XQNEUFfjf`r1^0;$sHwz`J#>)m#&d^W#GBqn-vcp#QXs zW%b|q3?$?g{IxN1;V!<6XHK~ zq4rD`U{u5sYWz9I0OyC^6EGMI@Lck@MF936ZxnTesx^8p$LZYlbRY3`KBvCFbst=- zExLKS%mRHmHD~k^=q~JCI6%AJ#PS+N>rP-&Io^VLIaayS$273i?f&R~Gtj+*$H+cR zT0%zJY+>-um7%cyiPNm%AKY+C3;!WRt~SH|joVh;?%D1wGgw~HqE1Fk{dAPhd8)c{ z+N82gU4!1U^%b)Rp&>o~v>8#&MLMQ^IQ{CgeeX}HRba1+@wCx;}z-F@SW z9wcuxI!k+8cw)N&j@8OLykD?enKay;7GGVfV`{qc*@;?2Py6`-VncxLCfVOhZ;(DI zLcV`lHXKfcu*C~CW7{&GEzg&Qz(%U11z!Z=17>wnV&lbHNxocr7$L*&!E+d#%{DeA zC42iUqz=N@Mbr)iChAYu{``GB*(#{|GR3Q%h_)9WR0A?@jDKdB(LdRGm_Z@&+nraP zq?BJb6soRF7W`qwvyM(_@tL~5{KsW4sEEZN^kl7`D(sWtyKASMmdv)>$00(Ht#}#o zPk-JMW)PDy-tyc+CJNc^bzlI;*-qovma4%A2DC2LZW)~NF`ia)jUEoKttXBqW<)XH z|4_*Y(s*ZKDfu{5BGwflER<8SW4rh^zSX9}#!~&NkbRZHz)34Uczt34x4n7l?Om_y z;7}~#A0Cr_dhXT8k=)!4XhV%0Xd}Y}o0RE^W!cE8fqP`TQwe+(j)w~&kTSD!j~4Ek zzU4BUilT3UgEpjGS7t0S?Ipd&L0jbg+I7#U6uGg{(T8SZipB+MTxq*Ieb33VJaC8o z&&kY$HC@H*?QNz`cCQM00i_oOF(OsEw5asnK}tY@ z0HI3nU`0C8g49TtE*(NANG}4?doQ8)7I-^&?|9$$hHw1d7>_?Vkdd66y-(R|%{Av- zC2*6`(xb){#xcB|(wj^bzBbhMfqR-loGm}KLYiv7LyDTTwD0&DpKLhbWP#X)K_Xz` zQL!8hC&wZHb6Z2g_M%aW9!6Wgnx;!ED>cPt=Bjy1C`GKd4%c_>c(X}~La64l4jWsC z65j5iVvmdv<{7f}VI{Wdn!5tR4BAel`;nTxA0^t{9SbbylAKx+5g%LEq9SH(%0OhJ zQcnGN3BCHZeQUlA_TtM{cN+_qM5~8%RAqmCTQ?YDzFf#OC@E+*!sEIN%YW^$IVQthy*@7r!!)S~yfxcFXK*qO6JoOZhysh@8=7(j_;)ph!Isu222ct` zn}#$# z@G;la-`}iDAMjr;ALFDA<&Jz94z%-xPEz>Nt|+2EHN}C*ffm>0Qb6YH6Gz=q;&7*9 z>u>bNIpQ^CU*aFtP_R273=|Cy@F0r3AFz%x0!KjC5LF6aU&9 z%kj6fN-IDR;`mcuCY-3cwe(_1p`$Nxii~?-B#Ux!^`%Kw-uk~qndtEVV^f%>QFLMB zl`U&*<8HI8uWo2~QaJHr0-cKQw4(FkYAI^vgl)*FXt@#HEF~nrYU8Xlc&W+KCC-Pu z<(Pzyg79(ucgChmF8z+@(E;`{zLhOQgUs}m)Meo|n!LOBY#27Q2KaMZDOZW$mQ`WD z2}YU<@fJ9AF;Hk83keHXDxS7wvZ_kd`M)A3KKiMgA@}#7fCU5loD#*#1SM4uoHsfzn`F7Ui4s30F_jC&HT6uMGX@ zWbhVDyDv*}LvLTUQGT-5r(YlwKF>oT*buV1nZCw}}nKZPg=Vufs`#6TnZYEN97 zbu_5TcZ5d}Qt`G$O;&2>8;#I0OLMQQ$C-R9^p?iR-HbD-ky7xN^`uG8VCzv~K^0r` z*jgdQC{V9RE~8&3P0nR)K@8&}A1aFnF`VRmc%?c7^R0i|p@CE@u0ySSPr9d8)LBQ& z7hX1M(I91_lVfyzr5pI!qjO(h(I8$FC?FH0Cc`HS+Y3Sz509Sr?}RV_=cVEP;~Xs} zo|iRmfKKMGONVh!ddB&dGyV!Eb=|fer8O2nBw!9zxD_4MQY=v44W2QGyzphc%c$@I zIY5f23sBT9ZGTPEP5X-l{sb;9)yHqb3in2INnnPx(JZlTZLvnJ@sD;hJu(8W2w^_2 zgOWfrzoG;e9P|YL@K|>$SbR{ToHCDXq4YEJ${0cMp5(S`{yR2FOpGXN$Wg;~P zH&ac-7f4J7vpsC@zbNKB09!KQvT3_7SXWFeUExhP8#?`dWr(-|0srX^%|UB<3Q8n8e6(Oxe5Hxr>w zEVlr~7G>s_!MbEprUZxZMPiMc46nMiZv-Su3-_7wXNh<4y9>xn$=G>kizd1S(!AyNm=-X>`Z1D2B9z*$9ep8rRs}b;gJ#;L5^uv}QYf9C zB*KWQT)lUusIGY$y{x}-&A)PpLiRhdO4a)nkf#CQfGxD%3jxklY5pdlZuNdsPU4p5 zB1ro7O-d|P5v8ZAs|y-50D~K#L<9ad0Eb(qa}0%C4XR`?-&+k<2#y-i7ZX#6M8>Wk z%)^wYjoM99u&(?S0s(kO8!frKIFUTgD7#KesjV3m{vH9f{XPuBj!L80k?*y!b z?skB&5&V`X@>OZEkma=l^bSXMgu(FogyTiC>DG1wnoTphjNy~{3w;~kcZEO$KqTKs zm$!lI^Yp&0WnGz>h-hT^ZOdmp-|$q$3BQ)G+M4EHztkdI@6dEcqEOc4-s5ZRY;3%A zau$z5=g4Q6ejId`3bjmlVnWfG84(COYr?lKpUgx z84k+QKklAMLs3T3Cm2%EI>V1sXGvxT{9~>|Z|;U53#uy0ZlvC{(?bd&BqHs)xDkQj z#mWiYi<-JMT1xESRrVXyb_Zli95I`wD-S`~v_q?AeHgA1XN$A}* zEIVtX=V?p3}k^G$R%xUHPiiL8`VW%|VyCxsBkg?95DaXmYBU0+Qp-ess222o(+ 
z7P6Y8-nMfDB;S|k!$zTL1HVZW&LF%QQ^H@F%1}h7F7wU!5Hc5aFsXHgR83y&?OdFv zcTjB2^WOXVgFfXz^Zs?J;_G(q$+xlVdZHA4zNvlF-)ALzMHD5Bla{ zFj|ELIgMWrDub4L9WK`FvNG#A^3SNjT7SO$l$*tjJN~5!KP}99A(qVeYtGa(>}`;B zHTz2I`a;)>ySow2YULY2-q})j$_Eo)#i*Ioi?>Kc#so$STYvESxD~>dnEv`Aaw9cw z20oa*<3p^04tE(AZFSA=Qc0v>!kAEwmAwcJcW6T-UxMVhMsto!$OOE2%c-%!9j=lH zo-^EB989oaUTXa;9w(~~rhwVzfo2^OWf_ZK89qL=C12$&%I*oP4D+0RpbJ*qp34*G z%FY46Z=h*nA)!2SUx>mhaAyF5WQv%4lx+OLKS4$_8u?O#1w4IT!)K)Bd$OK&9e0#{ z`}PoAQz~?-^tQXpT9K{2E~-ImGl3C1pIWTKJuD5$y!N(F7(c2zWjmW0cp(N#ESl5e zs<`oukvt!d*l3k=@^(F@jLH(dLo!BlKeUEWb|4_gDPlmFTP)qQIAFw#!*l-8uG38X zUJE+NeC7*F&z0<>+gKXTaNGg7m*U?pW6QnGzfVSDqq_|ra}&R1fZcvFGEk@_O28#8 zdDXokO{@1=r0nrIJ$ZY~&n)ws(j)Xz$R`2F^k84(XWBZqZkZ9%#qnTuHk0eUN6gR* z(LOl{?W|aepvG&+O`c^smaFCiWComdX=FI9k36B9P&}woB~MoV=F{+sm6@3zDTI-; zjfbuE()(KAxSvDW8Xb#uK23pEtyOsBD-K<-ylJiBC#BO{`(7Qnl*}MLbm1A%HFb|e zbtN&OCzxTGaS;k)(Cl?}#jWMJ@K-}ahR2qfX)ll{-;sr91-aaYA{mLp^~HUmS?qjN zfhT<|Bv-RZ=%zI;1s*;3hvpTn-?jmDCHlU*iB6HIllu&kSk>#MMW_f zH_NQN%Wr=4R(5&nhXT8Mp!&ArcEZ3pP1a3;qoVMEm-;7?02Hh#Sv0_gF^$zi6k<)S ztQ^ZnyR2<-p4D1;oMawA*TzAR)N}OlgG<9vk0Wk%`Q23^`p_7s_MS|X_G6B%q2aX( zOv#FVi5Iu3SiMa)ln;(uix;sCp{EbR&Ua81Add1GZz9aMMmFvg$~^J#FyIFcC)=OAE3X9dsF(sgm>?4(`Pu9LHt zP3#KdvlnF=Rq}rIP*e+|Oi8S>q>%-^^gUJfMDT3NXx?>r^{G2J$mg3V9W{d31#nMu z%H}B3dJsWlz_Dld2sc*%@;!aegIS=gIyAny6d}A{d*VR?8DXK%KDB#*{vu~@pU$(+ z=o8lWQHQmQ1qu0KyCn?^j(JGtw3wL3!B+C2gvaE(%>6cBokA4U;GcEOtdP)XkYDP7Fg@?# zj_hgnGCTK%|LUzuKI3kq!}B#MKMAoL$$DHf%Y*r#Qwpe301qFc4KP~g{-<}9baVub znm&(FK3K6)P;F6)PWAO09TZK};uHb5P)faqa3FztMCnTg?NFYE7x6{&((qj7=p9}R z2e+L{0#Td?B3*IliiyO7%R{7s^0#LQz$%!+)KSmtFWb~0;mrMuIbLX73iLUJSYm79^j zvR#CpaUe@Q`MAwL6dA!=W0;U@n75qG)G2ajT18Ex_}d`?_b{VYPJmeK;o)Ixn}6W( z8t4jCRJuW&7TSG9mk^fDof5UBJp4o27d1>8DQ+P?HNZ;9D565S8xvF?AL!lpBL22c z?Cn;|2icEVca`r8xEI2*S*;!z&kzuZ_Jrq{OT0T0kZ3s3?ii?Qv$R+?&f+({5pKY` z+#o5V)2TxhBskP`1q|Ce2N8|)rrVe|vF!2uDN!zKLSxLl;_3WS{_}2CPz>jzVp>0D zn9kaBvlrvi#Y|<=c_F#S!62S~!_2Pe=fi~&e1sW3bg<+g*Ar}<-iYmtZ5|7QL=f9i zeq|-}T`>-MgcQn>X0)nEz5m4;p%}kQBh?tfi9RYdy3J?T9SUKcY$APUfRbWS^0vNC zQEC1W_k9T5gsy86IzRGYt3?e%A0!QAdv+a@3@sxVTcdZhM3ozE>68kU~HDy0W~oGE^%e;?iHf6DJH9!a3`s4K`qu{bkND_F9KnaR=J zJX{P%q@Uqm4fS48O4KQC^;{_~ivQxV$Pn^Dn#U@@rjbmFSQoQ!W12Mbjt(a8qO5(0 z;J1(3!Og_7mSNl!c;AgTdY>V=#?sRE55UUsCeufP3~cdkjnsxe#E#0wZqG!nnJ zb8P&Iw$;Dos=gb4FcDj5E7&dPmn?)>AI`X};er95A5 zV-L8104jUn0_>?!CsFo~@q76U5hVG~^jEB&BLF-Bx!`b&H=&)e^Wr~NWe@)T&rFnq zfJj+}keuT#;1^tY>uJ)ly+@doiy!+NAw-WFECGw|oL7}$O2M0v5IxK^HrG^=qNmg_E2=Dmr=za3)^m7;ANN5LA!r>*`vFd_f{L zbaXQLdd<@*Xt={ftQL_r^eQ`zkiN@nUb`o+Dj&*!6d5bt7+r>(e77o8eZIq!KoIhC zBho2ct_pGdbvG^Q@nTn+XZUYRyPu$wIhgX{72dF9l}AvhzF4>leIgcJ7O3**$&)ew zkQHHs2K83r;lVW3*0Qxb3L)O_nUU`Hd#M*1*GNyzTLva;8<5^h6HX_qU1j5TCDd*< zs*{biBHgxqk$g_Izi27ML@hfAO2CpG^|*;?htBF~<;0I2st~v&*8rZMD0gB<>;WA-~{uS&^L*b&ps;d9%%r1|`){ zbB`}D=X3zKU_DQ`vWoElc`Rtj`O?D(+m~H3=E{7kjlY0iIACxP9(jM`rUe5badC0u zVr~Kq$Pbc}LG}QX%Df1qEi~gE_aD*|(yf@6+6*~Z5ry>BRv+ri(!N03#;@U^%h@33 z#~rXgII&$>nl;z#v_E!fJQ<=L_|egI`Ld{sQjG7Ft7b7wOzj#^b%Vujo+T?4@-&S@w0n@>+RLc`G_rcs>_1lZihtG+piUa`VJC zxKNgR~y>YMqW+gV2 zfXK%9w8?$5`(NThknXaxCa7vI{bI`PH}AU_p)$vznG&uA{5~-pKo;IOT~?Eti*m#T zX^QN7P4BjT&?j*687MH2s=qhYM1A@*AN;?)?dfP75HGamyD`nzanv4966R&#)zQ^C zTARu8_Ob2m%>zg8?dXuu->OjtG98rM#!Vov191;AlY&|F+}=?VkmXQPQW9WF*N;B( z7pzea@3-7=GunKie_u_VMre$lo)scux<2Ea+e|U00ZjNm`+R6|vL06@U3v?n&d5!_ zMW^W@o?Xgy=*vISt?M4UswN)>U08wJ|hr0 zz%*qd!o&A*o&{oqBO|)X%8w1|eXM_NuaHn%dkDaF3-cG(lf4d))XT9K0)vLHKOnR( z{XfFh@c2?n#nvtwS+6atK>oNX^39}ok`TC0&qkQ;!OlCE+|z|xhb^|`>;$OGwuvIN za$>%l-LPrzN*)0|$9e9feOsy1?3BnlF*zi#g0ZoZebCScU` 
zH&ObxZwp2-pktj+)354gJd}KTrd{1$L~3w)6F8NR0tq6hNi{b&8%tOs5YjR-lssl? zDnoB>SaYgXZ8eX3+%-Ji+qB4d{kWZnr{mfCSo(x%w-amJ=XV5Xe0iqk4)Fb~kD8>3 zn6$*9d4btZFz7pY1I0yQZauKvi!~mmwbiXQ!l-KQ&0grSZ6P23=Q-b(_HO`lsh&yB`!|0uC|%HH#d?ad;)*Fh_= zh(2=+KR54t_%DUw+EUo)*CnHs2+!+_6!Ga^VT~#HBYu4n{)&9Q3~?5o|IcXs;lC7y z=f8V%{I`NaSceWhK~LlCMt6qH=j&L-$U*SdpGqK<2Y-PEE{mY1v2f{J*nS=eu*ZV2MLuckB-UR!uy1l%G3NsXf^StOS406bceI`Cz8CZt{u1} ztj+~E3MJPWv&wvwoUxSoXiZU5h+Cv`wg75*D2n)bkJYlO;$1b$H9JgpWW{E4!H()i z+9)SutGGTmF2eJY_2Bo4wAeGXbDBl|*^1x<0j!M+kQ#txu+PI3F|mFJJE{~lTU%R@ z3`AxVo;2h5JgRU-`AW+PFTbt`LiMX`a7%%$*859}*2s_blb?}4yOx*G3{KRK~hmt~m(} zy^1T~EzVVISkcsGmPK|p#f8TRGCB2SgtjO+@fli94rzs1b->(9d5-I(!yd?^T~un# zBf7P<^4*b8K0dyF{qL29kJmefAhK0L>odY!#@UBk)%!_2J$)E|Nc+HG*FghMU9}*X zw6A;%-aF21Avwn0+ytUbDdDcufqZQih`F}ktGL4`@~9jdCw#o&#dNUYhs6XZ?;l<* zwst*+e^On#z>0ZCf78af8alv4ETgEncXJCZ{+wxXwHT=co1J%n!?AR%q(s-eho}kf zQS7s7KD>Cfn7V09sp3H+WA`Ka^VigA_AJ=+?BX(a3Ie%EvzR^1cpRYWGS?z zj%;q31$4OT=+)NVud<%&W(g6S*V#LqDwUvz;2%d5OaH_P6c7CU{5H3?B2yBIl!<_ zKh#_+J$TZ|T4AyQ+cFuCjGVqhlm?kcT15Jk=BEWXu(4-{#pVQcE>Z~paQB5Y_PBm+ zW;yR2WX!>x%}HE1kHkFKVsrkdKQ&|`GlIS^KcvOon_e&OE>ZLryT7g8ynFgRC&Dzety6e6)2~mD?$+FtugnU3J|pn2#x11?}6>>+XVn^$?qX-ya;MrE6S2o`xoe3UEoAV{PHR~;?j z9=CN^cG<9{m<6^ukSJM9y#bpqr*>#*XwZ;ujpuZ0k7f$|6wK=TkHjfAwsj?!j*r2>@T;8feJ?Ux;3OzJ8sQ+r44#Bks#CocX2pjPOX&#IbG5`TsMv2u8S z<=ng~-JlT9=DwxQ+8v^@gWUFP@6O>0kqTU_e5SbAjQ5kwlZ|{w(+M0f8Pyt!ULxL@ z$<|$ps8s0m>5IF3GOO;8K*WU_wTL-bh|4$Cp4@aZhP&wIWMyUjV#7M-U}*=0ygEic zuB3`Ktno2hVp=*$$HU5%#e}AjR`F2|@#U@22g|uKbX9KM0z~r*`jy`;=jI|%tnq@2 zPi%a9wx)U5b13=#gUFCo#$!3M9NV}$=!WSt=}9ar89gvAvi@)#U&4L;!#_}FMFj*2 z4K>fMp#-$_G+?lmtpiRjjJOn+9LP|IhO~jiaffg*TiAJG1rObg*i^Y&_6e`CbM-eb z#$Cv5M<3NBmhgTd%G*P+pF5}d1=Q`!XN#M+FTRmYP%J6rbRKvO>`C^-4#s+PIt4*3Ac{D!4gP^-AGS^P^wF)397) z*cpqUGz&lz6Wv`4_$%PgYSBtL5bI3Q2yN5iW1VvoW|+e>9O8}jyFUXGY!<@tr(KI@ zn82IA{sbj#TX-zI^WO%@r#<-Ih2{pi=>N~~NvAr;(#(t#s;H>A)&mV=!-to^Pu1kz z+c1D&OUcWx^yi{{$h|@1b-UCvrw$Af3CGfom9Qr=E3i8`@txOSAIJBdA|dYP=QD*Ma|~yM_D;U<!kcvjO{EI+Yb7F2%{8+$@zDKvPjCI0yGDQ z_K4T(zzkzk65jU54gW?G`FRB15%^1b)kL?Jbk zm2cM+sRl=Qm*#I-zo<6AY_3J(5Kda(k|K*~22YO3NW97%cQ%VMO$1IV481IGEloUa zu5*iLvAlC_8OFoR*T2@;-p=|k>oy{rMN5RiNyTb4c|~4z!#(khti7lis*`=9&R}Ho2 z++%Ajx4zssf*OV5>|Y)s@30F}+h*jzO&hl!U}d1t9rZydD>Eq(;#$!Y$|0k#NuM0- zCu)!v2dVt}XG+Di#glF4>l^~N^aoM%5l^LSNBwWu=|x8|5@!f34e1`&Ipp+p_8roV zIk>kZT8t&1olkf?SY{}1w{vKzsfD9Zx#65ZSQ#tq);GIk@H1^%(8UmrOUS2!>kkk7 z7#FVel77_-V~&p=(5KvG)!9U(vA6%5u1^H*2F0MZM(W&1$9cSVJzSoQH6*z)OSV=2 zV5Xm{oUQ6ow0bhc&m~(Af&<23MtU~IptPN*s+oTo!tpuCwiKD*SnrA>9}kbXe;6^v zoSC<@9TgrgcnQK@%|N((heO3_H{H0?n8+Z+@-6QtK4$Uc?4^m?N3ssxA@r7SzxsU0 z5WziqNFN_NX_;b;BJh>^=DxQpOcNYX8JoY!d%OC;BHm`wjp2ZK7JAw zDqT2#2=bEyD_lO=WPadb2KaIW4BD~4C;^n80MUm{XDeVY%|kcZwoxp&X=0?pTt*XG zG?m9%KI$|!DY%xipiHTjf>PXYxLKW9gj%U2pL?lzqDIU64 zf%ZYad--6|DjCVi^=o@Ej4i<0*U!%nhyh|XbrwrlPkSsUJ1bS`4(HQDAXZ27Zen5$ zK_o;`yY)!KWaAp&>0#}%I&@)>hQ|4{xl9pRoQf6`a$$~iE$HUWI=0vlE~65!=dxOy zi_)r~z@HCYWrHM@ay7s?Tc!wKPe1hgBi*j*m$~03m2YfB*r41b24`?(Q1g-GY;k;FjP{aBtk*-Mw*l_s0Gv=bZ80 zx#Rxhy&5ofckS9$MOE#!=9+5~q97-Z@*eL!002Odln_w@0N!>30I)dk-au=#^a-;7 z0JJO*6%A)41Gmrij&`ON)+V2w-R(_2n|!x01pvN-OOtowNO>xMzI27M!3YW^!@7@p zoDHusD2lQ#7?@8dh-yW2e_HVVhZm2Wq8>4Go7_DszXin*zcjl1Duy)pWJD_ePs=F0Ct{t*- zORk5_XdD`bXG2D|r%z{&Agp2iCr&v;i2b735+e%#1@l?5{er;U#%S~PFo%x8&c6A8 zPmp?)k9*=N{c}v8)1MYLXC`|tkXwR(OK){D!Pq`cseSNbo$O}Csr$!=lawGMIJ91) zolf|a<2iJ`#;Ez|O|wkh>JDBpJeDP>*F}(U< z>BR)n7VgBz)>AiJY$3*OV8~wCkRVcIa5UK_9tNCQ0D4Hqr}s?LS>4|Z!>M*O3?t|y z-&5tqIx2-*YC85a<)=Cgu+@=#iPcaV$c@#~x>ofo*H9kGi`TNUk^z5goM@WJsyS)8 z8aI;XyPdSM^FPBi=>9E6Xfi#Km%%Lgl4QzWHO#%}7^a|Q&=aSoXEHOxUQufjMBoN? 
zPtbMq^fg!7_|G7;?>r3 zuyv5)7N9Byq(6owx%KVaL<)W(TI4_2tGMzqiI{5`IJ7(V<^6aa;j6*!#RareqZx$(h zjqG=iKlX3;)lR=%YhIiXWYFPU;X3G0{p{AW;RW^`je*G1Zdhi*b8{`o% zAQ?iE-N!3NXEC#^jn{2u!PTT;DgQm3q6&kVOpkE+yABUK|4@q6A4xUavs=~7H#MBG2hfkh!yG9FjP zNTvpr?=fZoxEi7$fqKs#LbCyU4KUzD8 z)wAo~t7n_?fE7{^5ZD`j4dlmo62~0X@N_u-BTLJ<=&yWhG}-DE;j~kuMtC%%trNph zdixelVjj3Gwm7cr+-=31!b&vH#JJI3DMRv`o6>A4FGtUn{H+!R*{Azm`?bDDkTj(a zeLlAU+zl$Xy0R6+;EGQ#n-_@jD-7PQ4zXm&#EBx4%ynj%72tl9mMYSehJ;+315P2{ z_#+q3pK36tPMM+_Sz(7MZ3g#fSAF-_vqayTafixp3^E6|Bz|V(A~zcFZcL_{R{2`% zHL;n@-_m*pJ~72d2ji`k1aYc}c79AO8b#jhAU-4?2fUnfFmf- zb%<#on8d0x#B->m^8oog#})agUpc4a9P7=z8nfuHFbAnMVEkr6~l&Ak5MBhc)ra3oRcs=Wo{7u*$wK!YIprw@q62mTlEF!Un zQt6^5^uGVC49+KQQ3MsBKqJ z?$;f zMC36@k`ny`3Xq;mt=FNnF+BkNBRY=0jw9aw2;YJf;RnN066*O)L#C7Y9nxD8HuSr_ zY$eM^U?-`m;U5T4+Z`7%@`G@vwDr59CAKrc{))Bf^uxep?EW$(bEQFcB%T-xTr8x1 zgsAH9>^F51GY0MD#Ft7E*2ET$e@xJJJ zCCmDUdyuVLm%3;sD6dbjm9*&%Lj!}KFeWN5g3d1!h4`!YfZw8-?PmkOB8}UR*3?rb z&>S&6g%j7LrUPp%8sV3(qS=+pr)s`Ce_#}K&Ahy*4eZHMD_r{HYn&FL*dYvH7W#2v zDTep9ozq!5txm%0>-+UxuY7C_t~*;wHg=5MoU{VEy?0%D9OtWg!4o1ZZ@~=-IvJ%( zV!(HnR4}pyd45#F22@GZkKrFqweixEkwq}U$0>@VEprZm_YSEU;`hA0xSfwLbN>K! z%Hf3qA41?mzef;<)}SYs-K*WcJtFdE*|mb3XqUlaH4lCh_EwuWE(;-d!qMtR6uz+f zJQFJ@9z_R^Vj^7Z7otQ7( zk|?8Z^16SMvM72Gkd70bAay^Nqc~=mDxU$WbY?hu*TCMSdrqb0a;YcW?n(G78w27 z^)7yl(I&Ft4+dm>Bo3>6I|=M@7XsnLo?~$q=Hr>c*G5BMlb6@69MzOxHbnQT(}E9i-c!-QduqRQtUBl!#au1c z!*wxe<2q-JpEz>tbDX$J)z%q*f*J*Pin~lunlIbB6F>TmOYg3isX%^{^@r#6o#7#4 z-zu($f7T&Yv23*^j-uY8v5ijESqFimzRxHDwnqTO_Z_VShMd3iu(@F}5ec5KgwZ-A z<>FXMfxJ$Y#?d6^7%l@Yj4#;451B=!f_yZMjpg)%()LNTpQ-R5;tC@jLU=!`%uwZv zu1P?#61r_ogT_}thnCr7kD|>M!m$Pm%!7Bx`95?r@cKOCac>)N;D68_`_CCUo+32?}d&?Y1ge73xbi69G?pza`Ch zslince0_MTZcChbysuVSo0l5P4Tb+0kEFPETCBr72X|QM`6r*T{29SdJTrIq+uAgy zsM!?8A!FTiIAuEUpV{I)w$;8h>1`DUToC=&{euVX1-u*yNVhU+KND$R;hgcL1j?MJU z8r9_Bqq1J4w;^yn{!OC3@;npGT7trSKDzrv@7r_i7+#iD1t;)9OG1CTxl6Ki1*Pv8 z)x_E@mkI}AR6CC6X)=*6?xV#n_@Z?EuT@PMdt_z7S;BWtN--`(%#GH6FpcL@Zt8A_ z1&;6T)9>>M1Xfvm=OzMv#f=#km5+C+Ga_!YQdW)5f%?Yh7)-GC=13=-lAc%K5mgg+ zbmm|ZgqHUmL9ibkK9(6&o3XvXk}pzT^_tI^K!bKfdkIY^0DzwK^%v%Qu7E4F62Vzg zRuo|a83B<1em0YX0|598kQDi<@*RA%?5m?%2I7RZu*4*l>CYD(U z$r008xEQ~L$KjMsdyW>9uI57gOJhp%er0@RQ&tmoU8xd$EmbKVxJWx>g({as33f2N z7nzZnrQPf0qi;LlO>W?Y$Mbp@*Dm**%czU2`epjzu%e=>>KEvo0D$Tsb(1d!7_T44 z`QNLN!@Pbv-u{a7dIh|T`FrUkd%gHEBEDWe%e?Xu|G9jyyP?%JW!8O1VsEZY}I4CLuvB7R9|D+(2xJ zQ!nZ;0N3%8K9p<5q6Vqra(Z><{9NHb<_?NH%49UzTShFglgrSExn8W> z0Gv>7SPI*rtl`H%vNIv<@@1}cj|B!8>V~4q_ zaQM?>_i*eP#w{JJSINn4wsY}+ z5ZDLG7Pw~D3)f4_`Yp1R2>bP!H{H1E&^|KSueJYx?WzF}?b>hr%pPX_c5bDJ=kNhE z%n7_cu$sWDz&0Ho=qEY@3&v#MZn99Ij7u!!X-Ydv!Sh4U)(q2Gh~&EC_xn>dLc-SX zlbeH`aZ?-w?Bb&O5;3z~Nw^eQdb)Rhvs(2&+kF8Om>wDZ!@NK?r}Z5B!86i^?8~sbeow%ekf*uYDobpacfX$?jZfSmU!*qS@I-lFc60)b2f{0_A_#sjHQq9P@zL6b+psMCqe32o`6 zDxm|$D7B%KqJ=1EDxsXLcoFOxhol>46L*{?2|VpO8hJ-)1wdfN8HG}pXiiDF3~qXX zOWLqtsYS*BkXFj3{t(QDM^rpRjCV_yhk3GV#N04Py8|D=ZnLqG`}&@@%j1&kN>=W> z%G#G-Y%&afT2DCPs)_eDm_>f4QrjXLB(W&1jr5|5!MXGl^p6Qw8N(oz6D2IBSu6MB zg;HLZr7Gmk+!M{~&uu#bMpnYu;F9_*qYSk-J$uO!kZpe^c8Fs%`I*hkTeH)uOw6MD zv5dAF^WC+s21HisYzy|8HT|yTH9Di9K1(i%p>iBx7s?e zn0nq1^1hI%MU}`u6q+yphK)zD)bHOGH|4< z`N2;TS?PUmGRuCGM$S8~s9}x)>4i>y68(wKtaImSM zU0<|l*^pDVXyMGKQH@w5=A^iZn5-Hpldu3~GkzcUfKdbbDu_tUF-#smECru>e1*^K zrD?9xC)A9!J;@k&2&FfDYL_oteef9tIkHZJRXh7x365|l)Eye2pC6fKp=f0pU+)94pEORQ}|hgX{P;FBHe zqnK|oRn$AkAewv&JthDh-O!?WX z!RvRx6026y-^c#Y@$UU~6@Y*F7s158)&J!&{(Z}j{j8{DD+B(o_oleA#O04J%9dj@7=4g4YW9--e z_VjHqFvkd<>eu`rZ|{0ZIs1q z^aBnK0s_Lr?f8V%Ycs(_=UbbJa|i^o(&7m%JZW-3MamD$O>T8srDECVkN1~#E7(ix zplrizN12GtnDcPT*XMK)PTG=K<9*ci^@%=w*g8HgRiJeITWW2Wixeomyu9AP!qQ<& 
z&)k}67tY+BhYh^2*?HavB;0JY$+_QGg<8dtVpaJZCb=kBqIwKNN_X3=KB~NSUB6;b zSzWDCJmc~GdvO13skW%`R{w88LPG62+v~G4g+diRk1I+lDvKJuTro2bhRe&)LwiJ+ zQM2f@mzBK=hDB1|2KuFxuWW2g3IBqaI=1{`dzZf_YF80wPSZYP|Z47>3!4uk!D z7td7ha(KKLY^w1u)aV~}sT4z1J@}Ze?;lvj<7J1=TJn8sD(*Bi1t0tWY;@)3=1aL` z@5;1r{DJI2Onhm|sdK@=4+0L8XL?20;Di0rJMLz7S>E@sWC2WY_R3XQrnDn?K!Rd* zj)mMX{bp*l{JbcrwR1s>iHnPCZEXd8EL5Nri@^Ir@;*RF zX|&Qzg>*?XV^|fym)3C5@ibsm7a$tkVa${?J>jH2jCbVQP;PLY7?KBJh*}DhHm>%PD@nH`Ry4nuRA>@WJIUW4?crLmu2QZ7RBw-2;wgJaS z8Tc6e$1U@&TCbRaZ)aipm6lpR!KVFb8Og1y#+WV|O5hEVp~?FO3Z&a8g6tM2(;Z{s zL6(4Q1gsph6!_A9O^jq76YJ-aV`&|6SgCRKavmNYvokX_dUdB`;!#NMCMGBTP7wY14a^)SklxK8d9IX9 z@4>g?V5Qhbo;Ru_H-3Hq-SScO?i0+l!-Cor(l`hTQua$y58? zE)o)u0L)sD@xbHNWBC^@3!o)1CHP~Ho>q?T@0eM68=gkzVV1D>wfUmeY9TGv3N0iQ zRT*GcXM@fw7txBTP8}YZjM4PO%(SLnK1n^^!I@tMHl4zDxa`(r;R^Up%7n730tTw{ zwbRZs)hImh4~y@2z6_5Bk0f0Sb=QU?;xnJBhkdM@N2+qf+2JYUmBygps%2K1YWKJrOtwqh^3X)%mMqd_%=S*b6ai8 z<6O&E5W{Xn)5@c##Rk6J63%BVcZLzp=_`OqHV&9rc~|b8B9jyV#J9YL2*g$H%_Eol zc&p;|Q%*z$9K6R^6I{?`h)`&XlSr0Tb1#vL66HdW6)_IJpwXQe(TnAGXr!{Z+zGTw ziHK|*|HZCyh=Yd@!Up=b=$SAr(B3Qw!*^w!8;OWmyK4U&;6s6+r2m>8vk|NPu!AQ= zA%3Xc%nHRlL}e4O-@UxOX=rE|=%fdGnzVH#Cslj*owZ75>2({cOP~EbDY|)9hIlP; zfDRuCq|#QzyKUz$Eefv(#VtvNKf&<~9g!Fc#5F;(4SBNR*6=3#7zXa`9h%wgZu*&| zqrf6ZDDme<>WM$BYvEza);Vl&*ca)@+vdN<*X(C#X_W|j@7v$n9a#IUSIt_lG`nB! zjBqu%<)FAOmMJ~l*S$EsI+M{5_^kfTJl_3*|Vwg)F?3#bM zq%{0p>X9don6F>InvJEaQpZup6}7hNWh!j#?2wa@)nii>7ZtTWoHUJ(kI%l@I)iI# zd*mvb!`weiw}0}!3!@3tU*bPnrVtUyf^U=xc=YH=!5Pe_)&5LE;+8l304w}Qg4Xg;Cb`qD~n zOMJ<;(21CbAm~Cb|1%u^SERf~z9xzT*sF$u5OE49VJ%qobq9mfHimhp)l^NqA4+;o+g@?a48;ICG1NWGXnHt}I*D;6?O;cE_y# z`Nwxq88pB5U>?C{*BmqPH+(|HAg7V{{TJfNQK0~3wyah_LKF3O1o)Cr}1U6u)|^zid6 z=)^rn)AzA3KC`h6F0`rgRt{rtkD$MT&-UxxXF&Soq*AHc;rco;2^0Zg{r*iV4n;=R z%Z)Q#UBbCa*LxG$IXO8q9&yRUF{^ql#@h$mxchCW^9=_KtDMQ$MbDOtv#>j^sWyVk z-CGcE{FWOt&y{rRwlp%tHA@<;Ns*k#BWb@=|l&!4%Xb9**tu%nj=Qe6ORAr~ z0@_i46ZsVnxw6v8r+pxDtwEjZVbEh^5jytBY=zK)-?PMeJ1A!gTBJl*Z$F$4p!ZD-Wf)ZB+2iO}C9%$C!XuNas_NnihzI$MFV(Rp7< zS9f`boch_$twO#Hov3(XVs~I)dL^~W_=ce8unr4hiNjT6tldC99~JF`L*h`EPt~{4 zXp@)YDzn828WlnXo?U{)u+#a#N?OFV17G^F!Aog81MOsVj?;p}%gXI3Go$D#WRmzN zBxMX}*)YUNAE9=nV~^BKtYPFLr>1vlEA3c^3}N9p<%g&FIym(Hic3yK$HBx*sB~`2 zbqMnSrVzW`B)*n{#1L&{Lzb;kIl17lurTSw`o{JRtx*qGy@T%9*sY)SOjVcRB)#%D zqhowY0=`FyTka1wC%aQQ@amWjOb*bWflEi-V(Af;MU@y9I}qJ%G6PZCgSF7b>HL8hqpi>j4vtB?L|e#>vRyQ-bxbhB`7bI-Q9j`v~+WZahD-cHkR3xISL)eCg}? 
zLe|~WBRk9->>%y9)V%;|m6Fi>Zh>BApRRtgGQ>CK_gY_7rA(1W78MdEt=VYe@;X!K zRH#7z*e2Nb+_rDm>2JCvEV?wXGz9Wc=j|_AKTz0?=w=H096^Ofwg6}_;`|rI$>mR7 z4-KI4-?d}W>Jm>)O;v$FCxh4A!=6+5M3+Dy@a0*aKjTwYtfTMd!4+3ZtQ413(Q}fj zh)YAW635;R0H|o`+`3y#KsS3xp!XmR1kPmu?AfY}zpAzJTU62yr*CCcg2v;qGiyJT z@Ov%WKE6NYfc=@Q{DWW@Mt1I112g2*IyCJ52EtjwcjmYpx>(eov4{gPjtXEV<F~o}diV=%ia*xr&6H zd_OnIvP+B4$-?V6ux4bImc$U^eAA75yu>y|GCh(yseF9c1^>7l)MWz$GC#gTo+;Uy9UX(6iCqyAtay|w3dZ8Hc$%)wmhR)N zD){E9C1Pb)gxyqUFviN&JrnxB0#f!{{^m~4_V4$Leu1a-oAFE$pyC1hp?Wl@SBz>( zF>HImUggeV39<;Pb|J2=0loz+*&9rDb!8h!Ie!@_)vk-gW0T5MsISZoo}Qg8nkkim zNR9l0Y7$mfKGP?=HYcejbMDqWuqstp&4#}?(Ue&$On1}+>Yr&Js&0b5j7*302a_Fx zYv5|XtRu`~1xAI2v@aGB~#$!%fb*P$ahIJ+~S zIeHT#vd2JVNHxs%Q!cW#{<9g?FrP6aEuMgwNVjR7XreU5IGoBz8IvO1lbTC<;+`3@aN4&Bm z`PGQgd`Xn}<}2*cu2QYViCm+M0p|H>Aj?aIrL#0umZJ7Zm9~seh=1s8o^NuZYn$)o z>*7@}SmQ)mnQ|uUs7G zKG!1anU{pBuFnnKlP~l9HmCKhtsvD)73PlwDDKw)nO4wzub&R^tqunlM#&|9r6ir|hK}A{~P4v_+qOd245d z`wk%B50uyNDqn4Rl)0JYx9jk4Q z3Dh>QZRH~oTHHf_Is5)J;Bw!KL{$IOC4MLm8f`t3W)7Q}RLZp!C}d@;tFu6PUO&&o zNnzY6qnYjt;_$6K@R?#iU8%EMJyFee8Od;usZB1^%q8Hpy@amHz*41+>rQH@c8v}P7t;7abVM{H7ccQI z%hw(gRyfZYm^X`8T*yd%ha-szs;u8Hwm)wQ>=m9(o1uPrCWXGqSwu{9+*Xi&&Qdi2 za}jF|1FIm87^2SXnPO!Oxnyb^FfdGAv>-wok1$JK>rtrVO56xLq@Yv3Unj2G@ML28 zoC!AzJMN~V8s#!M&246V6T@V%zuu+7X|8$vdqS%uihoS}Fkdn^yKP~xIl`FE@9AEA zjFeRO7z_x_bjklK{25Q2Ha8%_wsg98R2Cn{l@22^4WVb87~n?}C<#<_nY!N3t4gDR zvXD5>b0ss+eyfsi0QMt=q^6~VIrz}XW;D5B1Pj0o`!`@9(Clj)neqSnm$Ch?*yP`_ zgFWV1781Z&3p&$mOenJU`Ix@*s#yk@bNp`+2Q;GDmQDT3_*P>o{!K9cH}U#kjPC2L zHB%*T`vsN(`BsP|ZkL68E&4u`fkx>v zSZO4-UuL)Ywbftmx$E=8c^np4qNPYO!URt2_)p;Cao#_s(pi_4;GsGqNjer^y-iNH z(!~okSG36`g2hu={p->z-lf>q+npD1-;0k$PFb+ZC!HG3Cj0*BrZ-FZHMR1Fy)8*E z&b+>MTX7V6o5V)#X&(BJ3~2te9c8%Q>N=l4YV7;gS|vf?f%ui9eMdDKp%%AUd6~p@ z2Zz2#*~=0KMl6CF!{S+-FqIl2E}yq$IOs7uVNxc>)I2PA?p?179@mO8y#ZgUCg!d0c!~UiYoMc-GV^8$)&I_6gmNc`c4<>P?M-W!^O-^-VNWIs~ zdj841Na>N-OSVB#RdR7$E5FoSv0s0Pok*xe%s^3Au4KtZ`@t0o!U$@UZ~~)?so~oB zjNS8nBG;b0FH#bdQYAi4{G_h0ly8{^Yb&_bTh{f75AR=pVnngW%PM>9Dhmz&d_K5O z<69-#NiNca$|shgZ~C6+K~`a>$yd#CMN#T+Ba*MLbh)W?1+GVyHarQ7IaM+}IolJ} zGs7mhWcu(<3ADZY_r`fIu8@S^6y1)d)ULPhJ;R98I5UN&?H>|II;|XS(##<2v{c{p z^Hq`DYqdIo@2^u2>cISHa!J%Q>X6>p1rm!<>h-`+%ht~oqN`uc6vDZ_0DQN*RQT4ncDPS z)djC7>L~Kz!qK)KSYP9P4%&xvlwza@ftKnrDU&u~?WbF1wS6r6pjz%eW1Nha$EVe= zR89{?-FjLc2mSdO1a}hMS~>?Tv*M~1eOsO+wvJT)(Eo?2M9W+V-&GFl3GjD;w2CnAJ`bp7Lh8)@it+_Xe&y9MmSh>0S-p}dR z303e2!B2aGaN1t^txUV9`cyCG;Gr+XFf;j)n*M~hRLjOFcovq8byt5>6ZRmz%U;8u ziK8U;^{mDvqKtSv<+W}uK4X&-?;hvL&Cqkxd%D!zL5frwTm6Ync<$2mSn_Om_`CDL zHkC#E<%JEB!#>Ui0j33@bx78-#%vVeFpwLYoCodI zG}RBN96dYLJ|3W*Hm5w#hF;aF<&4#PzDaZ zDOkm~%Ee@qitzaTT7G17{%q^rWCjDDa?OVM*MPi)-ZAgW$`q}{ZZGH7*^T#264$@k zF0oGw(V1`(Xgrs*KAc-9X;5u2szH$pr{@rGmH(?()oe&PGa{hAP=!eIvaRru^z42)G^1EVsOW=VAiZ=xG#jfotIx0$d=CESd zcfjoeYF~@=j53HCF65zqNU2I;H+^zHg<1G9{+4}8Ny!Vntt*CdA))ks72|R4n1~l7 z(#KZ8Pom}oyD!kB_+d2VG11e&olLRnY#ZnORR{6c4a{kI@fSCe|IlD(n{F8_=sK@pIi|R|ll2Jdf%+zwvVoA!LumMFX%9LTRq8znQ)90q^_mgylw; z%d4xavorgLspIXHu^as~a_{ss^fX6W-*I{?r>5r&)5~6AQjRH zSW(?Zm(d%KnzFwUL`a!@O=BBbE;d|5jn0Nu zmjeOZcz>N~J-rHPR&_GrvMVnU^+x9TP3pY*^)nN&Faewwkh1&^K!8Z|s?C}6_Z`dk zO!eG}a<6N9DqCC~#fOf;e|zo7B?##le&X=xmVcWM4P%RTXqbx^N?azaYR&O%5&8)b#Fpp!G>m)(qm) zW;`|<^@4zV*!e|AZwb@WgLp3X{GK>~V>leA?rbv0WKn?0bR&O6)}WVefJw&owL5#0 z=XEE6r4}gD=)E}A$*);`)tOJRI8kD)UB6{Y&~jxVbxia@EBSgLx87nt-UM;3X#XpK zV?2BpN}#t~nQI%8RN=pFLiO(va%!9U3@wpvwR(E_XuXfS8FeQYZg=Y59eNf*m#xz3cBZuskwOmmRf>80(C>JzE zNep?C<8~8RNmNYo5q`)~*Lx1jPufaEA@~ljgMm(FmQ{Ls+Rj{Zc$Lais#EMo^A)5N zbd*-EV!wmuWhqqU&32CFTf8N|pInJ9UA%u4XUX0LjXm~75;WZsJl=)|ADJl$)C6mw 
zmhh}4d?d=7h0cJWzrXD&t(lsYUokDgW!jWRC!vf8r!@D$Z z;5XU6eAE7Vr_ACKEnWwc7GKu5JKsUHO^~K&`79r?xm062s=dIc;4~-vlu*#}#>ZM0 zl2eVSX_Rj!gXGv!dmk{SRmC*>q6D*v-HY?O3E*M1*bIGK)~{CJ+LNt6Jx$D;E+izq zOR>NA7m>4L=wl~Eqox=<+*|dpLRo#+%|ryv#$xK8sU43azV8@pVw0If=_ZyDrjk9c zXJ1pOo8gE?At{Gxgk$@r1t+3zV}s$v3v2=!?r(x8mV#lNJzkBAtj7D@sPK@GkkC+f zat6bb=)1usTIr)|jpd%Rk34j>%ru9S0zi_+JqI9mh!1rt1;Lwohr$P0z0{nVMG=uJ zNt&I&-N-0t2O%N&8G6ugRrPzdhl`Yu(2{T=Tt%3f7~>{PTKd0q5qm!`+)f(pBtAR= zoE`Oj?oH}!mwEa4_;`7jo}E1#ZmXUqx&oOuVZQY?{rp!ZL9hw^3wH z4r)Vq`_}n**8ll0;;;TlK9)xHaITfxNUVGW3`x3{-~!Y`pr zDBVxwbC>xlqEO5hyT7}83lEQn48M9hR_i!ae7=d3Z=C4m6$?rj^z+PnS(ns&8uaux z6{tLnyB79&=_R1JiN7Vha9S;Pr^5;Y`J}xL;b&Se;FtgZSL$HeAMQ5W{E)zQ`fM`N zbV~o=q90dux+MV3wQquTpJrxn(Gtr}a&^MEVVvb^vz7udF%7f+77Wu}+%mbESF2PA zSBsrIJj9|y8z0Ts6krIjcfl3VO~k;#!i5&BTkH7DbLY1J^=WgHi>0LhhrX}*yzk77 ziGcxCcj%;en|@N6aVFZs~y@m zhw@AyD}|4FkW5?#U^Mg%(8WiVv=xCMSuTT)A`V6_r)F43dQ z`m40)YlBB!16LjEUpeaCg)S!nEh}(Q*mXDe-vPjcY<^JqS|VMmI+FiThqIXky}aU|~!gKA6}_3mnvA_vCR@j;3AUlLS!q)GAxC2q|nVOQDWTq;#sd=gy*Q z-(2r!;Y^`jdFlDj_z!utSSGo0Iv#@6kn%kqS%gO%yL4=F_FJlnj8Ozz5F-@{2wz7R zW)h1E>wOObk=NS$cuNSb<;>UYyWUDOrdSQ^7q?on=ud zXQXLSD8HIvB|`R@xwdS~R6XE{Xf8DP?uO7g%%ZHqpa}*T(t+>zzL?Y#$GUFi$+j_cpEW`rJ4% zX`~q%N9Kn@8*vYr`gM4h@@HpTw{AW3DsoP8Po0zL5W?`abO;N}%_l z&h5Y{W1KdiA#i+Pf*YuK8y0~wVrwCVY;^F();WT(eG|%HX==J#Xe!aKEFVZkgP5V2 zCCeJu{mJbj*0Km>hYv@zRbjzTDkld~p~oZGQ&)35kzOVkzy}Ur&+U;h>h6DYDsv<>bKF-~=B;fuqEUD`dSgaqw#$KTE%&xls@TtkOa^On#uIwA< zT6s19>cD0p>6G`y(>~91t<`_A!mDs3g=6<<Cx=(;U{SyS1 zUzY8WKMLU*a91&vPEXz9OCf3^-@D{3q z#zf+_U>aYBLl&Qn<(_>Ovu57oo-5L?PyZzA!(ByqLFT+~Te(aQ!U}z<2`=t*VKA-~ zyuNk(qps)fzQDf9MEcKr7BydxdBvz-Q(siPuUXad~KI` zuk93djqZ&zf8zwr?EG#UFTZ80Y$xhT9f-?>jVALm%N5U{;Q{g0D52>rhY)5HkQuIXQ=Efs@$M|CjDFl!(e&atx)+Uen+*zC6FKdF?(dV@3AX-OoCZ4IQ1 zZ7}N3sVomtZBquN{%@plAiy_p*VkW-j*dQUduf$UpaO`O5j`C znQT$QZjS3rc~$Trlz&9FY|*2G#xH4R@6!r%rTtQ3)vL6hJokJigw5FFv`cZ)ZL()9 zeUDDbph7tNsj4JLGkF~|^tfePG!Ofe=!g3Tj`4=h|LAIYg-Lf8(nw1%})^=+Ybqgv88%0S1+aV|*AX(z5fJhP%$w`9boMC{0jpQU*;*i6TBAX=eUUlEFNy9F=`6m={3~g7V>`L4lmk zQ-Ot(hS4a^O3jk-v&xN}_pt&aRn7*j+wY>q_{{c$Z&U_~o3&48<}VrYNOG|4_?pqK z`VB1sXV#IKm+vq{Ody;Nc+q!h?>Jc@0kNMR@IuJjTf7o}y2B zSHVyMLH^5n9c;oyjtF75V1kjX(aLvx$vod-WpljxUV!Y&h>L!*%I zb>4K*Z*Zr{kV<2Bx)xLiaK8|=0D=>0fDs<-D^ULW=l8lv^d5se&-8xG#t7q-**8$e z`e?3oSC0w(dEds-ZsPb=06N2{c{U(!&*rhkcg_p6umhv#EvDb;GJA#~&U%OJimdtP z4l40oo|2M$7Z*hF>Q$-MRhoc|1xy5>X36q-)v)(bpjM9Tsq(o)OOPj z0t7OTU3~6BZkH-#kEN{-9PxB0#wIE52$1K)#XcU_sAT_an%hchhH+7n=g*aPVbLWj ziDdoF$6hnbouiwl=raa2x+%vdZ3@ODRjrrD*Dj0TroZ%d_RpLpnY;arL(m^n)B~Kx zCRx$5ix1cpXz)&4fYkD$Tzz@Y?a{b3E!otg`sQ3G`>$4AFLDDj%qjsw(hj)X1T88m z*se*wo36+DisWnQR+V6A*%m?t1R5HZ817E*mv9S*foE6llaoi(bgD4BIhO|}i@ChnqBI%(CfLWK3(JTORAu-O?3Q}<0+v2&Ba_0rWMrQuHt);*O zDbw+KUXw*yf{KcYWMpL3;2GWB3ri?WuhHFCcXgjk*QDdxKruC!`^11MyAiNl0zU)R z|4rm0SpS2f+HtAk%H{GtAik;ZGWR)`t|!w4;1+;&-%A)QBc+DP<}V z{E`7wUhNtxr>WLE#9|g62Gb{oL#u-6X;55yqq~~f>#Lm;S8DF#bv^>`s@1MkvAT;r z5atTOU;IZ4SxYyQcfSQ<))S}>**fLzbtU++??oV`kPm zvy^k=U*AQ%@Nb2Wznq?b4^n^a{EzCXazIyN32(fOhz`U}!fGEPhqcejkR-vxVdA9sDvx{`CUuSOCp$r?WCH!Yy#G5HLDrsOcrT*#mUv ztY-rWzl&I$qqG$-#Gw&7Sdg#cIKWeWQX21i#A@b8EjtHR8Ee+K-}fmH7MBO zUXPS+eFOb^F+zRXi8H6aD8Z_PIo}@f8c&N2B$)k8aF+7A%4kUV8Pld%rIEZ8kk4^o%vG!+1?m zE0W1Qp(cpV!s%4&A^aoRv8%dD^P$7idx^&Ck#W=W@#zBhHD8Bgrr*N%-oY*gtGnP@ zZ1mfUI>2liH_K7eX{5ldMk^#QaLhfY4MARGjX>|R=y5t#M1}+yGASE-(-L?R&V>i$A}b&^I_l$N-b+bERDiJL@`bV{J8X%R1+? 
zS%qccVb|`)4jr7zxlNUOM2lo?HD&z4gPBT`l%485m7v#B(H!xU78Z^(&ZIj-$`5#} zlQkUs*HU(aiR(r0y;Bj~)}H~Xc%spJ_fvKSZ?0T~_16!ERe+yG}j* zqY-;%h^hj{oi*aEx=&OEz;HB7LmdGR_73)TcSm;{3bsJymL3CKZKeg>o=7i=xYA8kl@qxg)>FLHyrbWB72K02KM~;on++6_Mm!Z~zfVhVQ zsyz?18C6@I%46|sY?GsP)*8PLKe$ukt*gluIaRDvZ*G12=ly)t?xv2GvX(v@Ka3N1 z2EPD-_$w=QM=KjS`84e^x{9@EEB>UF-QPcG`yQTHH&s#qXGFNUx}cCJpK&Wr&5Wvh z;y%Z&Y|GBgsY0tM&Q3qf0;=EK`va5;U*4rBYS-9q%Dxg?W->b2mVC=bk+m2eVcB54 zF*7Riju{l3sw2v_-EWY&Ga_{Mok3kva=V`ff%z%k-5NC&%sKWlY|duui3nm`I&MU2 zwz#gsL(|e$B&#^59#R>mIpSEG8xr_hv61D#q2~=G9zqg(@0%D%%#9J4n)w{!=zG`q zE)KUpn_zRHYCv)X2g6KDKNJP3mHRa|G&D}Bt7JMH4^2m&Bz@B2vf=d{9x9&NYpNN! zW&Px#r?t?8w0VC@{n}SVbAY>GT_5WFRLUPS+`;?s5rA!{bgO^7J$~bRp#bZ6Kg^W> zhTnpeaZ^t%^S+NU1 zm|bja-$zTBhsG+vt$t?_6XT3|?}pfRvo;RmIIRh_nE(M_<-PHSfJ3GHg|DtM-c1S|<6GDVpO}J+Nc# zL5>Rl^p+RPLozXY2pX*V(N#vL2#phSkB=Ym&tgTs`j;1AMzx;%kn3Q%hNa$Bb@g_C zJ>=L|Vx~$+fIgpAU?O*|eOwH6g=`O;gj5k!RG~jjWtPkg-ri+hBpGml9Xn-m#KUhi z&w$S}c_W2%AI@B?qSRQR&H`upRtE9+`_oO1`M-56lgT!=Yh>;tUF%=AZRzXG6yBMW z(rK%gi~s4c>g$49cUzI@=g+T}i-&CXlk`L$@0mB~ED6q*A7mmuUs8Dim)McyR&M$2 z9!7pWlVUc^%HFG^bMnb@Ana6wH^8S*j6*d2fVE2Ov1K*KSuC2~3*SV*JKU%95`24GjLzJqP|4djI84QgE%z9@rQ*S!c!X_x!9i zE6{OpR@US)m(;ESj86EE|5k!Z_nA`Xr|*TyadE$MmrEzf$7%e=zq3FK(^|BwkK}50 zs*qFfwzo=}1+?JQmzoCW^lGpGbC*Jrz#tiZKu-OScR-*y&*PbU=qP-$OHc!Rg8$3k z5A_QNwe-!y#k@P6iyO+yhET`e-UPstAbeSPaU+_ejHajQ0<8rGHv{WiaNzKUsKaXS z>Eh!2{09|5Ivo=HsGnq9Iu@5uC={TFqS0t);S^8?V6S)Cv#!((du2F&xW@nD+Yt|b z)8ARR3Pdk(ll8~zk2ghexV_DRQ6`0#iH>)NZ@vcc5*i285KGqHpud~Itw52m zk*KwwOepikr?hFwKZ^hCrR3$+M)ye3_12JvmVOU!6|x_`hbnwQxwv`rp-cc%%9jBd z!MR-~on|oHVIk^M2nl&=ghSizrlKc&b7ljqyLOnbsPdcdi~5=3$H5to_Zp8=15Fd- z5)!BZ7da*6ntg5Ws&G!jCGOHMllb^?WguPRHlc5h+4bw!K|F2;M!CZpZ{PkLkRZ#4 z)7-tQtg0G!G;G~!^u2U?ZT4i0eNkjBTAZBx6*p;Old%$}B`Ko;U_{+EDlghmSClvy zO;$L20&mAa&ufK^<+quqcJ<1ZPYM+#yV#&{_$jiUeY)WQ3knV8OOVHGt9m|vH_R9C zeZf%}6{e)5#Lo{rA?^}c*&)^$%JIwD!LL`6dh9(N^b$l)Sc2dAENKaNBv)*QO&h6P zMU7qwd7ZYrL+5u_Iy|WHgooqq{bt(;Km2V?FuzI~9Tpa5T&@uj8JuBcY#bbz@fdW6 zgb*bVobf3j0Ma=7gY)}xyE%GIRP8`gcg$8LYhnFBO7GmjFmC^t7$;_M^gFB*`F7=O z5X_b8lskEb+6_DH^z>{)8X&&#+q^50Pxmn%HBgn3d@onXhM$9-x9#~{<**7SlNBu7 zxT`Gch`ilqw}fH;e6`(|w5T5|YH_w`w1?f^j);!Bb`Ld+*=$MCC@Ak#m%O^-$Hg(X zpRG>F5-MK|w$?qL+I}&2yg@JS;R+PqrBnmucc-(Ey%iVym7Bjhev~ZZ`)z>!52V^0 z>*L1*zS%iui8+3Lew9^Kx-uo#q)j^Fw?!82M*4G+3QVKspOy#y(3k^bdooVtRC}wS zn2sxIhe1WiX1ZgflSN#y7H{9lI9-6byT0A%a$(`8M&h-i*)p7~=X1#?jOE6UlAxGn z|8W6Pp_lh=CbNn1{p{L-&NjKWxhXw+tDGTT_e9X;d(KD3rs>jZZAmz>o;&YTi$hqo z&FoX;!3yi1lh6WWuh9jdfth6RH^Df!JPnC|KdX|Sue4&6d&3CM%FOI0F>!rH>RGNl zwd2%jrCR@ePh0C4!_RCWgNiVEqx-IXMX=DKPlFr;vipz#j*T%d&zJb51?Pf#PmI z9WtoxNw|yn+1!W(4|-f86tqUv?j5MqM#a0DpR7m5jYi)ph+7USj}N0R*GCte^*j>y zz?yV;t#YC_W9}o1XCFgh2@UT5K4ygctBFMqaXn?_=!)BNfIJ)UeK3_nDl0wx#c%~X zlrJ#xXuuR9B68f*Hi)j0RB@|lV+YM1AZQ9%tB~*f`6xYIS^fa|lzD0$#}fh<@EU3q zm3{~>$s;e%h6N5jQ9!AQ%T(&MQpt5;xsoI$;b6eqEv&~WgI_bB9~W;7{^4BTSX|t1 z$_Ix7hrcJq`s9h@vj>=YBV}b}DJdxfgGCjmix%}IBE9VFst7ZhqCve2yOk!*>P}a^ zu}?m)4httU1+4otjAB-e&#pNPyDuGx%r4T&>0Z=mzE=sod2Gay`hk4(#I~;CnGJdt zlG>Z%$?SRC-E}{+Kn-ygdsNOeP2H1Gg1xf5Bm;>90#nMRsLsx9hio25W} zY%JjC4hRU?8XVa|`TP0ZrW59SV%`$#Lj4$7xp@rhT8HW+ww-7NOdigN}4$#pK?6^yuY1K&1ry^B{;7-8Q_VA{p+S^~r%bQe3zT7|2|TuDW*hV0d@~I!WANEH zU}d6Y<2n_fBAK=8r1LyOkMb!rV+$ppuWpPt-h%gTk?V9XtD< z-_mBR_mI^k8IJ+&Y{T(B1;yjwJHLYGzH8#70;>sJ4`AJ^YHTWD>yuR0JuNqsP5fUQC40O z(P9{!8`9AmG&ITe*zefI$zb~wdv|EB6<>o+oKz1RP{5|KC(6xqvaR}4kK11V9wUi|0+r~KN z!nKZu5@WfFPEAlHTBKvs-kNpC@~}Ei=*-bgms~EHZJv{-vX&<1z#um;kZZuw^2U#r zX555P+OuZc?p-o*YU4ZPYS>m$e#kL)t zpPyzGhAPaIc=fo@B?{bb!MfnoXNFSe%sI~xfWk^s`mu^%Hs!!HN>m9<@`SGrU9RV3ROe&&vh-KVU(vlbCSR@qqd 
z0&*t=YMbaZUJSxv0Of@1O)B>+OX{_F^XUy+N;`&G)aDj!3=SBVX?DE!N9gkulEG6f zpq#9Vinc}&C{;OrVq(cpQXwh2%<7vVac*zIzT~UmzBu<;Ew9UVE&651Z4-g<(a}ju z0fphC^*n_2Xmr->t6@GE!lY7@tE6Y0+kpp*KJSmdbD{32>pW)#hTRJiAjjg$z z)UkShKHS20ULTF>0-s?&5k*qi=^|J;L zdg!b3v2(-X3LR#~)zI)e``YmR(*O~$?5@VoULR@f!B~as{(~>#LL((IoROkjm8ECI zVam66xVAIa`adIwH0{0L;%JUl-{SWTOxV$vF2kz+XPZQ=IYuvmUL zpG{ZB8qiKrM4!R9dB0~*k{NslYQ8laHBb~`9U;kV(gt(C2-G4o#fH#2a<-jN%Sa&) zoJVu|6~gL_Y-Wi01QH9;ugUEgy13ARuiqOaRzAZ80^Sv;<-i^;9jopAmu*Y?a_`?w zg;rV&i*^Q!Q8uio;c4LMtHm3sGW9dMYHB8YG%6~<(ixmVasR&X15!`hvi$t~j0_Mr zcWEwxkXfG`UH(sW=bstOtnExcIzU zS7qwNEJ-z4`XxQ-u7RYRx5S@RzvvPk{d2x(VeUzAfL}0`16TN;7TD=-L}9Qt~w0 zjTWj%VP8iF@*B3_IdvQzd1I5Xh7X?8w}f-DoPn@9#q$&$#gM|v4z$Q9L+)^Mr!Pp0 z$<)p4CMMF<&sdi~nl7vqIav5+XSla&bAy23=#4ZOV|4C$H~*UVRu+PPmqE}qaMF^K zoxPx>1keG-GxNu{ZW9yJ3OnWrxf3u0OetlN*4OkJ&nP&*Rr?gR%yca<3_;pBP?HoC zTzVl9x?^b-hFBy1NL_%swHw`5$(y}wIVnR8Y(#MWxizb}+35qc8^6#KR1;pqg93am zdxR7ZgS{%T;j z?#JnJB^5`RrFyx|@2~!0@Bv~}`fqAGwh zA5mOsL4D^_!;kY95Ak=<@K@T;u>sNQ8^!4;7#e0`g@uLd6meg)vi?j2@}w~|6nNyV zKp8SfaK__1nC78Ug=uem(#p!k6%iV` zJzfH%cLv%PA!6f5=|$BmC#?ORPSP{^YCffSTv_#al2zW>s1B_@K@OHsDl5{+CCP(M z)RxXR6_pknRuhZIRoTMLXoN1oH`mSR-60#_Z}W66(}UGEdSV-ogLyu*5Wo;?&h=+0 zBI9%7C#wB zNf}fWQruKAhZzRQPC_YiDRSQzl1iUMFO%XkI{@SesQw}dfYM4mDJ7sjc6j5-r2fdy+i3I6OJ<)JwL4K&$e;PVd%i5%Gzs!dBoBGK9!7BMUhQXuFW{sB)!mb#*oF zd799`^aE4kz`pdO$*GKh{;@H%rMvFZbGbZEIpD{_0{`zTM}Jinp_Z2Kw+|L?MHrw5 z_%muEQv>&nu8H5JrZ(eAn6e@0?d=`m>szpWmh#5a5U8C67TvFbYkvgli{nieBeL@9d;$aeuPUOz8`aLNod8Rys4gc38 zKsf=R_wOvUw6uN3<@AjAqyPvPQ&j^HBFM$iJM{FsD0jws(rAOGt#1ojhu{M0c=5N> zouoBTo}KWV1B{+n`Ft^U(cNTW$}>AN*QM0r7kdz2zT1Rdyud^tcAq(p9~}7zW`t@Y zQ=&DW0)CaEZ)w=F!`1?TU1BBly|3w$>-h&SaMuv>D>T=?l=&K5}7udU0M>slkrU$ll|6u_n>BqXH3BnE-T zMLY?0d)+6Qp@Rnp(%CLu-&O~Sp&sagjPq{R9zCjYJsQ&0Cm1+V$FM^*_HgTA2d!3n zf5gID;UR6v7)hh$Tu#MD#M`l9A^~QKZX~@nYhn&-;wN2a(&BWpVk^j&VFqRDyLF#f zx_o?%C_$5;S~Ee!q@}f`r6ogLOx0vjibG6}nK+C~RJ0+tfALWcE!cCbPjQrpWh#4e z#nK0xQUvp?oG0} z7kLzlR4sASKW+JfWN=+yLv!GoP$6bOZ2^*QgPaTJg$hDi4k}gMAm=89@k2&BiiAQ! 
zho&u|AFLDIugT_tf0SAYJy78NWR`xOyCL|*KlPT|w_!$p?!Xts?QT-TFUz$>#^#kM ze6caFFRZ%*~(7w!lE(EOKiv z!A0VyPoI+bWt`jgtElq`+taA|d@B^C`ArEGprO+1UO&(Y#h~NmN4X0DvL;jJP+q(?C1b`&XqEea3}SB{P+=I!xt77-23<48};?| zB?3rqhtSv(k34le%%r05{p6R^$ObG;K&ZuThxgmPBa6-mGG4zCV5OL4*%K(cM&xNq z7g4@Ft&{s~GW7wH=(!XwZ^>DHgW}_5F7A)7rRJzgzn_Gwr-5-*sE3c5HzuDk*(5&B z_fxgwr;15iSBx5|=$}*+zHhe^v-0v#MQTfZypnN`b?vsHZiby{!q}lr$9v)1@~Ov4 z5M}H#e%zy|r}@0MsZ42XEymt%+z3iB&hYoAh1`gyL)_jN$q^&b{hdXG-x}A;_F2Jn-u4EM__%Z2*we2)y z4tA>Na`4*{OmXwvB##UZ4z?C&z9d6y2pI9jZUleFaepvP2I|KFC*8SBKHQBxY1X!tUUEQW5AMy!;#PQ5cq# zREoKBjqt=nyc#*PH(iTGe|vukPl2mLQ^0n~%BTT(cg*oDL{XAYp{KVuG%yh86y#8= zZR|j&tc$-;-cKjf%gXH62hs_B0qzvQ4&(`RpW9#02%ApJ_@J1#Tk%C_NW6ya7rCAK3y5VRFyNL15i;FC`kN?<4*bT;N)A6#P z?^qpkY1n&=oPVU%%-;#)7t}dcxw=2 zXPd4^0?-Ryucq_mFZ#|Q;J*kMDJ6gol9E)SAiqiI>rN}B)cXBT=j6-}N6+mZh*+}9JGfYfQ6lPww=e#VJo43Y2A-Fja~FU> zaAwt5{2VyvEsn%bDr!cR+e1~#jWlF1fs2C2;}Nc2hl?))R863%m7aEGE>LH5r%abm zu)B??(|mv7=%nEjkk3JDeB9p2yC>J~zG4|?5ajZ*LZ|JWIg&Ami`vXK^lJ&&%z6Ow z)PV^8%Gvr@xc6s$`(R4;d8c3xpw8C60lJXFaO)+xQj#_9lgYazhfet!^!hs`5$&$K z?H3Kn<-$D%+{eLlW_DvF*mKFd;IwUQXD<9~r;cB4yePr=d@Ednzwv90xxK1t>#X0Q zs?8*{LHrL;T%<;MbRXE${<{Tj>Vx#)-`?D#r#V?hgKhLg7aJZw$YWJv(1}Z`*B>q; z*V~=Y;slNQaWztZsXRp>fs1W*dJuD5m;AHwc(&f{(D>3=;k`x00uX&`F?l85G9Sr^ z>2BhxDk?tt`ROPjE|hafgm=!T*iAyjp9ywzzrIJ#J@tx=CnS)D_X%eEri;p;pi}NK zmeXXAScv^l@wY@pLB8V-KI)_<%d^$oM^xbcBtZ!38zzM23lwAeYN%ZTZtgfFb`u_$ z7bn8u<8JdjQI>R+B#~zb@Z1fFVFJbl0lNo&S{;`*b88d(R_p47llpQZo&&H2VXZ7EeM!0wd5NH$z)%8r_ioaC; z(jenUroaDBgu*_&&g(;A|bAAZ7OzzXqCnJk!xl7KH4HDX#W6MKuwM zHJl8N7OY*!Vl?jThJGBp%vc3gJ7Y$y->awuXc=;yDtStosi*)B?TDb8x51k-Lp#pn zBO?<#=pV>C#j6d|dnJ7r#+bTO%x%BEN4mNPV-urXp&{!EmB3Q}0x})uN^g9McH`LHR7pcy5d51gU|#fm8Rr)T+))H4 z3x5mFh+Lae%_21~n2|Kp*QBs3#cq~m>8I3|UQK0l-t&uewfMcF`~1J(XbVqhb~OTH zUGcZ&dD{1^e!dTBW(24P>yZ}`tJcsMVf^6hbR;qO-~X)SE2NPzx18-YL$@cmH}PA| zA8_-?0P@CVU^WPq*NyJx^3V6InpP86;FsTh43>Lu(Iw698}~ z|1kr}QB+r7Zf?~Xir1awygZc@oeRG4J@~%^W{w$u0s9^;sBtB+REVOPd|z?SDoGwxtTYv_6YT=*8$zz{8PDE7S(KotFw zCh}GBUO)*{-4Oc6L2id6L=kJYL@Onp>6)}dtpW7gtM87#RFE3E1#2ol$KeTBAN{Y`6Cfx0B1XnX1|X z{5oy8t7Mxy6pe|c-q&HU*4<~!3^_oLh0iW)!`O;jRfz2)MuPRC;qj(sJQ+H7`F!GuWNh@g8dd$UHnvUuu8=;@NqS@_6U zopvPkR<%*?m-Axcy0AqPJTo{gtN%{&=w`9I_Xs19&hIsy71KXKee3%A_3PN!Sn3%M z?j(|0A{8gGaM2SGP$fBWR$p!HfwjtHtF+$x01H$Kt%9}XgM8N4O_3jxG?;!Tx3WeP zn(NGs#Kko=Ot69$iTP-0uk!rd6BXDHk8)u$Yjm&*mTBVxS>Jpgr!FWoPRe_#X#GW) zt9sqHl2y#&^@44e~`PD57>NiU!8`i3ddQ? 
zIf&bJ4Ts3~%`W!d^c%5iS00;UeydTaS7Q;sHj6I5G_p;>3NmgcZYC=hs60`3!$WIb|qSJvne!$X>yzV zxSc$0E|>B*#X~{zy+!v4Q*_{$sG(N{av|o-*|~?xMO|hX&M}Gt-0MuQ0;4o0uC0VA zGw7+D`TK}Y6a|AVb3e7sUGmZ}D9BAkw=}osDQjfDL(c8tW_m@tbap%JNmWkjV(Sv8 z+HPS!2!!Ja=sGruMW| zR1gC`N3z}eBvU@A4Y2;@lK{}m%oMLz0jwaj|AHkAoSB`yT_?yx5k=V}MXAJ@zE@fv zpI)~(9oc4=K&v5|do)Am0RTB+JX1{ghH_U_$YT|O!`AYM8#X$4bon&8$vv`>LzbT1 zy!pDbOvvZ?xCrJVC2o8pxj`c7TI$pf)=W9)6}r?i^lTn|czt%~AmUAcvO=VeA`eQM zWo_G)y(rTMf@sfOn=PMDxE67~Qe?KjDD>3xAQjzX+WbISaRwhKPOj8pMzx9BI!-;B za&lPAXyf>(NJs-R@#@$mrE&3uJ06+8<1=|T8*TYqa+}Pk?Ou@^_H<2PmsinqK)1na z3o7Vb3lsKG9m^YPU=B&`@ex?aAMDZ>nohh2ZY^RYY%38;9>)(c#(At>kt6^a*1!C_cr=<@<#Ki7z@YLg@ z(Lj&Ruvql}1RoC13SwpV|mk#Ox}n^=YasNALxy^Qt~6?Yc6WxIP!VqOPey`#KrjopZSBT#@|| z{H6J*bYovNnic3*T-KS8nxlUx*q0z=@lC4wxvuNYeUuIe4S`G zDj4!a`CMuyho=W`T|8OX_!2c028WyXv`ol(Ip!2rz~S9^n&p!}1Bz+Gx!%IueA~Ic zFV(huY!^TUonejjTeh_(7(>|VI`I-P#Q6-}#d`GqvBD)^BR*aOot}SaTwV(V(xxVH zQBm!P9NiMZCr>PaoON~mO0}cYHSAuqB6P6sI&Cmgq$;OKUBvgEB`^}fc?)2#lx&~C zKJ0L=s_Ixf^t&29>VExLW-<87JNv5btV>Yf^;7@d_?+-p@XB^C`iE8kefs6c*0O|` z9Nd4rN$QuYbDm`&;Er}kCWGfo0K))__n#COU7u%wuIE)Gq1l%d1fDPWCp@IHdsP(2 z^|}bZ8^OcSf2h_-`{f8R2_XH;5t8`y|0`xHz}5d3W`be4DgH8Z>KxP}V&dbaU%f(a zjpI3jg5qLvHBDjHpC3jIGTZQH2(-ueTz`c8@YJ=F&gBSzLBo9HN)#VkyswDE5IfGt zA}+Jt;e3faNPsOM9*q13;#lnr9K6!}e9h zNO8bL*gJKSP;KaYJkcQ(LRyEg81YzK@#~4eoB$=1czekW{EIMD3r0UY4SWswK?3Dd zMJGQz1u|VK5<(RfLj!~E?(VuB!})bsXZ+sTq9yXIT*`07xl;$+wo$q(HvH(xlLNi4 zqk8u9-dlG{N&>cKaL1miyJEa_a$In{+r>LcSRONN0%BxTV2K76)8Pt0L9s|&wS5L? z1x?&+Y~3H8{zki1zmbc}3m{Ut?=_m%Y;m#)*74=J(=<_GI@v14^&}bBnv+>+#ep|d z*`9L40`WqH!?@S-y?w7nNFT8fqNvYp(@$Y9{<^!K&qX^@U^)JT%iXgvVpN3P_KH>q zyH;G*mlJCI(g+q+gCdLo`EAm2%rGnCF@W+w8IOYl0e5O+SjJ<3KLl=lo}rCh9Jbc0yabTU@gx zxRdR02^S7U23LtKjihE<%+O-@lsBM&u2!1ayUxH{mD@Y%lg#P%lb)PPAh(!B*6NML ztGHODv@27mG>1>_CE35r!~?65)F$@_|KH|7mf;VK4)=PE2*R^AXD$=DnTHfJ6*_q;LL= z^yHnFW}kAz1LPnJ@*nyz2uyOyP#N~ z99IEHPt_nlVtSFN!*A}MCE;5-cPpL#FSN+Rk;=dZOt9_YCmHg=M>U&Q-bKU&XEXq> zC~*HeLiFgDBc*=ll>){Mz8AreA_FrxKe;xSS1WUoiKwey<$xkhW9l^b(u((D3pp@3 zyN(-s_Q@d7?DO){bPYlp8*Tn;mAOV8kbb=96^RI?;`0qRbM)>-z7jSg zHar_Sl&ttVt!$#AtJdL57t}swDIp|$W%lmley-%QGhapj1j)A8=AVDk=g?_<-by&^ zPbW#LAujhSeG0}0Vf!HKXwIhIZi9up!L4#N8;qbl}VuQ@rW z?znWMLK}C*lp~mmT#6OAgPs!Moh&tztpLxoo_q(z1o)gkCnV%Q4#vAJv$NZcHuXW7 z`_j~zD=QJQdr`i9nUvU*?R2TrSmn6pr^U^D4|+9@RI2McLfzQb0Ox08R`M9R^1BSl zHX{Zr6m)9B>dw0&)}?59kb$f$Z4oC2JT~SeE7VK|w8+Zo$lT8`C-BD_E2rqL{Yrl# z8)%X2MA`fT1m_o0n43v7Krz&|m4cMlKsnQbfRgPyu6%1(Chae4V(EW$kq5~qs9I3nd{>ge@B0}6yx!0Hqd^=Odx4%bJJ2F zDaXv+am1v0Q|6sbrdc9ltz5H6%;`Rud*-BD%O)7p!s~1%KJjlw2(Y?DK48q6+_@Wc zTM4qhE2!_;~y)=b{-RPb3Iep&YfIPvkiSUZG)l{O)?V=Zf!urhSLgZT5$om8 zFR*#b(u;~#Gqjp*9RP&tFYR}*BU9?44zdccWpHbJCeA3d38|TqOEHf(88s_8ELIbx zZQ0ezT&#)|HcBdUxo>8ve|&`5;s;UxNqX*&ClkWaED(4f2_YJ zk0gp;5u=(CSHsPLd(}_L8f#l}mZ_DEzAYb(536pgCk^dm%#@0PusRyy*|C*AZDu8w zvfM#6;g-m^Cv6vI5p9Z)m5mG&FYbqGk_uk#YjOY0ITIvCx3zHy4sOm17%|&YOV&P7 z#}Ja2=5S6Pi51y91YL9TR82u|dNh{25h*iLi3ATZ8T6@_esU`YzT$JP5pL#6+F#n4 zoY#9jB~2BTkJU&+gO}eU+@l=pXMl_ATrUQwSxJ@u63x_V%OQxPPVv4$$^_s>t|1Ng zI_k>1`CqmeoBuaH%lnQC>rV;w{1Lqh>u>&xXx>UZmSPMdz~iCcSxOp-M-6DrOHZ2% z_D^4u?SIuQig$o9c@fC73TZjNyR>u8LBEjrg`ZXt!}Io% zB>G4HiJ*^o%)I7Jf9XRRyxZ|nYV`6rWXdi-wBY5%gwsBPO5JpiB>>!l5E zzPptvg76ZFRTw6+-Ml zx8o{;i2pwj89}R2KnFbd!&B30{0;wu6$nh>k$Men};@L!NC%4kJKQltf<0#XW#kvWjS|ATGJa zK3W#fUEavZNS0_Bpmc!jwK*Pt0TvrP{(=Mf;e@(PV1?tLg5zdX!+Y|44+o86n_YGX zqB%(=Hd~J#|A^?g#>k{X*|a9%59Bvv8X7Zg7k_YE&Y5+>rYSXmz9h0R!2zJWr`UU*=jd5}sr zFb5@c@Moxx=;%0L4)jK!XsAYsXPoq10PO_l8e9j`M<-z>R(i?XGq98kyLa|!8M9h? 
zbTzydPG~BiMyKIt_KHXoMNiA8$Szc8Ux;3k*!r3H%y%GtPZBISj02hj%>nfx=WpvoRp5G3oR}ON`_*NkVN?svP(l_M z_|a%=+vrdcHvMzT|B-Ya$Vb^PfKa^l0)eEAc*b>$$Xs?rI(}uqp`V152H`49JTcO7|1+E>#ZmE3HV}N}ja=F+?dO~*34Y(4 z(2BsoM8^p10;(_iSINyx4PYq;GEbn+!?=8d@pEv-db!p;sX=L0DapS~uO(l%7RX5F z_WMn6N$#&ZuTuqwgn(?y%UJ(cHNXA z1LP>}${ugD8sgu_jO@Chm5f*e66L5WePCYZZJ$k*X9x1}@tGz%uS5_kMbp#lxS6bZ zB|Mv&jmu+;h;x-{)52=_^0zx{{fGVJIoA{Gw5B?7M_trCeuJu`yWEM{`PlA9`Di&R zL)J;$$LPSBx*D+gQir`#Q2~r58IOS?DbfH5hGa`~b9i3f zYkBzwcrgQ&#rb?e#R!^PA(J0d04|(Qv9se(VtANW6gTVlcgjk|`<1Fw{rAq|( zOWm$H;Jo&+UiAXYOMOuy7fl7-8hEc$)X@P4`9xjdRA8S!reDSYuNr{ZeIH_#*S?A? z21tkvtwPnU*ttiOgLCuL7WO8|)m_g#1~HV@J6vpUq3fe7kOHaOzg;agT;-yli~#H6 zpK$Q{Up|cIVkNXr1DUl*hHQK!B5)`ApS-6(v1fY>BF>?gYEg|xs64*71;1-C6=U>x z)`5H<*~OgtT#=}q7KOTaR1ga?=|AElmBRUmhOLKqc_c0l||=?5GVQc8gB69}H7 z^oq)!I*N@Axa}jo;`20?BZ0Sdp5Gi81b>d=XQD-c2H#5(XgT`r!|##*vQ~Wi@c8j# z!0QKxceJ&|#Kxi~B1)T!&|qgTFZSPFAE$2Iy?_6-#5VpYTdk@m`&5qffvpb83_srU zM}WN&RloV~XlbTtY@Cy0MjG&a!*P=~Gb6*?+}y)tixHz$x5e(hy&c0fJ8=Q-YCa9B z^)zg8nkS>hIctV4Mk5)Zo7GDg*ul4a-dN!HVu@=8$TuA5dEl&4wUN{7KcQs)2bXCnihHj4TdE5$9>HeWZwC%Oo^AO-tti&$VXZxg)SKnE!1A&i z&tbt(cJmv_6Ll*{yAWN?@`G%CTp`w}=UXD}Te)MOSm3o!bm2gc?Lv?EnqzrIgl64S zi`Cu@(#~F|PSG`fXRDM=b(ePxw2nK8iA@JtdJLaD?6aHXFeBMS(`$S0PO=O!{&Jy@ z+{25KHh>Ii zzQzPf{jT`tOX^--o6j_$+UdkMTVp=e4^z?`rr}+MHGbN|S4Av9TnyN z_6yr?MLVYKBOV+sTy%J84M-p~Bp~+bGW(lEg zgpF2|KXfa*kc_$ao&$RJ36AKA!Zn$%^?H7}VS)EJ_rcd_U{&#;e7BymdQyd2oYh9%YC2k~ z*GwYGZk5r1RfGKWnZbpeI-{r?os<-Ot z(!tw0r$!G04z8E9E=lkTPg#f4o?98 zwC_iTWQBQ<>XGHse0^o2p6QN)XF0Dsq&k}eUhI7?y*hb)NY|`7KR)3PkN4ZRubc!Q z2}w`sM~L5Fl;$B;l;vscxnh2LrtPA`TXj1~_P4@p(wKH#ln%P&gShARVF3nV;|u_p zYZVJU$&c`|?$!u-HTTF=yGgT{(6&MNu!A{;3*#c^SuYomUzcNtIKW0WI3dO4Or)|# zEOG?!*acx>FOK_VqYVm?bDNd=9NDbZg(K_Vs*jnJE339050AopB=oJ>+Q68+kuU1G z9H3{PBuRZipXt=Q(tokf2#{nzfAjhC18(mApe!J`e(=BuECJsueS-wVPW+4&dA^wq zneAgz@=}(d`LqZdc&ou=YoW@MEYaqenzMg!`q|8xil9|$k~uikxKj_Yc9?5^ zPQtuvFMx|&gR3g@E~yxdVTn6%H~pkC@c&{67XL)cxEve+Rv+L#QWpxIc6jobBCT!E z1K06?iAyhB{(EM}4d>Unsd^4EJozm}4tMkaY==%CHZUGrV#R;8L)?ck4=;h2;=t$h zz56b>gzP9^CxkvBx}Y!q?|Pef+Clw#VMKx9kM|DYs;iU|vF;`?X|k77Uu2Mt2J;Wx zorF*$8q13hd;_lcZHl^A<3KXtK-5}lTNZTUenxo88GVsMFD}e0t>BK*alEJ;le}Df z;upOPG=c_uE$eysl*pEnOlyYZ%BBOk$B(Rj4z*X z!$gcGyaAhr+l6Z;@pr1&h-6q8)6ocFy@bfm=sl3icSo7U>ACyk^-D#zt5Cn2O2Bh$c$^*Ew5L@)POrbP0S~J#?l%;nVG#cfJ%z|? 
z(e9tS6*$Jhc^f=xAG*;LjZ9r!G@nBQi;Q0mIa2r2EoL$ZlJQ8vBC0r`W@M#r?Ym4o z{X=+|`l$Flx5P)H68Gms;jzN?WuiqdDqgt3awqDgOkb%rh@cPw#KZJA1nNYQ{kvT7 zp}wX`cQ?Tl0}HpCwZSVv-@Uk)6s3>iyqBK4C3ekyrR`bi$G9>+N=|Q&0?(zAqA?TY z^R>8`M_AYk&|ocTK>#T3iHj$s78!!A*J!>qoy2x!W*ioLO$t^Oyh;h#;b3ZoJyYiU zBE=(rEDQHweI(Urb^Nk%$vh-xv;Rv59~lK7U-DsUyXB@xh}z6tkbiz-Cd*oZWUCc> z)`J{lT|e$X>Xm0-_nvCcnsc$P97GH+qi&R@y<>yyGSg#7_G#fcj-T!x;kx?Dj_;Wj zC!Mdjgp8Yl&Q6a5vuASNT)uvE>Q{&q{qb{59oc6?r_^$jqW+3Wj3ODmE9?rLU3jL6OJMXl+^OjWD>cbdu^MXTwD^w|at z$rC1$<)wNBw#s1xv00LXu`6GKahmB$-S4I@b{bswT&wh3Xq{1h$98js-5_^BPEHQA zl*7Zr?#C+}*)`sKY^U8z5>s1k_k5PGqGpspLM3LTAkEa;x|~|4e*WVP6$yUsoPG25 zS6ugEAaAq$>2!ctF2cRLeY70r<=8J88XZNWUCog5KFK2GDuTf012=wKf@XHs+y1BF zPyMrV@b6I`Wy57Kh*USX_lG+uW{-EdHBVcrhD1~zHY4i-a z@?lS3dlfhfl}o8Xx>9noSku+{Gy@A$y+WM2%5E7>EbGV=mWPBFezy6{LNes4gmi`Q zDOpjd9DuXwn+7Qp81tngsDS>t01cKH(D9gc#Db_>FkMt*>L#qq%uP*!Z)*PJk5DdQ z%bzJa)gJ!N=3PzdL`1@Bnz}2_y;WbcS}CeZa=%1EmQOX$98kV&##ss)+tXzI4-r%m73I+IW31$8w)1 zcJX_Zh_6LP6hotWW>CYA!%yIbQ6=HQ(YkyuH2|R8!W8Y(&NGpeMskT@kyo{!F|IpR9_b!o5g5(!}~2!H~(PA zWAM&@)40uaI<3~;-Y$^D&SO{%@CBq>#RNoE)wm`B9EeTNx83-<&fXStnGsbMZLAJ{ z(?U+hupmCaGtBVwn&LBEKPQj5o1Ug?F^QcHRWjV5LT&Wf%n7n5 z-ETFVE=8;POKhgNko2Vbx4uA=+=!937p9Gfjk>1CrvMu;VrI>kT+)GMgdU1_9Z+`} z4cxsPAg!s^5hU!r9OSyamC-WOM5SYzcOd;qRh|WckU?QAt^>6tzb`Q4F_+x32b`1k#%U-yi7PCTwY9qbw(eN4WZ z3I;J2l1Ir8;Q;v^h#mOIk$#RafXh#2(7k=@L|T{ZSZ{QGMc6B=6FH#=4`#rVg%Fr@ zSGgo(RRWbs6w7L`5&m5P!Z}tJH4_zV{=>#mgS&MP&Xo7_IFDy{v=*>6rIM0OWR@j- zGFZ`V{`^jrbA|eQ{u>;q= z5;LJctY9>^KKN@qefh}zJq7oBQMfBaD~jOjp@3orzqLBu&WJa|iWKWBT^Ro!f%c#; z@8Apl{sm19-+A?-ioSBa>h_vDc6^9cL_wf`XU?a?MO6sWEen@A z`osE03@j^AEj{Xf2v|2A67tOdtCqZEPl_neqakQIwo<+q6`#F0v>ZbUxc7VX{xqz~ zHDBGzX3CPEtAmG1V4RU*n~`j%3tXKf?8eLD6>YRq_Hm!G|PNW*a4#WULrw43nbxJ|5H%9}O&< zh78d8w6cp5E2(r0dGfNKJj!}SS^q8Eb#g59M*UL=awkDIc;!q|&GP5+3>$TU|IpLK zMl#{?FGJ0x0+Ct4G(&Ii0L1)le<sm97)CNlh)ngnxJr}>NKmO#6B$F zc5+ujvr<_?v(@bTy~s1l#Fsfao{Jxi$L)w{?mu|SyRJV}`fiz0lFT>wgpybCV7TvT zYZq}il&ss=Jt`(N>~4i-j+b5P#+cR2Gbwjl|F2mjuIV=_1Ln95?~rI|!U4K4O&qgcsSn^(BldtkK%v}}-ne05p&$Qr$ACyD9EqK}!ft`2^ zcrz;|=>03*s;8v4o3v-k02YVb<)FpCGkXuC?6pa_v?wtT(5~<3RD;u@t*7Kjl;3_z zfgQH^G-id4E)G3)RSu2a*oi_XTe0*o5EX^N#4Nh9))dI}GV{CLjss%ywK9V`9yh4* zmN8~bF{`C6Fy5+K$~*lTNkW2W)g;?{ah z8JHI8f{E=(qeieIlrD{FO{ifDUAoe0BUL)|M#6elOoC2I|BR9xPqH3e6#Vp(xKnOMygUgcGam?*Hr|$vkyio znRfTo{OJSUAsdvIk(>{@ge(;5{Z1q(E`0m><1=B)yh=L31&$b+(-q!B#7~!{j!0UG zb>}J`9v(^DOFgA6M4~r6i%Uw5t)uxuy)1oYJnE#XrMcyJIM|y$a0OQ-ave1VS@!Tn za&kv@a;Y@j4K@tnNPT6X#|+D1*h=VJ_J(f~ zM&8=rQ2Z=NAu>1M6u4GV$;HCmdyOcgkWxgGUH8K>J(DP{fGfuA*vzwAPDPP2f#c~< zjo<3;i3tUZzfn~bo9V+(^PVI&HBL3V6C5*hiyR^l*Mmd!3(~=Inw7D}pYA@OLw)}I zxv_Ej=5@ogUf>$!a-MH{7yAbJjHt19?zR%0U|37R#+|i0EmvZewHA}ky6_qjDAlt9 zHKggy7}_L7O``4-Pd|#D->D?XYMOQ=LtcwXkTn%ygCIW^SzC9_QMwHlHQC$AOBY!c zXApe#D$j3{kW`yRo2-t&B8E;%WySc!hLZ=4SxI;?e0#4=NN-!mZ*+xj!c?Jr7nACF zQ_5Y3{7LhnIXel7-B~qMUN0;jD&0I^bgQKY?BO;D7gI#Zu1zG0R+p`H#?f_2yj}b* z6F>YaKLk$!%FoYX&9Hk%-S(F z2;hr$(AXK#-mZl%5?hfS%+jUjxRHhXL8z={2KDKK6%#MRt?0$-y6=fKx(i0MT=BHp zf83``OG+U+BgB!PJE}%HNFlb!XLU2myKBXbD@nLmH_~g3e)jyBBe<;oz^5{$^vu_M z$-)TZA?=K+l$lVdUJ#%^%}c;a^L$wSL(fv42NKwxph$tj5AXF}935U?9Fu(=<|Qp- zdT1~xIhBx_V_jckSxxCe4|zJ+mc?(!u=6hFAeLI}H+V{x_=BVbzIw6}vN%dvXQS49i$?Tf)!YXInbs+exfSOIljVcoMQ+AWhmY!56i=mO zJ~P3w=kMBcqp}!5G#L0oY7(TM|Km^l>5pO)m`xt3uf5@BF&>C>G(q zb>Z51T|Zxki2Ls^X)92l0)=W$-VjdNaN)NMUYw-pLZ#)Doe=OLygUbAzBRv>qrpEH zbOgQnJKo0o0nk*eh>zD}bVU~c%2By~erGXa3msIhWoggc-Q7I|MEy<<5}9bVsKC!& zzp*M<29k>1VXa@1ir++HZiCqSudB1!zt~u(<-d{2{*{%2S66h^Q@4JPPf(E3FdGiv zRp|ayiF-(pqZa$G(>h%`+8Y9y8r#|eB%bbI%3rBxn>7oDfEXg8m)^mk43Ta`FXi#6 
[... GIT binary patch data omitted ...]
literal 0
HcmV?d00001

diff --git a/stream-connectors/pictures/splunk.png b/stream-connectors/pictures/splunk.png
new file mode 100644
index 0000000000000000000000000000000000000000..f20212680917cdf7e7d8ebc2980296e038b2d7c6
GIT binary patch
literal 14871
[... GIT binary patch data omitted ...]
zrV`NISpA&O+V+}C-)8&CG>ny=zOnabJ6PSor_GG4TDwMEAcusBHAh^M28hh^)76s_ z&$`C4q-uTCCMxk?8HDw}y5-VozV1JdauRBi)Dz*XUp5sXndQe~_r{HL%3t}Qla{ z4Qbw*SIPW4vwuX5x9X_33?!+liu7`FpgvcvIXOs`$L;G5q`ohc*vqL?m{jbxXa4qb+`H#}*5)>8Lps-aZk=)`(Q|fhW6>-aa6} zQS4gYF3`i(*%&|7r>oWG@hkdb@j`!l(?oqQEN-~eoGKvx+j}z;QmVDL=Zwh!`oAOk zubfo?>Dc^6X)u%8w&+oa@y(pOByPmy{0&YbVV-4c-DNCiAz}6PPm~yDE!^c*V^*u6 z>i0`vDc(<}@zH1dQU1{pu^cZSWhQ3E-w|GU&G)y~rYiGv zU6&TuH%m5#AdQ!OMS!C`@imPuh;}vF?@E)iVbOoQy_?g?tTuP--Jir>_!K^1MNWMs zOs1%~b3s#1adn%|sI0|4j!RBf%m~-q7tkDCdt9aC`>UD9V?jM0>0O?#tTW0j1`HtO zl=Z=*o;8_v*NLgV!F7shl_{mK+#yZtDzowDk_rnbq+0)Qqj@M{L0hgr%4nrCtiZF4 zO;yuricG6aA^ zSDdwHXPHV(r0EK!mnRn27F;^4ZX&G^ee}#`8`H64+B*wdXk`a7>Hjqd8%v{od}YJF z$@;&91dYMv8s1%<-uU>SO!~V=Cxf#I4toK_{(p)t!u1j17k`%v<@o)}31NzIPld4x z%Kn*5Bw&L#KX6v)f`@87Z6^iAXYVmMUMX^B1$%W&bz|hbxj&GyyXly;0Ax8_5w3U=|;;9@q%wY%{V}7i|$7P;VaB8U25eBZew)qazdT`ye2L z?ky}(?Nfvn;v>&^b@Ctyb)L1#AL_t>($n26445VTYC9P>+qO!8Ge=O_?P6Vonoi6Q z8<$MC^OduLzfV6Wr0EdFmVeXA@%nUz(0C}ByS)QPxu*Td)GWm33o7v?y=3rCw&}E|KDcB8O zTYDh2X6N%RctS?v8;Ym;oLbfH;Y~VgmPCPVH{@aTyg(iKZ4DG3JAl*ghx(|(JL(tC znV!c>>sQu?D_{uqwJRu~je*4%ne@P;R$MuepOx->-Ljdp8j~rIssu1U6{`9_A(1=* z*{J#B{7%2MbO?mck?c2Bz!~x?>-bbDeQn$+w~|ZAOe4K{yzt`g-ds63_y53AR(@Q5 z<|iI9vQHNS;>FY~au|8c#9iR)o$JTT=j!oNJOA#^$*9Sw)$^FVotRRP1qjUU2Pm1u zDz(aV_s-^cI@zhZd5|^mjoDk4kI3-|=uI=S(|*di0hFN3G9ggx6C!z7gwR@kxD+y^ z|7v`9BzA4DM;Qh6o{R@<4bIC2CUh;{?w`KrinSP~;G7-x&Uv&n^pos16SPlr<3{D3 zTr0f~6F*f8$+XI7n8$Xe>AYB-*r2fa{QsbxeaRem2F7Zyf)DFv7A51OM>EQ2gYsMb zFYysk!=1IdL9KEuBRtl3y1rceYV>=beG{E`0ha_p7;C~9hna$XCxj*|K6R7YTU`VV zx)dwaxXj;=leBh7wu|_S{fx8jd{x3u$Bj%EChE@Bu3f34Ch#8u!+GcJbleiZ#Z=SK z@9zoNLoYHO#FyPqDB%3-qA>jz@)~FSGrOVj(KdRPvyHEgc(~zAC5&k0IVp4Gv0HeZ zRWT+iRNBREtjF~~Fs}QF432rVwxcG$#}~s-8YJX%ZIfqfHrr}g#3ynIf~OluWMZTx zV~tQUxO29yn!{E&*3*2%8mj)aM~J%om#TfNMPF*tJIf(=kPd)Q%Pp8_$6)EBs^AoX zxBpa=i8f=I+uljsaeJ+D55uF()qJAndrNctl*8ckLe " .. v) + if self[i] and i ~= "events" then + self[i] = v + end + end + return o +end + +-- Méthode event_queue:flush +function event_queue:flush() + broker_log:info(2, "event_queue:flush: Concatenating all the events as one JSON string") + -- we concatenate all the events as a serie of json objects separated by a whitespace + local post_data = "" + for i, json_event in ipairs(self.events) do + post_data = post_data .. json_event + end + broker_log:info(2, "event_queue:flush: HTTP POST request \"" .. self.receiver_proto .. "://" .. self.receiver_address .. ":" .. self.receiver_port .. "/services/collector\"") + broker_log:info(2, "event_queue:flush: HTTP POST data are: '" .. post_data .. "'") + local hr_result, hr_code, hr_header, hr_s = http.request{ + url = self.receiver_proto .. "://" .. self.receiver_address .. ":" .. self.receiver_port .. "/services/collector", + method = "POST", + --sink = ltn12.sink.file("/dev/null"), -- sink is where the request result's body will go + headers = { + ["Authorization"] = "Splunk " .. self.splunk_auth_var, -- Splunk HTTP JSON API needs this header field to accept input + ["content-length"] = string.len(post_data) -- mandatory for POST request with body + }, + source = ltn12.source.string(post_data) -- request body needs to be formatted as a LTN12 source + } + if hr_code == 200 then + broker_log:info(2, "event_queue:flush: HTTP POST request successful: return code is " .. hr_code) + else + broker_log:error(1, "event_queue:flush: HTTP POST request FAILED: return code is " .. 
+    broker_log:error(1, "event_queue:flush: HTTP POST request FAILED: return code is " .. hr_code)
+  end
+
+  -- now that the data has been sent, we flush the events array
+  self.events = {}
+end
+
+-- Method event_queue:add
+function event_queue:add(e)
+  local splunk_event_data = {}
+  local event_data = {
+    metric = e.name,
+    value = e.value,
+    ctime = e.ctime,
+    host_name = broker_cache:get_hostname(e.host_id),
+    service_description = broker_cache:get_service_description(e.host_id, e.service_id)
+  }
+  if not event_data.host_name then
+    broker_log:warning(1, "event_queue:add: host_name for id " .. e.host_id .. " not found")
+    event_data.host_name = e.host_id
+  end
+  if not event_data.service_description then
+    broker_log:warning(1, "event_queue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found")
+    event_data.service_description = e.service_id
+  end
+  splunk_event_data = {
+    sourcetype = self.splunk_sourcetype,
+    source = self.splunk_sourcename,
+    time = e.ctime,
+    event = event_data
+  }
+  local json_splunk_event_data = broker.json_encode(splunk_event_data)
+  broker_log:info(3, "event_queue:add: Adding event #" .. #self.events)
+  broker_log:info(3, "event_queue:add: event json: " .. json_splunk_event_data)
+  self.events[#self.events + 1] = json_splunk_event_data
+
+  if #self.events < self.buffer_size then
+    return false
+  else
+    self:flush()
+    return true
+  end
+end
+--------------------------------------------------------------------------------
+
+
+--------------------------------------------------------------------------------
+-- Functions required by Broker StreamConnector
+--------------------------------------------------------------------------------
+
+-- Function init()
+function init(conf)
+  broker_log:set_parameters(1, "/var/log/centreon-broker/stream-connector.log")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = event_queue:new(nil, conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- Function write()
+function write(e)
+  broker_log:info(3, "write: Beginning write() function")
+  queue:add(e)
+  broker_log:info(3, "write: Ending write() function")
+  return true
+end
+
+-- Function filter()
+function filter(category, element)
+  --broker_log:info(3, "category: ".. category .. " - element: " .. element)
+  if category == 3 and element == 1 then
+    return true
+  end
+  return false
+end
diff --git a/stream-connectors/splunk/splunk-states-http.lua b/stream-connectors/splunk/splunk-states-http.lua
new file mode 100644
index 00000000000..2afb289c434
--- /dev/null
+++ b/stream-connectors/splunk/splunk-states-http.lua
@@ -0,0 +1,137 @@
+#!/usr/bin/lua
+local http = require("socket.http")
+local ltn12 = require("ltn12")
+
+--------------------------------------------------------------------------------
+-- event_queue class
+--------------------------------------------------------------------------------
+
+local event_queue = {
+  receiver_address = "",
+  receiver_port = 8088,
+  receiver_proto = "http",
+  splunk_sourcename = "",
+  splunk_sourcetype = "_json",
+  splunk_auth_var = "",
+  events = {},
+  buffer_size = 50
+}
+
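-- For illustration only: the conf table handed to event_queue:new() by init()
-- comes from the broker output form and overrides the defaults above, e.g.
--   { receiver_address = "10.30.2.45", splunk_auth_var = "b8f4f290-...-hec-token",
--     buffer_size = 100 }
-- (hypothetical values; any field not supplied keeps its default).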
+-- Constructor event_queue:new
+function event_queue:new(o, conf)
+  o = o or {}
+  setmetatable(o, self)
+  self.__index = self
+  for i,v in pairs(conf) do
+    broker_log:info(1, "event_queue:new: getting parameter " .. i .. " => " .. v)
+    if self[i] and i ~= "events" then
+      self[i] = v
+    end
+  end
+  return o
+end
+
+-- Method event_queue:flush
+function event_queue:flush()
+  broker_log:info(2, "event_queue:flush: Concatenating all the events as one JSON string")
+  -- we concatenate all the events as a series of JSON objects separated by a whitespace
+  local post_data = ""
+  for i, json_event in ipairs(self.events) do
+    post_data = post_data .. json_event
+  end
+  broker_log:info(2, "event_queue:flush: HTTP POST request \"" .. self.receiver_proto .. "://" .. self.receiver_address .. ":" .. self.receiver_port .. "/services/collector\"")
+  broker_log:info(2, "event_queue:flush: HTTP POST data are: '" .. post_data .. "'")
+  local hr_result, hr_code, hr_header, hr_s = http.request{
+    url = self.receiver_proto .. "://" .. self.receiver_address .. ":" .. self.receiver_port .. "/services/collector",
+    method = "POST",
+    headers = {
+      ["Authorization"] = "Splunk " .. self.splunk_auth_var, -- Splunk HTTP JSON API needs this header field to accept input
+      ["content-length"] = string.len(post_data) -- mandatory for POST request with body
+    },
+    source = ltn12.source.string(post_data) -- request body needs to be formatted as a LTN12 source
+  }
+  if hr_code == 200 then
+    broker_log:info(2, "event_queue:flush: HTTP POST request successful: return code is " .. hr_code)
+  else
+    broker_log:error(1, "event_queue:flush: HTTP POST request FAILED: return code is " .. hr_code)
+  end
+
+  -- now that the data has been sent, we flush the events array
+  self.events = {}
+end
+
+-- Method event_queue:add
+function event_queue:add(e)
+  local splunk_event_data = {}
+  local event_data = {
+    output = e.output,
+    state = e.state
+  }
+  local t_event_type = "host"
+  local t_host_name = broker_cache:get_hostname(e.host_id)
+  if t_host_name then
+    event_data.host_name = t_host_name
+  else
+    broker_log:warning(1, "event_queue:add: host_name for id " .. e.host_id .. " not found")
+    event_data.host_name = e.host_id
+  end
+  if e.service_id then
+    t_event_type = "service"
+    local t_service_description = broker_cache:get_service_description(e.host_id, e.service_id)
+    if t_service_description then
+      event_data.service_description = t_service_description
+    else
+      broker_log:warning(1, "event_queue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found")
+      event_data.service_description = e.service_id
+    end
+  end
+  event_data.event_type = t_event_type
+  splunk_event_data = {
+    sourcetype = self.splunk_sourcetype,
+    source = self.splunk_sourcename,
+    time = e.ctime,
+    event = event_data
+  }
+  local json_splunk_event_data = broker.json_encode(splunk_event_data)
+  broker_log:info(3, "event_queue:add: Adding event #" .. #self.events)
+  broker_log:info(3, "event_queue:add: event json: " .. json_splunk_event_data)
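-- For reference, the JSON appended to the queue at this point looks roughly like
-- this (illustrative values, shape taken from the splunk_event_data table above):
--   {"sourcetype":"_json","source":"centreon","time":1545215040,
--    "event":{"event_type":"service","host_name":"srv-db-01",
--             "service_description":"Cpu","state":2,"output":"CPU CRITICAL: 92%"}}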
+  self.events[#self.events + 1] = json_splunk_event_data
+
+  if #self.events < self.buffer_size then
+    return false
+  else
+    self:flush()
+    return true
+  end
+end
+--------------------------------------------------------------------------------
+
+
+--------------------------------------------------------------------------------
+-- Functions required by Broker StreamConnector
+--------------------------------------------------------------------------------
+
+-- Function init()
+function init(conf)
+  broker_log:set_parameters(1, "/var/log/centreon-broker/stream-connector-bis.log")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = event_queue:new(nil, conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- Function write()
+function write(e)
+  broker_log:info(3, "write: Beginning write() function")
+  queue:add(e)
+  broker_log:info(3, "write: Ending write() function")
+  return true
+end
+
+-- Function filter()
+function filter(category, element)
+  --broker_log:info(3, "category: ".. category .. " - element: " .. element)
+  if category == 1 and (element == 14 or element == 24) then
+    return true
+  end
+  return false
+end

From 6f4620fa69ced5730328f84bd28f2f46f8a7cf8d Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 7 Dec 2018 10:40:44 +0100
Subject: [PATCH 014/219] fix(splunk): Images names fixed.

---
 stream-connectors/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 6ef91727e11..c1c105774f6 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -102,7 +102,7 @@ In the first case, follow the instructions below:
 * Add a new broker output of type *stream connector*
 * Fill it as shown below
 
-![alt text](pictures/centreon-conf1.png "stream connector configuration")
+![alt text](pictures/splunk-conf1.png "stream connector configuration")
 
 In the second case, follow those instructions:
 
@@ -110,7 +110,7 @@ In the second case, follow those instructions:
 * Go to *Configuration > Pollers > Broker configuration*
 * Add a new broker output of type *stream connector*
 * Fill it as shown below
 
-![alt text](pictures/centreon-conf2.png "stream connector configuration")
+![alt text](pictures/splunk-conf2.png "stream connector configuration")
 
 ## The Splunk configuration

From e1ac90ce6277a162a556ed64cc55348f5e3e71d7 Mon Sep 17 00:00:00 2001
From: Etienne G
Date: Mon, 10 Dec 2018 12:56:55 +0100
Subject: [PATCH 015/219] enh(doc): improve splunk part (#2)

* enh(doc): improve splunk part
---
 stream-connectors/README.md | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index c1c105774f6..ea223604231 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -19,6 +19,8 @@ Stream connector documentation are provided here:
 * https://documentation.centreon.com/docs/centreon/en/latest/developer/writestreamconnector.html
 * https://documentation.centreon.com/docs/centreon-broker/en/latest/exploit/stream_connectors.html
 
+Don't hesitate to propose improvements and/or contact the community through our Slack workspace.
+
 # Elasticsearch
 
 ## Elasticsearch from metrics events: *elasticsearch/elastic-metrics.lua*
@@ -90,7 +92,18 @@ Parameters to specify in the stream connector configuration are:
 
 # Splunk
 
-## The proposed stream connector here
+There are two ways to use our stream connector with Splunk. The first and probably most common way uses Splunk Universal Forwarder. The second
+method uses Splunk API.
+
+## The Splunk Universal Forwarder method
+
+In that case, you're going to use "Centreon4Splunk"; it comes with:
+* A Splunk App you may find on Splunkbase [here](https://splunkbase.splunk.com/app/4304/)
+* The LUA script and documentation [here](https://github.com/lkco/centreon4Splunk)
+
+Thanks to lkco!
+
+## The Splunk API method
 
 There are two Lua scripts proposed here:
 1. *splunk-states-http.lua* that sends states to Splunk.
@@ -118,6 +131,3 @@ An HTTP events collector has be configured in data entries.
 
 ![alt text](pictures/splunk.png "Splunk configuration")
 
-## Centreon4Splunk: A good alternative
-
-Here is the [link to centreon4Splunk](https://github.com/lkco/centreon4Splunk).

From 18d15a176c2ab34a178f45c66a84b927b2b08229 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Fri, 11 Jan 2019 13:31:10 +0100
Subject: [PATCH 016/219] fix(influx-neb.lua): parse_perfdata function removed

Thanks to David Guenault, this bug is fixed. There was previously a bugged
parse_perfdata function. Now we use the centreon-broker one, which is much
better.
---
 stream-connectors/influxdb/influxdb-neb.lua | 24 ++++-----------------
 1 file changed, 4 insertions(+), 20 deletions(-)

diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua
index 13ceebe4264..5a1945612ed 100644
--- a/stream-connectors/influxdb/influxdb-neb.lua
+++ b/stream-connectors/influxdb/influxdb-neb.lua
@@ -14,7 +14,7 @@
 -- You need an influxdb server
 -- You can install one with docker and these commands:
 -- docker pull influxdb
--- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb 
+-- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb
 -- You need to create a database
 -- curl http://:8086/query --data-urlencode "q=CREATE DATABASE mydb"
 --
@@ -30,22 +30,6 @@
 local http = require("socket.http")
 local ltn12 = require("ltn12")
 
-local function parse_perfdata(perfdata)
-  retval = {}
-  for i in string.gmatch(perfdata, "%S+") do
-    local it = string.gmatch(i, "[^=]+")
-    local field = it()
-    local value = it()
-    if field and value then
-      for v in string.gmatch(value, "[0-9.]+") do
-        retval[field] = v
-        break
-      end
-    end
-  end
-  return retval
-end
-
 --------------------------------------------------------------------------------
 -- EventQueue class
 --------------------------------------------------------------------------------
@@ -74,7 +58,7 @@ function EventQueue:flush()
       sink = ltn12.sink.table(http_result_body),
       -- request body needs to be formatted as a LTN12 source
       source = ltn12.source.string(http_post_data),
-      headers = { 
+      headers = {
         -- mandatory for POST request with body
         ["content-length"] = string.len(http_post_data)
       }
@@ -103,7 +87,7 @@ end
 function EventQueue:add(e)
   broker_log:info(2, "EventQueue:add: " .. broker.json_encode(e))
   local metric = e.name
-  -- time is a reserved word in influxDB so I rename it 
+  -- time is a reserved word in influxDB so I rename it
   if metric == "time" then
     metric = "_"..metric
   end
@@ -120,7 +104,7 @@ function EventQueue:add(e)
     service_description = e.service_id
   end
   -- we finally append the event to the events table
-  local perfdata = parse_perfdata(e.perfdata)
+  local perfdata = broker.parse_perfdata(e.perfdata)
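-- For reference: broker.parse_perfdata() is part of the Centreon Broker Lua API;
-- it turns a plugin perfdata string such as "rta=0.80ms;200;400;0; pl=0%;20;50;0;100"
-- into a table of metric names and numeric values, e.g. { rta = 0.8, pl = 0 }
-- (illustrative), which the next(perfdata) check below then inspects.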
   if not next(perfdata) then
     broker_log:info(3, "EventQueue:add: No metric")
     return true

From b80ed9e04399e55c14de552b1bb68a6b6c4b328a Mon Sep 17 00:00:00 2001
From: Etienne G
Date: Thu, 17 Jan 2019 15:52:06 +0100
Subject: [PATCH 017/219] Add Service Now Stream Connector (#4)

* Add Service Now stream connector
---
 stream-connectors/README.md                  |  51 ++++
 .../servicenow/connector-servicenow.lua      | 275 ++++++++++++++++++
 2 files changed, 326 insertions(+)
 create mode 100644 stream-connectors/servicenow/connector-servicenow.lua

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index ea223604231..d05f10f4b5b 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -131,3 +131,54 @@ An HTTP events collector has be configured in data entries.
 
 ![alt text](pictures/splunk.png "Splunk configuration")
 
+# Service Now
+
+The stream connector sends the check results received from Centreon Engine to ServiceNow. Only the host and service check results are sent.
+
+This stream connector is in **BETA** version because it has not yet been used long enough in production environments.
+
+## Configuration
+
+In *Configuration > Pollers > Broker configuration*, you need to modify the Central Broker Master configuration.
+
+Add an output whose type is Stream Connector.
+Choose a name for your configuration.
+Enter the path to the **connector-servicenow.lua** file.
+
+Configure the *lua parameters* with the following information:
+
+Name | Type | Description
+--- | --- | ---
+client\_id | String | The client id for OAuth authentication
+client\_secret | String | The client secret for OAuth authentication
+username | String | Username for OAuth authentication
+password | Password | Password for OAuth authentication
+instance | String | The ServiceNow instance
+logfile | String | The log file with its full path (optional)
+
+## Protocol description
+
+The following tables describe the matching information between Centreon and the
+ServiceNow Event Manager.
+
+
+**Host event**
+
+Centreon | ServiceNow Event Manager field | Description
+--- | --- | ---
+hostname | node | The hostname
+output | description | The Centreon Plugin output
+last\_check | time\_of\_event | The time of the event
+hostname | resource | The hostname
+current\_state | severity | The level of severity depends on the host status (0 when the host is UP, 1 otherwise)
+
+**Service event**
+
+Centreon | ServiceNow Event Manager field | Description
+--- | --- | ---
+hostname | node | The hostname
+output | description | The Centreon Plugin output
+last\_check | time\_of\_event | The time of the event
+service\_description | resource | The service name
+current\_state | severity | The level of severity depends on the service status (OK: 0, WARNING: 3, CRITICAL: 1, UNKNOWN: 4)
+
diff --git a/stream-connectors/servicenow/connector-servicenow.lua b/stream-connectors/servicenow/connector-servicenow.lua
new file mode 100644
index 00000000000..07f31943440
--- /dev/null
+++ b/stream-connectors/servicenow/connector-servicenow.lua
@@ -0,0 +1,275 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Servicenow Connector
+--
+--------------------------------------------------------------------------------
+
+local curl = require "cURL"
+
+local serviceNow
+
+-- Class for the ServiceNow connection
+local ServiceNow = {}
+ServiceNow.__index = ServiceNow
+
+function ServiceNow:new(instance, username, password, clientId, clientPassword)
+  local serviceNow = {}
+  setmetatable(serviceNow, ServiceNow)
+  serviceNow.instance = instance
+  serviceNow.username = username
+  serviceNow.password = password
+  serviceNow.clientId = clientId
+  serviceNow.clientPassword = clientPassword
+  serviceNow.tokens = {}
+  serviceNow.tokens.authToken = nil
+  serviceNow.tokens.refreshToken = nil
+  return serviceNow
+end
+
+function ServiceNow:getAuthToken ()
+  if not self:refreshTokenIsValid() then
+    self:authToken()
+  end
+  if not self:accessTokenIsValid() then
+    self:refreshToken(self.tokens.refreshToken.token)
+  end
+  return self.tokens.authToken.token
+end
+
+function ServiceNow:authToken ()
+  local data = "grant_type=password&client_id=" .. self.clientId .. "&client_secret=" .. self.clientPassword .. "&username=" .. self.username .. "&password=" .. self.password
+
+  local res = self:call(
+    "oauth_token.do",
+    "POST",
+    data
+  )
+  if not res.access_token then
+    error("Authentication failed")
+  end
+  self.tokens.authToken = {
+    token = res.access_token,
+    expTime = os.time(os.date("!*t")) + 1700
+  }
+  self.tokens.refreshToken = {
+    token = res.refresh_token,
+    expTime = os.time(os.date("!*t")) + 360000
+  }
+end
+
+function ServiceNow:refreshToken (token)
+  local data = "grant_type=refresh_token&client_id=" .. self.clientId .. "&client_secret=" .. self.clientPassword .. "&username=" .. self.username .. "&password=" .. self.password
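-- For reference (an assumption about the endpoint, not spelled out in this patch):
-- oauth_token.do is expected to answer a standard OAuth2 JSON document, roughly
--   { "access_token": "...", "refresh_token": "...", "expires_in": 1799 }
-- (illustrative), which is where authToken() and refreshToken() read their fields.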
+  res = self:call(
+    "oauth_token.do",
+    "POST",
+    data
+  )
+  if not res.access_token then
+    error("Bad access token")
+  end
+  self.tokens.authToken = {
+    token = res.access_token,
+    expTime = os.time(os.date("!*t")) + 1700
+  }
+end
+
+function ServiceNow:refreshTokenIsValid ()
+  if not self.tokens.refreshToken then
+    return false
+  end
+  if os.time(os.date("!*t")) > self.tokens.refreshToken.expTime then
+    self.refreshToken = nil
+    return false
+  end
+  return true
+end
+
+function ServiceNow:accessTokenIsValid ()
+  if not self.tokens.authToken then
+    return false
+  end
+  if os.time(os.date("!*t")) > self.tokens.authToken.expTime then
+    self.authToken = nil
+    return false
+  end
+  return true
+end
+
+function ServiceNow:call (url, method, data, authToken)
+  method = method or "GET"
+  data = data or nil
+  authToken = authToken or nil
+
+  local endpoint = "https://" .. tostring(self.instance) .. ".service-now.com/" .. tostring(url)
+  broker_log:info(1, "Prepare url " .. endpoint)
+
+  local res = ""
+  local request = curl.easy()
+    :setopt_url(endpoint)
+    :setopt_writefunction(function (response)
+      res = res .. tostring(response)
+    end)
+  broker_log:info(1, "Request initialize")
+
+  if not authToken then
+    if method ~= "GET" then
+      broker_log:info(1, "Add form header")
+      request:setopt(curl.OPT_HTTPHEADER, { "Content-Type: application/x-www-form-urlencoded" })
+      broker_log:info(1, "After add form header")
+    end
+  else
+    broker_log:info(1, "Add JSON header")
+    request:setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "Accept: application/json",
+        "Content-Type: application/json",
+        "Authorization: Bearer " .. authToken
+      }
+    )
+  end
+
+  if method ~= "GET" then
+    broker_log:info(1, "Add post data")
+    request:setopt_postfields(data)
+  end
+
+  broker_log:info(1, "Call url " .. endpoint)
+  request:perform()
+
+  respCode = request:getinfo(curl.INFO_RESPONSE_CODE)
+  broker_log:info(1, "HTTP Code : " .. respCode)
+  broker_log:info(1, "Response body : " .. tostring(res))
+
+  request:close()
+
+  if respCode >= 300 then
+    broker_log:info(1, "HTTP Code : " .. respCode)
+    broker_log:info(1, "HTTP Error : " .. res)
+    error("Bad request code")
+  end
+
+  if res == "" then
+    broker_log:info(1, "HTTP Error : " .. res)
+    error("Bad content")
+  end
+
+  broker_log:info(1, "Parsing JSON")
+  return broker.json_decode(res)
+end
+
+function ServiceNow:sendEvent (event)
+  local authToken = self:getAuthToken()
+
+  broker_log:info(1, "Event information :")
+  for k, v in pairs(event) do
+    broker_log:info(1, tostring(k) .. " : " .. tostring(v))
+  end
+  broker_log:info(1, "------")
+
+  broker_log:info(1, "Auth token " .. authToken)
+  if pcall(self.call, self,
+    "api/now/table/em_event",
+    "POST",
+    broker.json_encode(event),
+    authToken
+  ) then
+    return true
+  end
+  return false
+end
+
+function init(parameters)
+  logfile = parameters.logfile or "/var/log/centreon-broker/connector-servicenow.log"
+  if not parameters.instance or not parameters.username or not parameters.password
+     or not parameters.client_id or not parameters.client_secret then
+    error("The needed parameters are 'instance', 'username', 'password', 'client_id' and 'client_secret'")
+  end
+  broker_log:set_parameters(1, logfile)
+  broker_log:info(1, "Parameters")
+  for i,v in pairs(parameters) do
+    broker_log:info(1, "Init " .. i .. " : " .. v)
+  end
+  serviceNow = ServiceNow:new(
+    parameters.instance,
+    parameters.username,
+    parameters.password,
+    parameters.client_id,
+    parameters.client_secret
+  )
+end
+
+function write(data)
+  local sendData = {
+    source = "centreon",
+    event_class = "centreon",
+    severity = 5
+  }
+
+  broker_log:info(1, "Prepare Go category " .. tostring(data.category) .. " element " .. tostring(data.element))
+
+  if data.category == 1 then
+    broker_log:info(1, "Broker event data")
+    for k, v in pairs(data) do
+      broker_log:info(1, tostring(k) .. " : " .. tostring(v))
+    end
+    broker_log:info(1, "------")
+
+    -- Doesn't process if the host is acknowledged or disabled
+    if data.acknowledged or not data.enabled then
+      broker_log:info(1, "Dropped because acknowledged or not enabled")
+      return true
+    end
+    -- Doesn't process if the host state is not hard
+    if data.state_type ~= 1 then
+      broker_log:info(1, "Dropped because state is not hard")
+      return true
+    end
+    hostname = broker_cache:get_hostname(data.host_id)
+    if not hostname then
+      broker_log:info(1, "Dropped missing hostname")
+      return true
+    end
+    sendData.node = hostname
+    sendData.description = data.output
+    sendData.time_of_event = os.date("%Y-%m-%d %H:%M:%S", data.last_check)
+    if data.element == 14 then
+      sendData.resource = hostname
+      if data.current_state == 0 then
+        sendData.severity = 0
+      elseif data.current_state then
+        sendData.severity = 1
+      end
+    else
+      service_description = broker_cache:get_service_description(data.host_id, data.service_id)
+      if not service_description then
+        broker_log:info(1, "Dropped missing service description")
+        return true
+      end
+      if data.current_state == 0 then
+        sendData.severity = 0
+      elseif data.current_state == 1 then
+        sendData.severity = 3
+      elseif data.current_state == 2 then
+        sendData.severity = 1
+      elseif data.current_state == 3 then
+        sendData.severity = 4
+      end
+      sendData.resource = service_description
+    end
+  else
+    return true
+  end

+  return serviceNow:sendEvent(sendData)
+end
+
+function filter(category, element)
+  if category == 1 then
+    if element == 14 or element == 24 then
+      broker_log:info(1, "Go category " .. tostring(category) .. " element " .. tostring(element))
+      return true
+    end
+  end
+  return false
+end

From 2d3b585cbdb140099e058711556dbe69ee00eba3 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Thu, 31 Jan 2019 10:42:49 +0100
Subject: [PATCH 018/219] Update README.md

---
 stream-connectors/README.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index d05f10f4b5b..5f02d3a5005 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -137,6 +137,12 @@ The stream connector sends the check results received from Centreon Engine to Se
 
 This stream connector is in **BETA** version because it has not yet been used long enough in production environments.
 
+## Installation
+
+This stream connector needs the lua-curl library, available for example with *luarocks*:
+
+`luarocks install lua-curl`
+
 ## Configuration
 
 In *Configuration > Pollers > Broker configuration*, you need to modify the Central Broker Master configuration.
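For orientation, the em_event payload that `write()` builds in connector-servicenow.lua and that `ServiceNow:sendEvent()` POSTs to `api/now/table/em_event` looks like the sketch below. The host, service and output values are hypothetical; only the field names and the severity mapping come from the code above.

```lua
-- Sketch of the table built by write() for a CRITICAL service check
-- (hypothetical host/service/output values).
local sendData = {
  source        = "centreon",
  event_class   = "centreon",
  node          = "srv-db-01",              -- hostname from broker_cache
  resource      = "Cpu",                    -- service description
  description   = "CPU CRITICAL: 92% used", -- plugin output
  time_of_event = "2019-01-17 15:52:06",    -- os.date() on last_check
  severity      = 1                         -- CRITICAL service state maps to 1
}
-- serviceNow:sendEvent(sendData) then POSTs broker.json_encode(sendData).
```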
From 270800796527bba41da6a484fedc1a54af0ecc98 Mon Sep 17 00:00:00 2001 From: David Boucher Date: Thu, 31 Jan 2019 10:51:05 +0100 Subject: [PATCH 019/219] feat(README.md) A TOC is added --- stream-connectors/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/stream-connectors/README.md b/stream-connectors/README.md index 5f02d3a5005..142f7ef6510 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -21,6 +21,16 @@ Stream connector documentation are provided here: Don't hesitate to propose improvements and/or contact the community through our Slack workspace. +Here is a list of the available scripts here: + +* [Elasticsearch](#elasticsearch) +* https://github.com/centreon/centreon-stream-connector-scripts#elasticsearch +* https://github.com/centreon/centreon-stream-connector-scripts#Influxdb +* https://github.com/centreon/centreon-stream-connector-scripts#Warp10 +* https://github.com/centreon/centreon-stream-connector-scripts#Splunk +* https://github.com/centreon/centreon-stream-connector-scripts#Warp10 +* https://github.com/centreon/centreon-stream-connector-scripts#service-now + # Elasticsearch ## Elasticsearch from metrics events: *elasticsearch/elastic-metrics.lua* From 2c1f2e4bd42cafdc028982c8e4691a2a85a0112e Mon Sep 17 00:00:00 2001 From: David Boucher Date: Thu, 31 Jan 2019 10:54:23 +0100 Subject: [PATCH 020/219] fix(README.md): TOC updated --- stream-connectors/README.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/stream-connectors/README.md b/stream-connectors/README.md index 142f7ef6510..b2fce31988c 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -24,12 +24,10 @@ Don't hesitate to propose improvements and/or contact the community through our Here is a list of the available scripts here: * [Elasticsearch](#elasticsearch) -* https://github.com/centreon/centreon-stream-connector-scripts#elasticsearch -* https://github.com/centreon/centreon-stream-connector-scripts#Influxdb -* https://github.com/centreon/centreon-stream-connector-scripts#Warp10 -* https://github.com/centreon/centreon-stream-connector-scripts#Splunk -* https://github.com/centreon/centreon-stream-connector-scripts#Warp10 -* https://github.com/centreon/centreon-stream-connector-scripts#service-now +* [InfluxDB](#InfluxDB) +* [Warp 10](#Warp10) +* [Splunk](#Splunk) +* [ServiceNow](#service-now) # Elasticsearch From 4d0d61c7127aefc3332b72de5f808c9bbf631809 Mon Sep 17 00:00:00 2001 From: David Boucher Date: Thu, 31 Jan 2019 10:55:07 +0100 Subject: [PATCH 021/219] fix(README.md): Typo --- stream-connectors/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/README.md b/stream-connectors/README.md index b2fce31988c..c8c362b4d2e 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -21,7 +21,7 @@ Stream connector documentation are provided here: Don't hesitate to propose improvements and/or contact the community through our Slack workspace. 
-Here is a list of the available scripts here: +Here is a list of the available scripts: * [Elasticsearch](#elasticsearch) * [InfluxDB](#InfluxDB) From c2faffbf08b03c0bce8439ba5ce0448cc8acaacd Mon Sep 17 00:00:00 2001 From: Maximilien Bersoult Date: Mon, 18 Feb 2019 16:59:18 +0100 Subject: [PATCH 022/219] fix(warp10): Remove space in class name and better class name and labels --- stream-connectors/warp10/export-warp10.lua | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stream-connectors/warp10/export-warp10.lua b/stream-connectors/warp10/export-warp10.lua index a7da343c3dd..f9a831de5a8 100644 --- a/stream-connectors/warp10/export-warp10.lua +++ b/stream-connectors/warp10/export-warp10.lua @@ -82,10 +82,10 @@ function write(d) end for metric,v in pairs(pd) do local line = tostring(d.last_update) .. "000000// " - .. host .. ":" .. service .. ":" .. metric - .. " {" .. "host=" .. host - .. ", service=" .. service - .. ", metric=" .. metric .. "} " + .. metric + .. "{" .. "host=" .. host + .. ",service=" .. service + .. "} " .. tostring(v) table.insert(my_data.data, line) broker_log:info(0, "New line added to data: '" .. line .. "'") From eca138b58982b2e129d7669785b027b76978e296 Mon Sep 17 00:00:00 2001 From: benrobert Date: Tue, 16 Apr 2019 15:44:55 +0200 Subject: [PATCH 023/219] add NDO output (#7) * add NDO output * add OMI stream connector * rename file * add url configuration and improve readme --- stream-connectors/README.md | 30 + stream-connectors/ndo/ndo-module.lua | 332 +++++++++++ stream-connectors/ndo/ndo-output.lua | 722 ++++++++++++++++++++++++ stream-connectors/omi/omi_connector.lua | 151 +++++ 4 files changed, 1235 insertions(+) create mode 100644 stream-connectors/ndo/ndo-module.lua create mode 100644 stream-connectors/ndo/ndo-output.lua create mode 100644 stream-connectors/omi/omi_connector.lua diff --git a/stream-connectors/README.md b/stream-connectors/README.md index c8c362b4d2e..21dd07e7465 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -28,6 +28,8 @@ Here is a list of the available scripts: * [Warp 10](#Warp10) * [Splunk](#Splunk) * [ServiceNow](#service-now) +* [NDO](#NDO) +* [HP OMI](#OMI) # Elasticsearch @@ -196,3 +198,31 @@ last\_check | time\_of\_event | The time of the event service\_description | resource | The service name severity | The level of severity depends on the host status +# NDO + +## Send service status events in the historical NDO protocol format : *ndo/ndo-output.lua* +NDO protocol is no longer supported by Centreon Broker. It is now replaced by BBDO (lower network footprint, automatic compression and encryption). +However it is possible to emulate the historical NDO protocol output with this stream connector. 
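For orientation, a host-status block serialized in the historical NDO text protocol looks roughly as follows. The framing (an API id line, then one `attribute_id=value` pair per field, terminated by ENDDATA) is an assumption based on the legacy NDO utils format, not something this patch spells out; the numeric ids are the ones declared in ndo-module.lua below (212 = HOSTSTATUSDATA, 4 = TIMESTAMP, 174 = HOSTNAME, 27 = CURRENTSTATE, 999 = ENDDATA) and the values are illustrative:

```
212:
4=1548000000
174=central-srv
27=0
999
```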
+
+Parameters to specify in the broker output web ui are:
+
+* ipaddr as **string**: the ip address of the listening server
+* port as **number**: the listening server port
+* max-row as **number**: the number of events to store before sending the data
+
+By default logs are in /var/log/centreon-broker/ndo-output.log
+
+# OMI
+
+## stream connector for HP OMI : *omi/omi_connector.lua*
+
+Create a broker output for HP OMI Connector
+
+Parameters to specify in the broker output web ui are:
+
+* ipaddr as **string**: the ip address of the listening server
+* port as **number**: the listening server port
+* logfile as **string**: where to send logs
+* loglevel as **number**: the log level (0, 1, 2, 3) where 3 is the maximum level
+* max_size as **number**: how many events to store before sending them to the server
+* max_age as **number**: flush the events when the specified time (in seconds) is reached (even if max_size is not reached)
diff --git a/stream-connectors/ndo/ndo-module.lua b/stream-connectors/ndo/ndo-module.lua
new file mode 100644
index 00000000000..91f22e49bc4
--- /dev/null
+++ b/stream-connectors/ndo/ndo-module.lua
@@ -0,0 +1,332 @@
+local ndo = {}
+
+ndo.api = {
+    NDO_API_PROTOVERSION = 2,
+    NDO_API_STARTCONFIGDUMP = 900,
+    NDO_API_ENDCONFIGDUMP = 901,
+    NDO_API_ENDDATA = 999,
+    NDO_API_ENDDATADUMP = 1000,
+    NDO_API_LOGENTRY = 100,
+    NDO_API_PROCESSDATA = 200,
+    NDO_API_TIMEDEVENTDATA = 201,
+    NDO_API_LOGDATA = 202,
+    NDO_API_SYSTEMCOMMANDDATA = 203,
+    NDO_API_EVENTHANDLERDATA = 204,
+    NDO_API_NOTIFICATIONDATA = 205,
+    NDO_API_SERVICECHECKDATA = 206,
+    NDO_API_HOSTCHECKDATA = 207,
+    NDO_API_COMMENTDATA = 208,
+    NDO_API_DOWNTIMEDATA = 209,
+    NDO_API_FLAPPINGDATA = 210,
+    NDO_API_PROGRAMSTATUSDATA = 211,
+    NDO_API_HOSTSTATUSDATA = 212,
+    NDO_API_SERVICESTATUSDATA = 213,
+    NDO_API_ADAPTIVEPROGRAMDATA = 214,
+    NDO_API_ADAPTIVEHOSTDATA = 215,
+    NDO_API_ADAPTIVESERVICEDATA = 216,
+    NDO_API_EXTERNALCOMMANDDATA = 217,
+    NDO_API_AGGREGATEDSTATUSDATA = 218,
+    NDO_API_RETENTIONDATA = 219,
+    NDO_API_CONTACTNOTIFICATIONDATA = 220,
+    NDO_API_CONTACTNOTIFICATIONMETHODDATA = 221,
+    NDO_API_ACKNOWLEDGEMENTDATA = 222,
+    NDO_API_STATECHANGEDATA = 223,
+    NDO_API_CONTACTSTATUSDATA = 224,
+    NDO_API_ADAPTIVECONTACTDATA = 225,
+    NDO_API_MAINCONFIGFILEVARIABLES = 300,
+    NDO_API_RESOURCECONFIGFILEVARIABLES = 301,
+    NDO_API_CONFIGVARIABLES = 302,
+    NDO_API_RUNTIMEVARIABLES = 303,
+    NDO_API_HOSTDEFINITION = 400,
+    NDO_API_HOSTGROUPDEFINITION = 401,
+    NDO_API_SERVICEDEFINITION = 402,
+    NDO_API_SERVICEGROUPDEFINITION = 403,
+    NDO_API_HOSTDEPENDENCYDEFINITION = 404,
+    NDO_API_SERVICEDEPENDENCYDEFINITION = 405,
+    NDO_API_HOSTESCALATIONDEFINITION = 406,
+    NDO_API_SERVICEESCALATIONDEFINITION = 407,
+    NDO_API_COMMANDDEFINITION = 408,
+    NDO_API_TIMEPERIODDEFINITION = 409,
+    NDO_API_CONTACTDEFINITION = 410,
+    NDO_API_CONTACTGROUPDEFINITION = 411,
+    NDO_API_HOSTEXTINFODEFINITION = 412,
+    NDO_API_SERVICEEXTINFODEFINITION = 413,
+    NDO_API_ACTIVEOBJECTSLIST = 414
+}
+
+ndo.data = {
+    NDO_DATA_NONE = 0,
+    NDO_DATA_TYPE = 1,
+    NDO_DATA_FLAGS = 2,
+    NDO_DATA_ATTRIBUTES = 3,
+    NDO_DATA_TIMESTAMP = 4,
+    NDO_DATA_ACKAUTHOR = 5,
+    NDO_DATA_ACKDATA = 6,
+    NDO_DATA_ACKNOWLEDGEMENTTYPE = 7,
+    NDO_DATA_ACTIVEHOSTCHECKSENABLED = 8,
+    NDO_DATA_ACTIVESERVICECHECKSENABLED = 9,
+    NDO_DATA_AUTHORNAME = 10,
+    NDO_DATA_CHECKCOMMAND = 11,
+    NDO_DATA_CHECKTYPE = 12,
+    NDO_DATA_COMMANDARGS = 13,
+    NDO_DATA_COMMANDLINE = 14,
+    NDO_DATA_COMMANDSTRING = 15,
+    NDO_DATA_COMMANDTYPE = 16,
+    NDO_DATA_COMMENT = 17,
+    NDO_DATA_COMMENTID = 18,
+
NDO_DATA_COMMENTTIME = 19, + NDO_DATA_COMMENTTYPE = 20, + NDO_DATA_CONFIGFILENAME = 21, + NDO_DATA_CONFIGFILEVARIABLE = 22, + NDO_DATA_CONFIGVARIABLE = 23, + NDO_DATA_CONTACTSNOTIFIED = 24, + NDO_DATA_CURRENTCHECKATTEMPT = 25, + NDO_DATA_CURRENTNOTIFICATIONNUMBER = 26, + NDO_DATA_CURRENTSTATE = 27, + NDO_DATA_DAEMONMODE = 28, + NDO_DATA_DOWNTIMEID = 29, + NDO_DATA_DOWNTIMETYPE = 30, + NDO_DATA_DURATION = 31, + NDO_DATA_EARLYTIMEOUT = 32, + NDO_DATA_ENDTIME = 33, + NDO_DATA_ENTRYTIME = 34, + NDO_DATA_ENTRYTYPE = 35, + NDO_DATA_ESCALATED = 36, + NDO_DATA_EVENTHANDLER = 37, + NDO_DATA_EVENTHANDLERENABLED = 38, + NDO_DATA_EVENTHANDLERSENABLED = 39, + NDO_DATA_EVENTHANDLERTYPE = 40, + NDO_DATA_EVENTTYPE = 41, + NDO_DATA_EXECUTIONTIME = 42, + NDO_DATA_EXPIRATIONTIME = 43, + NDO_DATA_EXPIRES = 44, + NDO_DATA_FAILUREPREDICTIONENABLED = 45, + NDO_DATA_FIXED = 46, + NDO_DATA_FLAPDETECTIONENABLED = 47, + NDO_DATA_FLAPPINGTYPE = 48, + NDO_DATA_GLOBALHOSTEVENTHANDLER = 49, + NDO_DATA_GLOBALSERVICEEVENTHANDLER = 50, + NDO_DATA_HASBEENCHECKED = 51, + NDO_DATA_HIGHTHRESHOLD = 52, + NDO_DATA_HOST = 53, + NDO_DATA_ISFLAPPING = 54, + NDO_DATA_LASTCOMMANDCHECK = 55, + NDO_DATA_LASTHARDSTATE = 56, + NDO_DATA_LASTHARDSTATECHANGE = 57, + NDO_DATA_LASTHOSTCHECK = 58, + NDO_DATA_LASTHOSTNOTIFICATION = 59, + NDO_DATA_LASTLOGROTATION = 60, + NDO_DATA_LASTSERVICECHECK = 61, + NDO_DATA_LASTSERVICENOTIFICATION = 62, + NDO_DATA_LASTSTATECHANGE = 63, + NDO_DATA_LASTTIMECRITICAL = 64, + NDO_DATA_LASTTIMEDOWN = 65, + NDO_DATA_LASTTIMEOK = 66, + NDO_DATA_LASTTIMEUNKNOWN = 67, + NDO_DATA_LASTTIMEUNREACHABLE = 68, + NDO_DATA_LASTTIMEUP = 69, + NDO_DATA_LASTTIMEWARNING = 70, + NDO_DATA_LATENCY = 71, + NDO_DATA_LOGENTRY = 72, + NDO_DATA_LOGENTRYTIME = 73, + NDO_DATA_LOGENTRYTYPE = 74, + NDO_DATA_LOWTHRESHOLD = 75, + NDO_DATA_MAXCHECKATTEMPTS = 76, + NDO_DATA_MODIFIEDHOSTATTRIBUTE = 77, + NDO_DATA_MODIFIEDHOSTATTRIBUTES = 78, + NDO_DATA_MODIFIEDSERVICEATTRIBUTE = 79, + NDO_DATA_MODIFIEDSERVICEATTRIBUTES = 80, + NDO_DATA_NEXTHOSTCHECK = 81, + NDO_DATA_NEXTHOSTNOTIFICATION = 82, + NDO_DATA_NEXTSERVICECHECK = 83, + NDO_DATA_NEXTSERVICENOTIFICATION = 84, + NDO_DATA_NOMORENOTIFICATIONS = 85, + NDO_DATA_NORMALCHECKINTERVAL = 86, + NDO_DATA_NOTIFICATIONREASON = 87, + NDO_DATA_NOTIFICATIONSENABLED = 88, + NDO_DATA_NOTIFICATIONTYPE = 89, + NDO_DATA_NOTIFYCONTACTS = 90, + NDO_DATA_OBSESSOVERHOST = 91, + NDO_DATA_OBSESSOVERHOSTS = 92, + NDO_DATA_OBSESSOVERSERVICE = 93, + NDO_DATA_OBSESSOVERSERVICES = 94, + NDO_DATA_OUTPUT = 95, + NDO_DATA_PASSIVEHOSTCHECKSENABLED = 96, + NDO_DATA_PASSIVESERVICECHECKSENABLED = 97, + NDO_DATA_PERCENTSTATECHANGE = 98, + NDO_DATA_PERFDATA = 99, + NDO_DATA_PERSISTENT = 100, + NDO_DATA_PROBLEMHASBEENACKNOWLEDGED = 101, + NDO_DATA_PROCESSID = 102, + NDO_DATA_PROCESSPERFORMANCEDATA = 103, + NDO_DATA_PROGRAMDATE = 104, + NDO_DATA_PROGRAMNAME = 105, + NDO_DATA_PROGRAMSTARTTIME = 106, + NDO_DATA_PROGRAMVERSION = 107, + NDO_DATA_RECURRING = 108, + NDO_DATA_RETRYCHECKINTERVAL = 109, + NDO_DATA_RETURNCODE = 110, + NDO_DATA_RUNTIME = 111, + NDO_DATA_RUNTIMEVARIABLE = 112, + NDO_DATA_SCHEDULEDDOWNTIMEDEPTH = 113, + NDO_DATA_SERVICE = 114, + NDO_DATA_SHOULDBESCHEDULED = 115, + NDO_DATA_SOURCE = 116, + NDO_DATA_STARTTIME = 117, + NDO_DATA_STATE = 118, + NDO_DATA_STATECHANGE = 119, + NDO_DATA_STATECHANGETYPE = 120, + NDO_DATA_STATETYPE = 121, + NDO_DATA_STICKY = 122, + NDO_DATA_TIMEOUT = 123, + NDO_DATA_TRIGGEREDBY = 124, + NDO_DATA_LONGOUTPUT = 125, + NDO_DATA_ACTIONURL = 126, + NDO_DATA_COMMANDNAME = 127, + 
NDO_DATA_CONTACTADDRESS = 128, + NDO_DATA_CONTACTALIAS = 129, + NDO_DATA_CONTACTGROUP = 130, + NDO_DATA_CONTACTGROUPALIAS = 131, + NDO_DATA_CONTACTGROUPMEMBER = 132, + NDO_DATA_CONTACTGROUPNAME = 133, + NDO_DATA_CONTACTNAME = 134, + NDO_DATA_DEPENDENCYTYPE = 135, + NDO_DATA_DEPENDENTHOSTNAME = 136, + NDO_DATA_DEPENDENTSERVICEDESCRIPTION = 137, + NDO_DATA_EMAILADDRESS = 138, + NDO_DATA_ESCALATEONCRITICAL = 139, + NDO_DATA_ESCALATEONDOWN = 140, + NDO_DATA_ESCALATEONRECOVERY = 141, + NDO_DATA_ESCALATEONUNKNOWN = 142, + NDO_DATA_ESCALATEONUNREACHABLE = 143, + NDO_DATA_ESCALATEONWARNING = 144, + NDO_DATA_ESCALATIONPERIOD = 145, + NDO_DATA_FAILONCRITICAL = 146, + NDO_DATA_FAILONDOWN = 147, + NDO_DATA_FAILONOK = 148, + NDO_DATA_FAILONUNKNOWN = 149, + NDO_DATA_FAILONUNREACHABLE = 150, + NDO_DATA_FAILONUP = 151, + NDO_DATA_FAILONWARNING = 152, + NDO_DATA_FIRSTNOTIFICATION = 153, + NDO_DATA_HAVE2DCOORDS = 154, + NDO_DATA_HAVE3DCOORDS = 155, + NDO_DATA_HIGHHOSTFLAPTHRESHOLD = 156, + NDO_DATA_HIGHSERVICEFLAPTHRESHOLD = 157, + NDO_DATA_HOSTADDRESS = 158, + NDO_DATA_HOSTALIAS = 159, + NDO_DATA_HOSTCHECKCOMMAND = 160, + NDO_DATA_HOSTCHECKINTERVAL = 161, + NDO_DATA_HOSTCHECKPERIOD = 162, + NDO_DATA_HOSTEVENTHANDLER = 163, + NDO_DATA_HOSTEVENTHANDLERENABLED = 164, + NDO_DATA_HOSTFAILUREPREDICTIONENABLED = 165, + NDO_DATA_HOSTFAILUREPREDICTIONOPTIONS = 166, + NDO_DATA_HOSTFLAPDETECTIONENABLED = 167, + NDO_DATA_HOSTFRESHNESSCHECKSENABLED = 168, + NDO_DATA_HOSTFRESHNESSTHRESHOLD = 169, + NDO_DATA_HOSTGROUPALIAS = 170, + NDO_DATA_HOSTGROUPMEMBER = 171, + NDO_DATA_HOSTGROUPNAME = 172, + NDO_DATA_HOSTMAXCHECKATTEMPTS = 173, + NDO_DATA_HOSTNAME = 174, + NDO_DATA_HOSTNOTIFICATIONCOMMAND = 175, + NDO_DATA_HOSTNOTIFICATIONINTERVAL = 176, + NDO_DATA_HOSTNOTIFICATIONPERIOD = 177, + NDO_DATA_HOSTNOTIFICATIONSENABLED = 178, + NDO_DATA_ICONIMAGE = 179, + NDO_DATA_ICONIMAGEALT = 180, + NDO_DATA_INHERITSPARENT = 181, + NDO_DATA_LASTNOTIFICATION = 182, + NDO_DATA_LOWHOSTFLAPTHRESHOLD = 183, + NDO_DATA_LOWSERVICEFLAPTHRESHOLD = 184, + NDO_DATA_MAXSERVICECHECKATTEMPTS = 185, + NDO_DATA_NOTES = 186, + NDO_DATA_NOTESURL = 187, + NDO_DATA_NOTIFICATIONINTERVAL = 188, + NDO_DATA_NOTIFYHOSTDOWN = 189, + NDO_DATA_NOTIFYHOSTFLAPPING = 190, + NDO_DATA_NOTIFYHOSTRECOVERY = 191, + NDO_DATA_NOTIFYHOSTUNREACHABLE = 192, + NDO_DATA_NOTIFYSERVICECRITICAL = 193, + NDO_DATA_NOTIFYSERVICEFLAPPING = 194, + NDO_DATA_NOTIFYSERVICERECOVERY = 195, + NDO_DATA_NOTIFYSERVICEUNKNOWN = 196, + NDO_DATA_NOTIFYSERVICEWARNING = 197, + NDO_DATA_PAGERADDRESS = 198, + NDO_DATA_PARALLELIZESERVICECHECK = 199, + NDO_DATA_PARENTHOST = 200, + NDO_DATA_PROCESSHOSTPERFORMANCEDATA = 201, + NDO_DATA_PROCESSSERVICEPERFORMANCEDATA = 202, + NDO_DATA_RETAINHOSTNONSTATUSINFORMATION = 203, + NDO_DATA_RETAINHOSTSTATUSINFORMATION = 204, + NDO_DATA_RETAINSERVICENONSTATUSINFORMATION = 205, + NDO_DATA_RETAINSERVICESTATUSINFORMATION = 206, + NDO_DATA_SERVICECHECKCOMMAND = 207, + NDO_DATA_SERVICECHECKINTERVAL = 208, + NDO_DATA_SERVICECHECKPERIOD = 209, + NDO_DATA_SERVICEDESCRIPTION = 210, + NDO_DATA_SERVICEEVENTHANDLER = 211, + NDO_DATA_SERVICEEVENTHANDLERENABLED = 212, + NDO_DATA_SERVICEFAILUREPREDICTIONENABLED = 213, + NDO_DATA_SERVICEFAILUREPREDICTIONOPTIONS = 214, + NDO_DATA_SERVICEFLAPDETECTIONENABLED = 215, + NDO_DATA_SERVICEFRESHNESSCHECKSENABLED = 216, + NDO_DATA_SERVICEFRESHNESSTHRESHOLD = 217, + NDO_DATA_SERVICEGROUPALIAS = 218, + NDO_DATA_SERVICEGROUPMEMBER = 219, + NDO_DATA_SERVICEGROUPNAME = 220, + NDO_DATA_SERVICEISVOLATILE = 221, + NDO_DATA_SERVICENOTIFICATIONCOMMAND 
= 222, + NDO_DATA_SERVICENOTIFICATIONINTERVAL = 223, + NDO_DATA_SERVICENOTIFICATIONPERIOD = 224, + NDO_DATA_SERVICENOTIFICATIONSENABLED = 225, + NDO_DATA_SERVICERETRYINTERVAL = 226, + NDO_DATA_SHOULDBEDRAWN = 227, + NDO_DATA_STALKHOSTONDOWN = 228, + NDO_DATA_STALKHOSTONUNREACHABLE = 229, + NDO_DATA_STALKHOSTONUP = 230, + NDO_DATA_STALKSERVICEONCRITICAL = 231, + NDO_DATA_STALKSERVICEONOK = 232, + NDO_DATA_STALKSERVICEONUNKNOWN = 233, + NDO_DATA_STALKSERVICEONWARNING = 234, + NDO_DATA_STATUSMAPIMAGE = 235, + NDO_DATA_TIMEPERIODALIAS = 236, + NDO_DATA_TIMEPERIODNAME = 237, + NDO_DATA_TIMERANGE = 238, + NDO_DATA_VRMLIMAGE = 239, + NDO_DATA_X2D = 240, + NDO_DATA_X3D = 241, + NDO_DATA_Y2D = 242, + NDO_DATA_Y3D = 243, + NDO_DATA_Z3D = 244, + NDO_DATA_CONFIGDUMPTYPE = 245, + NDO_DATA_FIRSTNOTIFICATIONDELAY = 246, + NDO_DATA_HOSTRETRYINTERVAL = 247, + NDO_DATA_NOTIFYHOSTDOWNTIME = 248, + NDO_DATA_NOTIFYSERVICEDOWNTIME = 249, + NDO_DATA_CANSUBMITCOMMANDS = 250, + NDO_DATA_FLAPDETECTIONONUP = 251, + NDO_DATA_FLAPDETECTIONONDOWN = 252, + NDO_DATA_FLAPDETECTIONONUNREACHABLE = 253, + NDO_DATA_FLAPDETECTIONONOK = 254, + NDO_DATA_FLAPDETECTIONONWARNING = 255, + NDO_DATA_FLAPDETECTIONONUNKNOWN = 256, + NDO_DATA_FLAPDETECTIONONCRITICAL = 257, + NDO_DATA_DISPLAYNAME = 258, + NDO_DATA_DEPENDENCYPERIOD = 259, + NDO_DATA_MODIFIEDCONTACTATTRIBUTE = 260, + NDO_DATA_MODIFIEDCONTACTATTRIBUTES = 261, + NDO_DATA_CUSTOMVARIABLE = 262, + NDO_DATA_HASBEENMODIFIED = 263, + NDO_DATA_CONTACT = 264, + NDO_DATA_LASTSTATE = 265, + NDO_DATA_INSTANCE = 266, + NDO_DATA_HOSTID = 267, + NDO_DATA_SERVICEID = 268, + NDO_DATA_LASTUPDATE = 269, + NDO_DATA_ACTUALENDTIME = 270, + NDO_DATA_ACTUALSTARTTIME = 271 +} + +return ndo diff --git a/stream-connectors/ndo/ndo-output.lua b/stream-connectors/ndo/ndo-output.lua new file mode 100644 index 00000000000..65d4cd8a50f --- /dev/null +++ b/stream-connectors/ndo/ndo-output.lua @@ -0,0 +1,722 @@ +local socket = require "socket" + +-- Specifying where is the module to load +package.path = package.path .. 
";/usr/share/centreon-broker/lua/ndo-module.lua" +local NDO = require "ndo" + +local ndo = { + [65537] = { + id = 1, + ndo_api_id = NDO.api.NDO_API_ACKNOWLEDGEMENTDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_AUTHORNAME, tag = "author" }, + { ndo_data = NDO.data.NDO_DATA_COMMENT, tag = "comment_data" }, + { ndo_data = NDO.data.NDO_DATA_EXPIRATIONTIME, tag = "deletion_time" }, + { ndo_data = NDO.data.NDO_DATA_TIMESTAMP, tag = "entry_time" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNAME, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_STICKY, tag = "sticky" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYCONTACTS, tag = "notify_contacts" }, + { ndo_data = NDO.data.NDO_DATA_PERSISTENT, tag = "persistent_comment" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_STATE, tag = "state" }, + } + }, + [65538] = { + id = 2, + ndo_api_id = NDO.api.NDO_API_COMMENTDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_AUTHORNAME, tag = "author" }, + { ndo_data = NDO.data.NDO_DATA_COMMENTTYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "deletion_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "entry_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTYPE, tag = "entry_type" }, + { ndo_data = NDO.data.NDO_DATA_EXPIRATIONTIME, tag = "expire_time" }, + { ndo_data = NDO.data.NDO_DATA_EXPIRES, tag = "expires" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNAME, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_COMMENTID, tag = "internal_id" }, + { ndo_data = NDO.data.NDO_DATA_PERSISTENT, tag = "persistent" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_SOURCE, tag = "source" }, + { ndo_data = NDO.data.NDO_DATA_COMMENT, tag = "data" }, + } + }, + [65539] = { + id = 3, + ndo_api_id = NDO.api.NDO_API_RUNTIMEVARIABLES, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENMODIFIED, tag = "modified" }, + { ndo_data = NDO.data.NDO_DATA_CONFIGFILENAME, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "update_time" }, + { ndo_data = NDO.data.NDO_DATA_TYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "value" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVEHOSTCHECKSENABLED, tag = "default_value" }, + } + }, + [65540] = { + id = 4, + ndo_api_id = NDO.api.NDO_API_CONFIGVARIABLES, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENMODIFIED, tag = "modified" }, + { ndo_data = NDO.data.NDO_DATA_CONFIGFILENAME, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "update_time" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "value" }, + } + }, + [65541] = { + id = 5, + ndo_api_id = NDO.api.NDO_API_DOWNTIMEDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_ACTUALENDTIME, tag = "actual_end_time" }, + { ndo_data = NDO.data.NDO_DATA_ACTUALSTARTTIME, tag = "actual_start_time" }, + { ndo_data = NDO.data.NDO_DATA_AUTHORNAME, tag = "author" }, + { ndo_data = NDO.data.NDO_DATA_DOWNTIMETYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_EXPIRATIONTIME, tag = "deletion_time" }, + 
{ ndo_data = NDO.data.NDO_DATA_DURATION, tag = "duration" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "end_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "entry_time" }, + { ndo_data = NDO.data.NDO_DATA_FIXED, tag = "fixed" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNAME, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_DOWNTIMEID, tag = "internal_id" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_STARTTIME, tag = "start_time" }, + { ndo_data = NDO.data.NDO_DATA_TRIGGEREDBY, tag = "triggered_by" }, + { ndo_data = NDO.data.NDO_DATA_X3D, tag = "cancelled" }, + { ndo_data = NDO.data.NDO_DATA_Y3D, tag = "started" }, + { ndo_data = NDO.data.NDO_DATA_COMMENT, tag = "comment_data" }, + } + }, + [65542] = { + id = 6, + ndo_api_id = NDO.api.NDO_API_EVENTHANDLERDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_EARLYTIMEOUT, tag = "early_timeout" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "end_time" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = NDO.data.NDO_DATA_TYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_RETURNCODE, tag = "return_code" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_STARTTIME, tag = "start_time" }, + { ndo_data = NDO.data.NDO_DATA_STATE, tag = "state" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_TIMEOUT, tag = "timeout" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDARGS, tag = "command_args" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDLINE, tag = "command_line" }, + { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + } + }, + [65543] = { + id = 7, + ndo_api_id = NDO.api.NDO_API_FLAPPINGDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_COMMENTTIME, tag = "comment_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "event_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTYPE, tag = "event_type" }, + { ndo_data = NDO.data.NDO_DATA_TYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_HIGHTHRESHOLD, tag = "high_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_COMMENTID, tag = "internal_comment_id" }, + { ndo_data = NDO.data.NDO_DATA_LOWTHRESHOLD, tag = "low_threshold" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONREASON, tag = "reason_type" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + } + }, + [65544] = { + id = 8, + ndo_api_id = NDO.api.NDO_API_HOSTDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "acknowledgement_type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIONURL, tag = "action_url" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVEHOSTCHECKSENABLED, tag = "active_checks" }, + { ndo_data = NDO.data.NDO_DATA_HOSTADDRESS, tag = "address" }, + { ndo_data = NDO.data.NDO_DATA_HOSTALIAS, tag = "alias" }, + { ndo_data = NDO.data.NDO_DATA_HOSTFRESHNESSCHECKSENABLED, tag = "check_freshness" }, + { ndo_data = NDO.data.NDO_DATA_NORMALCHECKINTERVAL, tag = "check_interval" }, + { ndo_data = NDO.data.NDO_DATA_HOSTCHECKPERIOD, tag = "check_period" }, + { ndo_data = NDO.data.NDO_DATA_CHECKTYPE, tag = "check_type" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTCHECKATTEMPT, tag = "check_attempt" }, + { ndo_data = 
NDO.data.NDO_DATA_CURRENTNOTIFICATIONNUMBER, tag = "notification_number" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTSTATE, tag = "state" }, + { ndo_data = 0, tag = "default_active_checks" }, + { ndo_data = 0, tag = "default_event_handler_enabled" }, + { ndo_data = 0, tag = "default_failure_prediction" }, + { ndo_data = 0, tag = "default_flap_detection" }, + { ndo_data = 0, tag = "default_notify" }, + { ndo_data = 0, tag = "default_passive_checks" }, + { ndo_data = 0, tag = "default_process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_DISPLAYNAME, tag = "display_name" }, + { ndo_data = NDO.data.NDO_DATA_X3D, tag = "enabled" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLER, tag = "event_handler" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handler_enabled" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" }, + { ndo_data = NDO.data.NDO_DATA_FIRSTNOTIFICATIONDELAY, tag = "first_notification_delay" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONDOWN, tag = "flap_detection_on_down" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONUNREACHABLE, tag = "flap_detection_on_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONUP, tag = "flap_detection_on_up" }, + { ndo_data = NDO.data.NDO_DATA_HOSTFRESHNESSTHRESHOLD, tag = "freshness_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENCHECKED, tag = "checked" }, + { ndo_data = NDO.data.NDO_DATA_HIGHHOSTFLAPTHRESHOLD, tag = "high_flap_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNAME, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_ICONIMAGE, tag = "icon_image" }, + { ndo_data = NDO.data.NDO_DATA_ICONIMAGEALT, tag = "icon_image_alt" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_ISFLAPPING, tag = "flapping" }, + { ndo_data = NDO.data.NDO_DATA_LASTHOSTCHECK, tag = "last_check" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATE, tag = "last_hard_state" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATECHANGE, tag = "last_hard_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTHOSTNOTIFICATION, tag = "last_notification" }, + { ndo_data = NDO.data.NDO_DATA_LASTSTATECHANGE, tag = "last_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEDOWN, tag = "last_time_down" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUNREACHABLE, tag = "last_time_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUP, tag = "last_time_up" }, + { ndo_data = NDO.data.NDO_DATA_LASTUPDATE, tag = "last_update" }, + { ndo_data = NDO.data.NDO_DATA_LATENCY, tag = "latency" }, + { ndo_data = NDO.data.NDO_DATA_LOWHOSTFLAPTHRESHOLD, tag = "low_flap_threshold" }, + { ndo_data = NDO.data.NDO_DATA_MAXCHECKATTEMPTS, tag = "max_check_attempts" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDHOSTATTRIBUTES, tag = "modified_attributes" }, + { ndo_data = NDO.data.NDO_DATA_NEXTHOSTCHECK, tag = "next_check" }, + { ndo_data = NDO.data.NDO_DATA_NEXTHOSTNOTIFICATION, tag = "next_host_notification" }, + { ndo_data = NDO.data.NDO_DATA_NOMORENOTIFICATIONS, tag = "no_more_notifications" }, + { ndo_data = NDO.data.NDO_DATA_NOTES, tag = "notes" }, + { ndo_data = NDO.data.NDO_DATA_NOTESURL, tag = "notes_url" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNOTIFICATIONINTERVAL, tag = "notification_interval" }, + { ndo_data = 
NDO.data.NDO_DATA_HOSTNOTIFICATIONPERIOD, tag = "notification_period" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notify" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTDOWN, tag = "notify_on_down" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTDOWNTIME, tag = "notify_on_downtime" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTFLAPPING, tag = "notify_on_flapping" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTRECOVERY, tag = "notify_on_recovery" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTUNREACHABLE, tag = "notify_on_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERHOST, tag = "obsess_over_host" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVEHOSTCHECKSENABLED, tag = "passive_checks" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_PROBLEMHASBEENACKNOWLEDGED, tag = "acknowledged" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_RETAINHOSTNONSTATUSINFORMATION, tag = "retain_nonstatus_information" }, + { ndo_data = NDO.data.NDO_DATA_RETAINHOSTSTATUSINFORMATION, tag = "retain_status_information" }, + { ndo_data = NDO.data.NDO_DATA_RETRYCHECKINTERVAL, tag = "retry_interval" }, + { ndo_data = NDO.data.NDO_DATA_SCHEDULEDDOWNTIMEDEPTH, tag = "scheduled_downtime_depth" }, + { ndo_data = NDO.data.NDO_DATA_SHOULDBESCHEDULED, tag = "should_be_scheduled" }, + { ndo_data = NDO.data.NDO_DATA_STALKHOSTONDOWN, tag = "stalk_on_down" }, + { ndo_data = NDO.data.NDO_DATA_STALKHOSTONUNREACHABLE, tag = "stalk_on_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_STALKHOSTONUP, tag = "stalk_on_up" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_STATUSMAPIMAGE, tag = "statusmap_image" }, + { ndo_data = NDO.data.NDO_DATA_CHECKCOMMAND, tag = "check_command" }, + { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + { ndo_data = NDO.data.NDO_DATA_PERFDATA, tag = "perfdata" }, + } + }, + [65545] = { + id = 9, + ndo_api_id = NDO.api.NDO_API_HOSTCHECKDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDLINE, tag = "command_line" }, + } + }, + [65546] = { + id = 10, + ndo_api_id = NDO.api.NDO_API_HOSTDEPENDENCYDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_DEPENDENCYPERIOD, tag = "dependency_period" }, + { ndo_data = NDO.data.NDO_DATA_DEPENDENTHOSTNAME, tag = "dependent_host_id" }, + { ndo_data = NDO.data.NDO_DATA_HOSTFAILUREPREDICTIONOPTIONS, tag = "execution_failure_options" }, + { ndo_data = NDO.data.NDO_DATA_INHERITSPARENT, tag = "inherits_parent" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNOTIFICATIONCOMMAND, tag = "notification_failure_options" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + } + }, + [65547] = { + id = 11, + ndo_api_id = NDO.api.NDO_API_HOSTGROUPDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_HOSTID, tag = "hostgroup_id" }, + } + }, + [65548] = { + id = 12, + ndo_api_id = NDO.api.NDO_API_HOSTGROUPMEMBERDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_HOSTGROUPNAME, tag = "hostgroup_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "host_id" }, + } + }, + [65549] = { + id = 13, + ndo_api_id = NDO.api.NDO_API_HOSTPARENT, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "child_id" }, + { ndo_data = NDO.data.NDO_DATA_PARENTHOST, tag = "parent_id" }, + } + }, + [65550] = { + id = 14, + ndo_api_id = 
NDO.api.NDO_API_HOSTSTATUSDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "acknowledgement_type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVEHOSTCHECKSENABLED, tag = "active_checks" }, + { ndo_data = NDO.data.NDO_DATA_NORMALCHECKINTERVAL, tag = "check_interval" }, + { ndo_data = NDO.data.NDO_DATA_HOSTCHECKPERIOD, tag = "check_period" }, + { ndo_data = NDO.data.NDO_DATA_CHECKTYPE, tag = "check_type" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTCHECKATTEMPT, tag = "check_attempt" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTNOTIFICATIONNUMBER, tag = "notification_number" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTSTATE, tag = "state" }, + { ndo_data = NDO.data.NDO_DATA_X3D, tag = "enabled" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLER, tag = "event_handler" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handler_enabled" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENCHECKED, tag = "checked" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_ISFLAPPING, tag = "flapping" }, + { ndo_data = NDO.data.NDO_DATA_LASTHOSTCHECK, tag = "last_check" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATE, tag = "last_hard_state" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATECHANGE, tag = "last_hard_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTHOSTNOTIFICATION, tag = "last_notification" }, + { ndo_data = NDO.data.NDO_DATA_LASTSTATECHANGE, tag = "last_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEDOWN, tag = "last_time_down" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUNREACHABLE, tag = "last_time_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUP, tag = "last_time_up" }, + { ndo_data = NDO.data.NDO_DATA_LASTUPDATE, tag = "last_update" }, + { ndo_data = NDO.data.NDO_DATA_LATENCY, tag = "latency" }, + { ndo_data = NDO.data.NDO_DATA_MAXCHECKATTEMPTS, tag = "max_check_attempts" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDHOSTATTRIBUTES, tag = "modified_attributes" }, + { ndo_data = NDO.data.NDO_DATA_NEXTHOSTCHECK, tag = "next_check" }, + { ndo_data = NDO.data.NDO_DATA_NEXTHOSTNOTIFICATION, tag = "next_host_notification" }, + { ndo_data = NDO.data.NDO_DATA_NOMORENOTIFICATIONS, tag = "no_more_notifications" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notify" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERHOST, tag = "obsess_over_host" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVEHOSTCHECKSENABLED, tag = "passive_checks" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_PROBLEMHASBEENACKNOWLEDGED, tag = "acknowledged" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_RETRYCHECKINTERVAL, tag = "retry_interval" }, + { ndo_data = NDO.data.NDO_DATA_SCHEDULEDDOWNTIMEDEPTH, tag = "scheduled_downtime_depth" }, + { ndo_data = NDO.data.NDO_DATA_SHOULDBESCHEDULED, tag = "should_be_scheduled" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_CHECKCOMMAND, tag = "check_command" }, + { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + { ndo_data = NDO.data.NDO_DATA_PERFDATA, tag = "perfdata" }, + } + }, + [65551] = { + id = 15, + ndo_api_id = 
NDO.api.NDO_API_PROCESSDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_STATE, tag = "engine" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_PROGRAMNAME, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_RUNTIME, tag = "running" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSID, tag = "pid" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "end_time" }, + { ndo_data = NDO.data.NDO_DATA_PROGRAMSTARTTIME, tag = "start_time" }, + { ndo_data = NDO.data.NDO_DATA_PROGRAMVERSION, tag = "version" }, + } + }, + [65552] = { + id = 16, + ndo_api_id = NDO.api.NDO_API_PROGRAMSTATUSDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_ACTIVEHOSTCHECKSENABLED, tag = "active_host_checks" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "active_service_checks" }, + { ndo_data = NDO.data.NDO_DATA_HOSTADDRESS, tag = "address" }, + { ndo_data = NDO.data.NDO_DATA_HOSTFRESHNESSCHECKSENABLED, tag = "check_hosts_freshness" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFRESHNESSCHECKSENABLED, tag = "check_services_freshness" }, + { ndo_data = NDO.data.NDO_DATA_DAEMONMODE, tag = "daemon_mode" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "description" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handlers" }, + { ndo_data = NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_LASTSTATE, tag = "last_alive" }, + { ndo_data = NDO.data.NDO_DATA_LASTCOMMANDCHECK, tag = "last_command_check" }, + { ndo_data = NDO.data.NDO_DATA_LASTLOGROTATION, tag = "last_log_rotation" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDHOSTATTRIBUTES, tag = "modified_host_attributes" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDSERVICEATTRIBUTES, tag = "modified_service_attributes" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notifications" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERHOST, tag = "obsess_over_hosts" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERSERVICE, tag = "obsess_over_services" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVEHOSTCHECKSENABLED, tag = "passive_host_checks" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVESERVICECHECKSENABLED, tag = "passive_service_checks" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_GLOBALHOSTEVENTHANDLER, tag = "global_host_event_handler" }, + { ndo_data = NDO.data.NDO_DATA_GLOBALSERVICEEVENTHANDLER, tag = "global_service_event_handler" }, + } + }, + [65553] = { + id = 17, + ndo_api_id = NDO.api.NDO_API_COMMANDDEFINITION, + key = { + { ndo_data = 1, tag = "args" }, + { ndo_data = 2, tag = "filename" }, + { ndo_data = 3, tag = "instance_id" }, + { ndo_data = 4, tag = "loaded" }, + { ndo_data = 5, tag = "should_be_loaded" }, + } + }, + [65554] = { + id = 18, + ndo_api_id = NDO.api.NDO_API_NOTIFICATIONDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_CONTACTSNOTIFIED, tag = "contacts_notified" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "end_time" }, + { ndo_data = NDO.data.NDO_DATA_ESCALATED, tag = "escalated" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONTYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONREASON, tag = "reason_type" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = 
NDO.data.NDO_DATA_STARTTIME, tag = "start_time" }, + { ndo_data = NDO.data.NDO_DATA_STATE, tag = "state" }, + { ndo_data = NDO.data.NDO_DATA_ACKAUTHOR, tag = "ack_author" }, + { ndo_data = NDO.data.NDO_DATA_ACKDATA, tag = "ack_data" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDNAME, tag = "command_name" }, + { ndo_data = NDO.data.NDO_DATA_CONTACTNAME, tag = "contact_name" }, + { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + } + }, + [65555] = { + id = 19, + ndo_api_id = NDO.api.NDO_API_SERVICEDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "acknowledgement_type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIONURL, tag = "action_url" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "active_checks" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFRESHNESSCHECKSENABLED, tag = "check_freshness" }, + { ndo_data = NDO.data.NDO_DATA_NORMALCHECKINTERVAL, tag = "check_interval" }, + { ndo_data = NDO.data.NDO_DATA_SERVICECHECKPERIOD, tag = "check_period" }, + { ndo_data = NDO.data.NDO_DATA_CHECKTYPE, tag = "check_type" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTCHECKATTEMPT, tag = "check_attempt" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTNOTIFICATIONNUMBER, tag = "notification_number" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTSTATE, tag = "state" }, + { ndo_data = 0, tag = "default_active_checks" }, + { ndo_data = 0, tag = "default_event_handler_enabled" }, + { ndo_data = 0, tag = "default_failure_prediction" }, + { ndo_data = 0, tag = "default_flap_detection" }, + { ndo_data = 0, tag = "default_notify" }, + { ndo_data = 0, tag = "default_passive_checks" }, + { ndo_data = 0, tag = "default_process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_DISPLAYNAME, tag = "display_name" }, + { ndo_data = NDO.data.NDO_DATA_X3D, tag = "enabled" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLER, tag = "event_handler" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handler_enabled" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFAILUREPREDICTIONOPTIONS, tag = "failure_prediction_options" }, + { ndo_data = NDO.data.NDO_DATA_FIRSTNOTIFICATIONDELAY, tag = "first_notification_delay" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONCRITICAL, tag = "flap_detection_on_critical" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONOK, tag = "flap_detection_on_ok" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONUNKNOWN, tag = "flap_detection_on_unknown" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONWARNING, tag = "flap_detection_on_warning" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFRESHNESSTHRESHOLD, tag = "freshness_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENCHECKED, tag = "checked" }, + { ndo_data = NDO.data.NDO_DATA_HIGHSERVICEFLAPTHRESHOLD, tag = "high_flap_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_ICONIMAGE, tag = "icon_image" }, + { ndo_data = NDO.data.NDO_DATA_ICONIMAGEALT, tag = "icon_image_alt" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_ISFLAPPING, tag = "flapping" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEISVOLATILE, tag = "volatile" }, + { ndo_data = NDO.data.NDO_DATA_LASTSERVICECHECK, tag = "last_check" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATE, tag = 
"last_hard_state" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATECHANGE, tag = "last_hard_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTSERVICENOTIFICATION, tag = "last_notification" }, + { ndo_data = NDO.data.NDO_DATA_LASTSTATECHANGE, tag = "last_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMECRITICAL, tag = "last_time_critical" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEOK, tag = "last_time_ok" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUNKNOWN, tag = "last_time_unknown" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEWARNING, tag = "last_time_warning" }, + { ndo_data = NDO.data.NDO_DATA_LASTUPDATE, tag = "last_update" }, + { ndo_data = NDO.data.NDO_DATA_LATENCY, tag = "latency" }, + { ndo_data = NDO.data.NDO_DATA_LOWSERVICEFLAPTHRESHOLD, tag = "low_flap_threshold" }, + { ndo_data = NDO.data.NDO_DATA_MAXCHECKATTEMPTS, tag = "max_check_attempts" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDSERVICEATTRIBUTES, tag = "modified_attributes" }, + { ndo_data = NDO.data.NDO_DATA_NEXTSERVICECHECK, tag = "next_check" }, + { ndo_data = NDO.data.NDO_DATA_NEXTSERVICENOTIFICATION, tag = "next_notification" }, + { ndo_data = NDO.data.NDO_DATA_NOMORENOTIFICATIONS, tag = "no_more_notifications" }, + { ndo_data = NDO.data.NDO_DATA_NOTES, tag = "notes" }, + { ndo_data = NDO.data.NDO_DATA_NOTESURL, tag = "notes_url" }, + { ndo_data = NDO.data.NDO_DATA_SERVICENOTIFICATIONINTERVAL, tag = "notification_interval" }, + { ndo_data = NDO.data.NDO_DATA_SERVICENOTIFICATIONPERIOD, tag = "notification_period" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notify" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICECRITICAL, tag = "notify_on_critical" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICEDOWNTIME, tag = "notify_on_downtime" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICEFLAPPING, tag = "notify_on_flapping" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICERECOVERY, tag = "notify_on_recovery" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICEUNKNOWN, tag = "notify_on_unknown" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICEWARNING, tag = "notify_on_warning" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERSERVICE, tag = "obsess_over_service" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVESERVICECHECKSENABLED, tag = "passive_checks" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_PROBLEMHASBEENACKNOWLEDGED, tag = "acknowledged" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_RETAINSERVICENONSTATUSINFORMATION, tag = "retain_nonstatus_information" }, + { ndo_data = NDO.data.NDO_DATA_RETAINSERVICESTATUSINFORMATION, tag = "retain_status_information" }, + { ndo_data = NDO.data.NDO_DATA_RETRYCHECKINTERVAL, tag = "retry_interval" }, + { ndo_data = NDO.data.NDO_DATA_SCHEDULEDDOWNTIMEDEPTH, tag = "scheduled_downtime_depth" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "description" }, + { ndo_data = NDO.data.NDO_DATA_SHOULDBESCHEDULED, tag = "should_be_scheduled" }, + { ndo_data = NDO.data.NDO_DATA_STALKSERVICEONCRITICAL, tag = "stalk_on_critical" }, + { ndo_data = NDO.data.NDO_DATA_STALKSERVICEONOK, tag = "stalk_on_ok" }, + { ndo_data = NDO.data.NDO_DATA_STALKSERVICEONUNKNOWN, tag = "stalk_on_unknown" }, + { ndo_data = NDO.data.NDO_DATA_STALKSERVICEONWARNING, tag = "stalk_on_warning" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_CHECKCOMMAND, tag = "check_command" }, + { 
ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + { ndo_data = NDO.data.NDO_DATA_PERFDATA, tag = "perfdata" }, + } + }, + [65556] = { + id = 20, + ndo_api_id = NDO.api.NDO_API_SERVICECHECKDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDLINE, tag = "command_line" }, + } + }, + [65557] = { + id = 21, + ndo_api_id = NDO.api.NDO_API_SERVICEDEPENDENCYDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_DEPENDENCYPERIOD, tag = "dependency_period" }, + { ndo_data = NDO.data.NDO_DATA_DEPENDENTHOSTNAME, tag = "dependent_host_id" }, + { ndo_data = NDO.data.NDO_DATA_DEPENDENTSERVICEDESCRIPTION, tag = "dependent_service_id" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFAILUREPREDICTIONOPTIONS, tag = "execution_failure_options" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INHERITSPARENT, tag = "inherits_parent" }, + { ndo_data = NDO.data.NDO_DATA_SERVICENOTIFICATIONCOMMAND, tag = "notification_failure_options" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + } + }, + [65558] = { + id = 22, + ndo_api_id = NDO.api.NDO_API_SERVICEGROUPDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEID, tag = "servicegroup_id" }, + } + }, + [65559] = { + id = 23, + ndo_api_id = NDO.api.NDO_API_SERVICEGROUPMEMBERDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_SERVICEGROUPNAME, tag = "servicegroup_id" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "service_id" }, + } + }, + [65560] = { + id = 24, + ndo_api_id = NDO.api.NDO_API_SERVICESTATUSDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_PASSIVESERVICECHECKSENABLED, tag = "passive_checks" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_PERFDATA, tag = "perfdata" }, + { ndo_data = NDO.data.NDO_DATA_PROBLEMHASBEENACKNOWLEDGED, tag = "acknowledged" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "acknowledgement_type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "active_checks" }, + { ndo_data = NDO.data.NDO_DATA_CHECKCOMMAND, tag = "check_command" }, + { ndo_data = NDO.data.NDO_DATA_RETRYCHECKINTERVAL, tag = "retry_interval" }, + { ndo_data = NDO.data.NDO_DATA_CHECKTYPE, tag = "check_type" }, + { ndo_data = NDO.data.NDO_DATA_SERVICECHECKPERIOD, tag = "check_period" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "service_description" }, + { ndo_data = NDO.data.NDO_DATA_SCHEDULEDDOWNTIMEDEPTH, tag = "scheduled_downtime_depth" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_SHOULDBESCHEDULED, tag = "should_be_scheduled" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTCHECKATTEMPT, tag = "check_attempt" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTNOTIFICATIONNUMBER, tag = "notification_number" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTSTATE, tag = "state" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLER, tag = "event_handler" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handler_enabled" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = 
NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" },
+      { ndo_data = NDO.data.NDO_DATA_X3D, tag = "enabled" },
+      { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" },
+      { ndo_data = NDO.data.NDO_DATA_HASBEENCHECKED, tag = "checked" },
+      { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" },
+      { ndo_data = NDO.data.NDO_DATA_ISFLAPPING, tag = "flapping" },
+      { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATE, tag = "last_hard_state" },
+      { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATECHANGE, tag = "last_hard_state_change" },
+      { ndo_data = NDO.data.NDO_DATA_LASTSERVICECHECK, tag = "last_check" },
+      { ndo_data = NDO.data.NDO_DATA_LASTSERVICENOTIFICATION, tag = "last_notification" },
+      { ndo_data = NDO.data.NDO_DATA_LASTSTATECHANGE, tag = "last_state_change" },
+      { ndo_data = NDO.data.NDO_DATA_LASTTIMECRITICAL, tag = "last_time_critical" },
+      { ndo_data = NDO.data.NDO_DATA_LASTTIMEOK, tag = "last_time_ok" },
+      { ndo_data = NDO.data.NDO_DATA_LASTTIMEUNKNOWN, tag = "last_time_unknown" },
+      { ndo_data = NDO.data.NDO_DATA_LASTTIMEWARNING, tag = "last_time_warning" },
+      { ndo_data = NDO.data.NDO_DATA_LATENCY, tag = "latency" },
+      { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" },
+      { ndo_data = NDO.data.NDO_DATA_HOSTID, tag = "hostname" },
+      { ndo_data = NDO.data.NDO_DATA_LASTUPDATE, tag = "last_update" },
+      { ndo_data = NDO.data.NDO_DATA_MAXCHECKATTEMPTS, tag = "max_check_attempts" },
+      { ndo_data = NDO.data.NDO_DATA_MODIFIEDSERVICEATTRIBUTES, tag = "modified_attributes" },
+      { ndo_data = NDO.data.NDO_DATA_NEXTSERVICECHECK, tag = "next_check" },
+      { ndo_data = NDO.data.NDO_DATA_NEXTSERVICENOTIFICATION, tag = "next_notification" },
+      { ndo_data = NDO.data.NDO_DATA_NOMORENOTIFICATIONS, tag = "no_more_notifications" },
+      { ndo_data = NDO.data.NDO_DATA_NORMALCHECKINTERVAL, tag = "check_interval" },
+      { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notify" },
+      { ndo_data = NDO.data.NDO_DATA_OBSESSOVERSERVICE, tag = "obsess_over_service" },
+      { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }
+    }
+  }
+}
+
+-- Sort a table of strings and concatenate it into a single string
+-- (small helper, currently unused in this connector)
+local function join(tab)
+  table.sort(tab)
+  return table.concat(tab)
+end
+
+-- Custom formatters used when the event itself does not carry the mapped field
+local custom_output = {
+  hostname = function (ndo_data, d)
+    return ndo_data .. "=" .. tostring(broker_cache:get_hostname(d.host_id)) .. "\n"
+  end,
+  process_perfdata = function (ndo_data, d)
+    return ndo_data .. "=1\n"
+  end,
+  service_description = function (ndo_data, d)
+    return ndo_data .. "=" .. tostring(broker_cache:get_service_description(d.host_id, d.service_id)) .. "\n"
+  end,
+  default = function (ndo_data, d)
+    return ndo_data .. "=0\n"
+  end,
+}
+
+-- Obsolete things and some initializations
+custom_output.last_notification = custom_output.default
+custom_output.last_time_ok = custom_output.default
+custom_output.last_time_warning = custom_output.default
+custom_output.last_time_critical = custom_output.default
+custom_output.last_time_unknown = custom_output.default
+custom_output.next_notification = custom_output.default
+custom_output.modified_attributes = custom_output.default
+custom_output.failure_prediction = custom_output.default
+custom_output.instance_id = custom_output.default
+
+local function get_ndo_msg(d)
+  local t = d.type
+  if ndo[t] then
+    local output = "\n" .. ndo[t].ndo_api_id .. ":\n"
+    local key = ndo[t].key
+    for i,v in ipairs(key) do
+      if d[v.tag] ~= nil then
+        local value = d[v.tag]
+        if type(value) == "boolean" then
+          if value then value = "1" else value = "0" end
+        end
+        value = tostring(value):gsub("\n", "\\n")
+        output = output ..
v.ndo_data .. "=" .. tostring(value) .. "\n" + else + if custom_output[v.tag] then + output = output .. custom_output[v.tag](v.ndo_data, d) + else + output = output .. tostring(v.ndo_data) .. "(index=" .. i .. ") =UNKNOWN (" .. v.tag .. ")\n" + broker_log:warning(1, "The event does not contain an item " .. v.tag) + end + end + end + output = output .. NDO.api.NDO_API_ENDDATA .. "\n\n" + return output + else + return nil + end +end + +local data = { + max_row = 1, + rows = {} +} + +local function connect() + data.socket, err = socket.connect(data.ipaddr, data.port) + if not data.socket then + local msg = "Unable to establish connection on server " .. data.ipaddr .. ":" .. data.port .. ": " .. err + broker_log:error(1, msg) + end +end + +-------------------------------------------------------------------------------- +-- Initialization of the module +-- @param conf A table containing data entered by the user through the GUI +-------------------------------------------------------------------------------- +function init(conf) + -- broker_ndo initialization + broker_log:set_parameters(1, '/var/log/centreon-broker/ndo-output.log') + if conf['ipaddr'] and conf['ipaddr'] ~= "" then + data.ipaddr = conf['ipaddr'] + else + error("Unable to find the 'ipaddr' value of type 'string'") + end + + if conf['port'] and conf['port'] ~= "" then + data.port = conf['port'] + else + error("Unable to find the 'port' value of type 'number'") + end + + if conf['max-row'] then + data.max_row = conf['max-row'] + else + error("Unable to find the 'max-row' value of type 'number'") + end + connect() +end + +-------------------------------------------------------------------------------- +-- Called when the data limit count is reached. +-------------------------------------------------------------------------------- +local function flush() + if #data.rows > 0 then + if not data.socket then + connect() + end + if data.socket then + for k,v in ipairs(data.rows) do + local msg = get_ndo_msg(v) + if msg then + local l, err = data.socket:send(msg) + if not l then + broker_log:error(2, "Unable to send data to socket :" .. err) + data.socket = nil + end + else + broker_log:info(1, "Unable to write event of cat " .. v.category .. " elem " .. v.element) + end + end + data.rows = {} + end + end + return true +end + +-------------------------------------------------------------------------------- +-- Function attached to the write event. +-------------------------------------------------------------------------------- +function write(d) + if d.category ~= 1 or d.element ~= 24 then + return true + end + data.rows[#data.rows + 1] = d + + if #data.rows >= data.max_row then + return flush() + end + return true +end diff --git a/stream-connectors/omi/omi_connector.lua b/stream-connectors/omi/omi_connector.lua new file mode 100644 index 00000000000..f583ce34108 --- /dev/null +++ b/stream-connectors/omi/omi_connector.lua @@ -0,0 +1,151 @@ +-- +-- Copyright 2018 Centreon +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
diff --git a/stream-connectors/omi/omi_connector.lua b/stream-connectors/omi/omi_connector.lua
new file mode 100644
index 00000000000..f583ce34108
--- /dev/null
+++ b/stream-connectors/omi/omi_connector.lua
@@ -0,0 +1,151 @@
+--
+-- Copyright 2018 Centreon
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- For more information : contact@centreon.com
+--
+-- To work, this script needs a Broker stream connector output configuration
+-- providing the following information:
+--
+-- source_ci (string): name of the transmitter, usually the Centreon server name
+-- ipaddr (string): the IP address of the operation connector server
+-- url (string): url of the operation connector endpoint
+-- logfile (string): the log file to use
+-- loglevel (number): the log level (0, 1, 2, 3) where 3 is the maximum level
+-- port (number): the operation connector server port
+-- max_size (number): how many events to store before sending them to the server.
+-- max_age (number): flush the events when the specified time (in seconds) is reached (even if max_size is not reached).
+
+local http = require("socket.http")
+local ltn12 = require("ltn12")
+
+-- default values, overwritten if set in the GUI (Broker stream connector output configuration)
+local my_data = {
+  source_ci = "Centreon",
+  ipaddr = "192.168.56.15",
+  url = "/bsmc/rest/events/opscx-sdk/v1/",
+  logfile = "/var/log/centreon-broker/omi_connector.log",
+  loglevel = 2, -- log level (0, 1, 2, 3) where 3 is the maximum level
+  port = 30005,
+  max_size = 5,
+  max_age = 60,
+  flush_time = os.time(),
+  data = {}
+}
+
+-- initialization of parameters if set in the GUI
+function init(conf)
+  if conf.logfile then
+    my_data.logfile = conf.logfile
+  end
+  if conf.loglevel then
+    my_data.loglevel = conf.loglevel
+  end
+  if conf.ipaddr then
+    my_data.ipaddr = conf.ipaddr
+  end
+  if conf.url then
+    my_data.url = conf.url
+  end
+  if conf.port then
+    my_data.port = conf.port
+  end
+  if conf.max_size then
+    my_data.max_size = conf.max_size
+  end
+  if conf.max_age then
+    my_data.max_age = conf.max_age
+  end
+  broker_log:set_parameters(my_data.loglevel, my_data.logfile)
+  broker_log:info(2, "init values:" ..
+    "\nlogfile = " .. my_data.logfile ..
+    "\nloglevel = " .. my_data.loglevel ..
+    "\nipaddr = " .. my_data.ipaddr ..
+    "\nurl = " .. my_data.url ..
+    "\nport = " .. my_data.port ..
+    "\nmax_size = " .. my_data.max_size ..
+    "\nmax_age = " .. my_data.max_age .. "\n")
+end
+
+-- called when max_size or max_age is reached
+local function flush()
+  if #my_data.data == 0 then
+    broker_log:info(2, "No data to flush")
+    my_data.flush_time = os.time()
+    return true
+  end
+  local buf = table.concat(my_data.data, "\n")
+  local respbody = {}
+  local body, code, headers, status = http.request {
+    method = "POST",
+    -- POST endpoint built as https://<ipaddr>:<port><url>
+    url = "https://" .. my_data.ipaddr .. ":" .. my_data.port .. my_data.url,
+    source = ltn12.source.string(buf),
+    headers =
+    {
+      ["Content-Type"] = "text/xml",
+      ["content-length"] = string.len(buf)
+    },
+    sink = ltn12.sink.table(respbody)
+  }
+  if code == 200 then
+    my_data.data = {}
+    broker_log:info(2, "API connection ok: " .. tostring(code) .. "\t" .. tostring(status))
+    my_data.flush_time = os.time()
+    return true
+  else
+    broker_log:error(0, "Could not reach API: " .. tostring(code))
+    return false
+  end
+end
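+
+-- write() below receives every Broker event; it keeps only service-status
+-- events (category 1, element 24) that are in a hard state and whose hard
+-- state changed at the last check, buffers them, and triggers flush() once
+-- max_size or max_age is exceeded.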
" Try to restart centengine") + return true + end + if d.state_type == 1 then --we keep only events in hard state + broker_log:info(3, "HARD STATE") + if d.last_hard_state_change then + if math.abs(d.last_check - d.last_hard_state_change) < 10 then --we keep only events with a state that changed from the previous check + if d.state == d.last_hard_state then + broker_log:info(3, "STATE CHANGE") + local reqbody = "\t" .. + "" .. service_desc .. "\t" .. + "" .. d.output .. "\t" .. + "" .. d.state .. "\t" .. + "" .. d.last_update .. "\t" .. + "" .. hostname .. "\t" .. + "" .. hostname .. "\t" .. + "" .. my_data.source_ci .. "\t" .. + "" .. d.service_id .. "\t" .. + "" + table.insert(my_data.data, reqbody) + end + end + end + end + end + end + if #my_data.data > my_data.max_size or os.time() - my_data.flush_time > my_data.max_age then + broker_log:info(2, "max size or flush time is reached, flushing data") + return flush() + end + return true +end From f6234fc162935c53a51bbc1fc4c7387b6905434e Mon Sep 17 00:00:00 2001 From: CPbN <40244829+CPbN@users.noreply.github.com> Date: Tue, 16 Apr 2019 15:49:46 +0200 Subject: [PATCH 024/219] Various improvements to InfluxDB script (#9) --- stream-connectors/influxdb/influxdb-neb.lua | 111 ++++++++++++-------- 1 file changed, 70 insertions(+), 41 deletions(-) diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index 5a1945612ed..4271a2ca540 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -2,7 +2,7 @@ -------------------------------------------------------------------------------- -- Centreon Broker InfluxDB Connector -- Tested with versions --- 1.4.3 +-- 1.4.3, 1.7.4 -- -- References: -- https://docs.influxdata.com/influxdb/v1.4/write_protocols/line_protocol_tutorial/ @@ -14,11 +14,11 @@ -- You need an influxdb server -- You can install one with docker and these commands: -- docker pull influxdb --- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb +-- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb -- You need to create a database -- curl http://:8086/query --data-urlencode "q=CREATE DATABASE mydb" -- --- The Lua-socket library is required by this script. +-- The Lua-socket and Lua-sec libraries are required by this script. 
-------------------------------------------------------------------------------- -------------------------------------------------------------------------------- @@ -26,8 +26,8 @@ -- curl -G 'http://:8086/query?pretty=true' --data-urlencode "db=mydb" --data-urlencode "q=SELECT * from Cpu" -------------------------------------------------------------------------------- - local http = require("socket.http") +local https = require("ssl.https") local ltn12 = require("ltn12") -------------------------------------------------------------------------------- @@ -39,10 +39,11 @@ EventQueue.__index = EventQueue -------------------------------------------------------------------------------- -- flush() method --- Called when the max number of events or the max age are reached +-- Called when the max number of events or the max age are reached -------------------------------------------------------------------------------- + function EventQueue:flush() - broker_log:info(2, "EventQueue:flush: Concatenating all the events as one string") + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") -- we concatenate all the events local http_post_data = "" local http_result_body = {} @@ -51,8 +52,15 @@ function EventQueue:flush() end broker_log:info(2, "EventQueue:flush: HTTP POST request \"" .. self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. "/write?db=" .. self.influx_database .. "\"") broker_log:info(3, "EventQueue:flush: HTTP POST data are: '" .. http_post_data .. "'") - local hr_result, hr_code, hr_header, hr_s = http.request{ - url = self.http_server_protocol.."://"..self.http_server_address..":"..self.http_server_port.."/write?db="..self.influx_database, + http.TIMEOUT = self.http_timeout + local req + if self.http_server_protocol == "http" then + req = http + else + req = https + end + local hr_result, hr_code, hr_header, hr_s = req.request{ + url = self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. "/write?db=" .. self.influx_database .. "&u=" .. self.influx_username .. "&p=" .. self.influx_password, method = "POST", -- sink is where the request result's body will go sink = ltn12.sink.table(http_result_body), @@ -66,6 +74,8 @@ function EventQueue:flush() -- Handling the return code if hr_code == 204 then broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. hr_code) + -- now that the data has been sent, we empty the events array + self.events = {} else broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: return code is " .. hr_code) for i, v in ipairs(http_result_body) do @@ -73,8 +83,6 @@ function EventQueue:flush() end end - -- now that the data has been sent, we empty the events array - self.events = {} -- and update the timestamp self.__internal_ts_last_flush = os.time() end @@ -82,18 +90,24 @@ end -------------------------------------------------------------------------------- -- EventQueue:add method -- @param e An event --- -------------------------------------------------------------------------------- + function EventQueue:add(e) - broker_log:info(2, "EventQueue:add: " .. broker.json_encode(e)) - local metric = e.name - -- time is a reserved word in influxDB so I rename it - if metric == "time" then - metric = "_"..metric + broker_log:info(3, "EventQueue:add: " .. 
broker.json_encode(e)) + -- let's get and verify we have perfdata + local perfdata = broker.parse_perfdata(e.perfdata) + if not next(perfdata) then + broker_log:info(3, "EventQueue:add: No metric") + return true end -- retrieve objects names instead of IDs local host_name = broker_cache:get_hostname(e.host_id) - local service_description = broker_cache:get_service_description(e.host_id, e.service_id) + local service_description + if e.service_id then + service_description = broker_cache:get_service_description(e.host_id, e.service_id) + else + service_description = "host-latency" + end -- what if we could not get them from cache if not host_name then broker_log:warning(1, "EventQueue:add: host_name for id " .. e.host_id .. " not found. Restarting centengine should fix this.") @@ -103,23 +117,27 @@ function EventQueue:add(e) broker_log:warning(1, "EventQueue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found. Restarting centengine should fix this.") service_description = e.service_id end - -- we finally append the event to the events table - local perfdata = broker.parse_perfdata(e.perfdata) - if not next(perfdata) then - broker_log:info(3, "EventQueue:add: No metric") - return true - end -- [,=...] =[,=...] [unix-nano-timestamp] - local mess = self.measurement .. ",host=" .. host_name .. ",service=" .. service_description + local item = "" + if string.find(service_description, " ") then + item = ",item=" .. string.gsub(service_description, ".* ", "") + service_description = string.gsub(service_description, " .*", "") + end + local mess + if string.len(self.measurement) > 0 then + mess = self.measurement .. ",host=" .. host_name .. ",service=" .. service_description .. item + else + mess = service_description .. ",host=" .. host_name .. item + end local sep = " " for m,v in pairs(perfdata) do - mess = mess .. sep .. m .. "=" .. v + mess = mess .. sep .. m .. "=" .. v sep = "," end mess = mess .. " " .. e.last_check .. "000000000\n" self.events[#self.events + 1] = mess - broker_log:info(3, "EventQueue:add: adding " .. mess) + broker_log:info(3, "EventQueue:add: adding " .. mess:sub(1, -2)) -- then we check whether it is time to send the events to the receiver and flush if #self.events >= self.max_buffer_size then @@ -140,21 +158,28 @@ end -- @param conf The table given by the init() function and returned from the GUI -- @return the new EventQueue -------------------------------------------------------------------------------- + function EventQueue.new(conf) local retval = { - measurement = "centreon", - http_server_address = "", - http_server_port = 8086, - http_server_protocol = "http", - influx_database = "mydb", - max_buffer_size = 5000, - max_buffer_age = 5 + measurement = "", + http_server_address = "", + http_server_port = 8086, + http_server_protocol = "https", + http_timeout = 5, + influx_database = "mydb", + influx_username = "", + influx_password = "", + max_buffer_size = 5000, + max_buffer_age = 30, + log_level = 0 -- already proceeded in init function } for i,v in pairs(conf) do - broker_log:warning(1, "Conf parameter " .. i .. " => " .. v) if retval[i] then - broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v) retval[i] = v + if i == "influx_password" then + v = string.gsub(v, ".", "*") + end + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v) else broker_log:warning(1, "EventQueue.new: ignoring parameter " .. i .. " => " .. 
v) end @@ -167,17 +192,21 @@ function EventQueue.new(conf) return retval end --------------------------------------------------------------------------------- - - -------------------------------------------------------------------------------- -- Required functions for Broker StreamConnector -------------------------------------------------------------------------------- local queue + -- Fonction init() function init(conf) - broker_log:set_parameters(3, "/var/log/centreon-broker/stream-connector-influxdb-neb.log") + local log_level = 3 + for i,v in pairs(conf) do + if i == "log_level" then + log_level = v + end + end + broker_log:set_parameters(log_level, "/var/log/centreon-broker/stream-connector-influxdb-neb.log") broker_log:info(2, "init: Beginning init() function") queue = EventQueue.new(conf) broker_log:info(2, "init: Ending init() function, Event queue created") @@ -192,8 +221,8 @@ function write(e) end -- Fonction filter() --- return true if you want to handle this type of event (category, element) ; here category NEB and element Service Status +-- return true if you want to handle this type of event (category, element) ; here category NEB and element Host or Service -- return false otherwise function filter(category, element) - return category == 1 and element == 24 + return category == 1 and (element == 14 or element == 24) end From fbab6223e8a2cbae097bdab22742898645eda3a9 Mon Sep 17 00:00:00 2001 From: CPbN <40244829+CPbN@users.noreply.github.com> Date: Wed, 17 Apr 2019 09:39:36 +0200 Subject: [PATCH 025/219] Support instances in InfluxDB (#10) Thank you again @CPbN !! --- stream-connectors/influxdb/influxdb-neb.lua | 38 ++++++++++++--------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index 4271a2ca540..54e2f269458 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -82,7 +82,6 @@ function EventQueue:flush() broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: message line " .. i .. " is \"" .. v .. "\"") end end - -- and update the timestamp self.__internal_ts_last_flush = os.time() end @@ -117,28 +116,32 @@ function EventQueue:add(e) broker_log:warning(1, "EventQueue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found. Restarting centengine should fix this.") service_description = e.service_id end - - -- [,=...] =[,=...] [unix-nano-timestamp] + -- message format : [,=...] =[,=...] [unix-nano-timestamp] + -- consider space in service_description as a separator for an item tag local item = "" if string.find(service_description, " ") then item = ",item=" .. string.gsub(service_description, ".* ", "") service_description = string.gsub(service_description, " .*", "") end - local mess - if string.len(self.measurement) > 0 then - mess = self.measurement .. ",host=" .. host_name .. ",service=" .. service_description .. item - else - mess = service_description .. ",host=" .. host_name .. item - end - local sep = " " + -- define messages from perfata, transforming instance names to inst tags, which leads to one message per instance + local instances = {} for m,v in pairs(perfdata) do - mess = mess .. sep .. m .. "=" .. v - sep = "," + local inst = string.match(m, "(.*)#.*") + if not inst then + inst = "" + else + inst = ",inst=" .. inst + end + if not instances[inst] then + instances[inst] = self.measurement .. service_description .. ",host=" .. host_name .. item .. inst .. 
" " + end + instances[inst] = instances[inst] .. string.gsub(m, ".*#", "") .. "=" .. v .. "," + end + -- compute final messages to push + for _,v in pairs(instances) do + self.events[#self.events + 1] = v:sub(1, -2) .. " " .. e.last_check .. "000000000" .. "\n" + broker_log:info(3, "EventQueue:add: adding " .. self.events[#self.events]:sub(1, -2)) end - mess = mess .. " " .. e.last_check .. "000000000\n" - self.events[#self.events + 1] = mess - broker_log:info(3, "EventQueue:add: adding " .. mess:sub(1, -2)) - -- then we check whether it is time to send the events to the receiver and flush if #self.events >= self.max_buffer_size then broker_log:info(2, "EventQueue:add: flushing because buffer size reached " .. self.max_buffer_size .. " elements.") @@ -180,6 +183,9 @@ function EventQueue.new(conf) v = string.gsub(v, ".", "*") end broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v) + if i == "measurement" then + retval[i] = retval[i] .. ",service=" + end else broker_log:warning(1, "EventQueue.new: ignoring parameter " .. i .. " => " .. v) end From 5c1985a0f45d5158ba7144ca77e289a8cf1b0330 Mon Sep 17 00:00:00 2001 From: CPbN <40244829+CPbN@users.noreply.github.com> Date: Wed, 1 May 2019 00:50:23 +0200 Subject: [PATCH 026/219] Properly verify perfdata is present (#11) --- stream-connectors/influxdb/influxdb-neb.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index 54e2f269458..f2d96328c31 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -95,7 +95,7 @@ function EventQueue:add(e) broker_log:info(3, "EventQueue:add: " .. broker.json_encode(e)) -- let's get and verify we have perfdata local perfdata = broker.parse_perfdata(e.perfdata) - if not next(perfdata) then + if not perfdata or not next(perfdata) then broker_log:info(3, "EventQueue:add: No metric") return true end From 63699a11e59544d4cf253a3618f9fff858580cc0 Mon Sep 17 00:00:00 2001 From: CPbN <40244829+CPbN@users.noreply.github.com> Date: Wed, 1 May 2019 13:32:41 +0200 Subject: [PATCH 027/219] Better handle perfdata err (#12) --- stream-connectors/influxdb/influxdb-neb.lua | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index f2d96328c31..757ce3ec625 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -94,9 +94,9 @@ end function EventQueue:add(e) broker_log:info(3, "EventQueue:add: " .. broker.json_encode(e)) -- let's get and verify we have perfdata - local perfdata = broker.parse_perfdata(e.perfdata) - if not perfdata or not next(perfdata) then - broker_log:info(3, "EventQueue:add: No metric") + local perfdata, perfdata_err = broker.parse_perfdata(e.perfdata) + if perfdata_err then + broker_log:info(3, "EventQueue:add: No metric: " .. 
perfdata_err)
     return true
   end
   -- retrieve objects names instead of IDs
From 7a10fc2ae8d500297bedc8d107820e24cfb4b5e8 Mon Sep 17 00:00:00 2001
From: CPbN <40244829+CPbN@users.noreply.github.com>
Date: Fri, 24 May 2019 12:35:46 +0200
Subject: [PATCH 028/219] Improve InfluxDB script (#13)

* Properly handle write return value
* Workaround duplicate events
* Add an option to skip anon events
* Improve documentation
---
 stream-connectors/README.md                 | 31 ++++++++++-------
 stream-connectors/influxdb/influxdb-neb.lua | 38 ++++++++++++++++-----
 2 files changed, 47 insertions(+), 22 deletions(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 21dd07e7465..22cd6856296 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -44,9 +44,9 @@ Parameters to specify in the stream connector configuration are:
 * elastic-port as **number**: it is the port, if not provided, this value is *9200*.
 * max-row as **number**: it is the max number of events before sending them to the elastic server. If not specified, its value is 100
 
-# Influxdb
+# InfluxDB
 
-## Influxdb from metrics events: *influxdb/influxdb-metrics.lua*
+## InfluxDB from metrics events: *influxdb/influxdb-metrics.lua*
 
 This stream connector works with **metric events**. So you need them to be configured in Centreon broker.
 
 To use this script, one need to install the lua-socket library.
 
 Parameters to specify in the stream connector configuration are:
 
-* http\_server\_address as **string**: it is the *ip address* of the Influxdb server
+* http\_server\_address as **string**: it is the *ip address* of the InfluxDB server
 * http\_server\_port as **number**: it is the port, if not provided, this value is *8086*
 * http\_server\_protocol as **string**: by default, this value is *http*
 * influx\_database as **string**: The database name, *mydb* is the default value
-* max\_buffer\_size as **number**: The number of events to stock before them to be sent to influxdb
+* max\_buffer\_size as **number**: The number of events to store before they are sent to InfluxDB
 * max\_buffer\_age as **number**: The delay in seconds to wait before the next flush.
 
 if one of max\_buffer\_size or max\_buffer\_age is reached, events are sent.
 
-## Influxdb from neb events: *influxdb/influxdb-neb.lua*
+## InfluxDB from neb events: *influxdb/influxdb-neb.lua*
 
 This stream connector is an alternative to the previous one, but works with **neb service\_status events**.
 As those events are always available on a Centreon platform, this script should work more often.
 
-To use this script, one need to install the lua-socket library.
+To use this script, one needs to install the lua-socket and lua-sec libraries.
 
 Parameters to specify in the stream connector configuration are:
 
-* measurement as **string**: it is the influxdb *measurement*
-* http\_server\_address as **string**: it is the *ip address* of the Influxdb server
-* http\_server\_port as **number**: it is the port, if not provided, this value is *8086*
-* http\_server\_protocol as **string**: by default, this value is *http*
-* influx\_database as **string**: The database name, *mydb* is the default value
-* max\_buffer\_size as **number**: The number of events to stock before them to be sent to influxdb
-* max\_buffer\_age as **number**: The delay in seconds to wait before the next flush.
+* measurement as **string**: the InfluxDB *measurement*, overwrites the service description if set +* http\_server\_address as **string**: the *(ip) address* of the InfluxDB server +* http\_server\_port as **number**: the port of the InfluxDB server, by default *8086* +* http\_server\_protocol as **string**: the connection scheme, by default *https* +* http\_timeout as **number**: the connection timeout, by default *5* seconds +* influx\_database as **string**: the database name, by default *mydb* +* influx\_username as **string**: the database username, no authentication performed if not set +* influx\_password as **string**: the database password, no authentication performed if not set +* max\_buffer\_size as **number**: the number of events to stock before the next flush, by default *5000* +* max\_buffer\_age as **number**: the delay to wait before the next flush, by default *30* seconds +* skip\_anon\_events as **number**: skip events without name in broker cache, by default *1* +* log\_level as **number**: log level from 1 to 3, by default *3* if one of max\_buffer\_size or max\_buffer\_age is reached, events are sent. diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index 757ce3ec625..9b7bc096004 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -72,10 +72,12 @@ function EventQueue:flush() } } -- Handling the return code + local retval = false if hr_code == 204 then broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. hr_code) -- now that the data has been sent, we empty the events array self.events = {} + retval = true else broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: return code is " .. hr_code) for i, v in ipairs(http_result_body) do @@ -84,6 +86,7 @@ function EventQueue:flush() end -- and update the timestamp self.__internal_ts_last_flush = os.time() + return retval end -------------------------------------------------------------------------------- @@ -91,13 +94,22 @@ end -- @param e An event -------------------------------------------------------------------------------- +local previous_event = "" + function EventQueue:add(e) - broker_log:info(3, "EventQueue:add: " .. broker.json_encode(e)) + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + if current_event == previous_event then + broker_log:info(3, "EventQueue:add: Duplicate event ignored.") + return false + end + previous_event = current_event + broker_log:info(3, "EventQueue:add: " .. current_event) -- let's get and verify we have perfdata local perfdata, perfdata_err = broker.parse_perfdata(e.perfdata) if perfdata_err then broker_log:info(3, "EventQueue:add: No metric: " .. perfdata_err) - return true + return false end -- retrieve objects names instead of IDs local host_name = broker_cache:get_hostname(e.host_id) @@ -110,10 +122,16 @@ function EventQueue:add(e) -- what if we could not get them from cache if not host_name then broker_log:warning(1, "EventQueue:add: host_name for id " .. e.host_id .. " not found. Restarting centengine should fix this.") + if self.skip_anon_events == 1 then + return false + end host_name = e.host_id end if not service_description then broker_log:warning(1, "EventQueue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found. 
Restarting centengine should fix this.")
+    if self.skip_anon_events == 1 then
+      return false
+    end
     service_description = e.service_id
   end
   -- message format : [,=...] =[,=...] [unix-nano-timestamp]
   -- consider space in service_description as a separator for an item tag
   local item = ""
   if string.find(service_description, " ") then
@@ -145,12 +163,12 @@ function EventQueue:add(e)
   -- then we check whether it is time to send the events to the receiver and flush
   if #self.events >= self.max_buffer_size then
     broker_log:info(2, "EventQueue:add: flushing because buffer size reached " .. self.max_buffer_size .. " elements.")
-    self:flush()
-    return true
+    local retval = self:flush()
+    return retval
   elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age then
     broker_log:info(2, "EventQueue:add: flushing " .. #self.events .. " elements because buffer age reached " .. (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.")
-    self:flush()
-    return true
+    local retval = self:flush()
+    return retval
   else
     return false
   end
@@ -174,6 +192,7 @@ function EventQueue.new(conf)
     influx_password = "",
     max_buffer_size = 5000,
     max_buffer_age = 30,
+    skip_anon_events = 1,
     log_level = 0 -- already proceeded in init function
   }
   for i,v in pairs(conf) do
@@ -221,9 +240,10 @@ end
 -- Fonction write()
 function write(e)
   broker_log:info(3, "write: Beginning write() function")
-  queue:add(e)
-  broker_log:info(3, "write: Ending write() function")
-  return true
+  local retval = queue:add(e)
+  broker_log:info(3, "write: Ending write() function, returning " .. tostring(retval))
+  -- return true to ask broker to clear its cache, false otherwise
+  return retval
 end

 -- Fonction filter()

From 2ab4fca795082552600a1076ce070cad874cbc60 Mon Sep 17 00:00:00 2001
From: Colin Gagnaire
Date: Fri, 6 Sep 2019 10:04:22 +0000
Subject: [PATCH 029/219] enh(influxdb-neb): add log_path and
 influxdb_retention_policy parameter, change authentication method (#14)

* enh(influxdb-neb): add log_path and influxdb_retention_policy parameter, change authentication method

Add a log_path parameter to provide path to log file
Add an influxdb_retention_policy parameter to provide RP when writing to DB
Change the way credentials are provided in request

* enh(doc): add new parameters for influxdb-neb script
---
 stream-connectors/README.md                 |  2 +
 stream-connectors/influxdb/influxdb-neb.lua | 47 +++++++++++++++------
 2 files changed, 35 insertions(+), 14 deletions(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 22cd6856296..0fed9f18a45 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -78,12 +78,14 @@ Parameters to specify in the stream connector configuration are:
 * http\_server\_protocol as **string**: the connection scheme, by default *https*
 * http\_timeout as **number**: the connection timeout, by default *5* seconds
 * influx\_database as **string**: the database name, by default *mydb*
+* influx\_retention\_policy as **string**: the database retention policy, default is database's default
 * influx\_username as **string**: the database username, no authentication performed if not set
 * influx\_password as **string**: the database password, no authentication performed if not set
 * max\_buffer\_size as **number**: the number of events to stock before the next flush, by default *5000*
 * max\_buffer\_age as **number**: the delay to wait before the next flush, by default *30* seconds
 * skip\_anon\_events as **number**: skip events without name in broker cache, by default *1*
 * log\_level as **number**: log level from 1 to 3, by default *3*
+* log\_path as **string**: path to log file, by default 
*/var/log/centreon-broker/stream-connector-influxdb-neb.log* if one of max\_buffer\_size or max\_buffer\_age is reached, events are sent. diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index 9b7bc096004..65757678e65 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -2,7 +2,7 @@ -------------------------------------------------------------------------------- -- Centreon Broker InfluxDB Connector -- Tested with versions --- 1.4.3, 1.7.4 +-- 1.4.3, 1.7.4, 1.7.6 -- -- References: -- https://docs.influxdata.com/influxdb/v1.4/write_protocols/line_protocol_tutorial/ @@ -17,18 +17,21 @@ -- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb -- You need to create a database -- curl http://:8086/query --data-urlencode "q=CREATE DATABASE mydb" +-- You can eventually create a retention policy -- -- The Lua-socket and Lua-sec libraries are required by this script. -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- -- Access to the data: --- curl -G 'http://:8086/query?pretty=true' --data-urlencode "db=mydb" --data-urlencode "q=SELECT * from Cpu" +-- curl -G 'http://:8086/query?pretty=true' --data-urlencode "db=mydb" +-- --data-urlencode "q=SELECT * from Cpu" -------------------------------------------------------------------------------- local http = require("socket.http") local https = require("ssl.https") local ltn12 = require("ltn12") +local mime = require("mime") -------------------------------------------------------------------------------- -- EventQueue class @@ -50,7 +53,9 @@ function EventQueue:flush() for _, raw_event in ipairs(self.events) do http_post_data = http_post_data .. raw_event end - broker_log:info(2, "EventQueue:flush: HTTP POST request \"" .. self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. "/write?db=" .. self.influx_database .. "\"") + local url = self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. + "/write?db=" .. self.influx_database .. "&rp=" .. self.influx_retention_policy + broker_log:info(2, "EventQueue:flush: HTTP POST request \"" .. url .. "\"") broker_log:info(3, "EventQueue:flush: HTTP POST data are: '" .. http_post_data .. "'") http.TIMEOUT = self.http_timeout local req @@ -60,7 +65,7 @@ function EventQueue:flush() req = https end local hr_result, hr_code, hr_header, hr_s = req.request{ - url = self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. "/write?db=" .. self.influx_database .. "&u=" .. self.influx_username .. "&p=" .. self.influx_password, + url = url, method = "POST", -- sink is where the request result's body will go sink = ltn12.sink.table(http_result_body), @@ -68,7 +73,8 @@ function EventQueue:flush() source = ltn12.source.string(http_post_data), headers = { -- mandatory for POST request with body - ["content-length"] = string.len(http_post_data) + ["content-length"] = string.len(http_post_data), + ["authorization"] = "Basic " .. (mime.b64(self.influx_username .. ":" .. self.influx_password)) } } -- Handling the return code @@ -81,7 +87,8 @@ function EventQueue:flush() else broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: return code is " .. 
hr_code) for i, v in ipairs(http_result_body) do - broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: message line " .. i .. " is \"" .. v .. "\"") + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: message line " .. i .. + " is \"" .. v .. "\"") end end -- and update the timestamp @@ -121,20 +128,23 @@ function EventQueue:add(e) end -- what if we could not get them from cache if not host_name then - broker_log:warning(1, "EventQueue:add: host_name for id " .. e.host_id .. " not found. Restarting centengine should fix this.") + broker_log:warning(1, "EventQueue:add: host_name for id " .. e.host_id .. + " not found. Restarting centengine should fix this.") if self.skip_anon_events == 1 then return false end host_name = e.host_id end if not service_description then - broker_log:warning(1, "EventQueue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found. Restarting centengine should fix this.") + broker_log:warning(1, "EventQueue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. + " not found. Restarting centengine should fix this.") if self.skip_anon_events == 1 then return false end service_description = e.service_id end - -- message format : [,=...] =[,=...] [unix-nano-timestamp] + -- message format : [,=...] + -- =[,=...] [unix-nano-timestamp] -- consider space in service_description as a separator for an item tag local item = "" if string.find(service_description, " ") then @@ -162,11 +172,13 @@ function EventQueue:add(e) end -- then we check whether it is time to send the events to the receiver and flush if #self.events >= self.max_buffer_size then - broker_log:info(2, "EventQueue:add: flushing because buffer size reached " .. self.max_buffer_size .. " elements.") + broker_log:info(2, "EventQueue:add: flushing because buffer size reached " .. self.max_buffer_size .. + " elements.") local retval = self:flush() return retval elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age then - broker_log:info(2, "EventQueue:add: flushing " .. #self.events .. " elements because buffer age reached " .. (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.") + broker_log:info(2, "EventQueue:add: flushing " .. #self.events .. " elements because buffer age reached " .. + (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. 
"s.") local retval = self:flush() return retval else @@ -188,12 +200,14 @@ function EventQueue.new(conf) http_server_protocol = "https", http_timeout = 5, influx_database = "mydb", + influx_retention_policy = "", influx_username = "", influx_password = "", max_buffer_size = 5000, max_buffer_age = 30, skip_anon_events = 1, - log_level = 0 -- already proceeded in init function + log_level = 0, -- already proceeded in init function + log_path = "" -- already proceeded in init function } for i,v in pairs(conf) do if retval[i] then @@ -226,12 +240,16 @@ local queue -- Fonction init() function init(conf) local log_level = 3 + local log_path = "/var/log/centreon-broker/stream-connector-influxdb-neb.log" for i,v in pairs(conf) do if i == "log_level" then log_level = v end + if i == "log_path" then + log_path = v + end end - broker_log:set_parameters(log_level, "/var/log/centreon-broker/stream-connector-influxdb-neb.log") + broker_log:set_parameters(log_level, log_path) broker_log:info(2, "init: Beginning init() function") queue = EventQueue.new(conf) broker_log:info(2, "init: Ending init() function, Event queue created") @@ -247,7 +265,8 @@ function write(e) end -- Fonction filter() --- return true if you want to handle this type of event (category, element) ; here category NEB and element Host or Service +-- return true if you want to handle this type of event (category, element) ; here category NEB and element +-- Host or Service -- return false otherwise function filter(category, element) return category == 1 and (element == 14 or element == 24) From 8f42b7f3bfe9553b84caefadac9d0611cd7a858a Mon Sep 17 00:00:00 2001 From: Colin Gagnaire Date: Fri, 6 Sep 2019 13:20:24 +0000 Subject: [PATCH 030/219] feat(elastic-neb): add elastic-neb.lua script (#15) * feat(elastic-neb): add elastic-neb.lua script Handles both metrics and statuses * feat(doc): add elastic-neb section * enh(elastic-neb): add min/max/uom to metrics index * feat(doc): update elastic-neb section --- stream-connectors/README.md | 40 ++ .../elasticsearch/elastic-neb.lua | 345 ++++++++++++++++++ 2 files changed, 385 insertions(+) create mode 100644 stream-connectors/elasticsearch/elastic-neb.lua diff --git a/stream-connectors/README.md b/stream-connectors/README.md index 0fed9f18a45..32dfc2f2609 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -44,6 +44,46 @@ Parameters to specify in the stream connector configuration are: * elastic-port as **number**: it is the port, if not provided, this value is *9200*. * max-row as **number**: it is the max number of events before sending them to the elastic server. If not specified, its value is 100 +## Elasticsearch from NEB events: *elasticsearch/elastic-neb.lua* + +This stream connector is an alternative to the previous one, but works with **neb service\_status events**. +As those events are always available on a Centreon platform, this script should work more often. + +To use this script, one need to install the lua-socket and lua-sec libraries. 
+ +Parameters to specify in the stream connector configuration are: + +* http\_server\_address as **string**: the *(ip) address* of the Elasticsearch server +* http\_server\_port as **number**: the port of the Elasticsearch server, by default *9200* +* http\_server\_protocol as **string**: the connection scheme, by default *http* +* http\_timeout as **number**: the connection timeout, by default *5* seconds +* filter\_type as **string**: filter events to compute, by default *metric,status* +* elastic\_index\_metric as **string**: the index name for metrics, by default *centreon_metric* +* elastic\_index\_status as **string**: the index name for status, by default *centreon_status* +* elastic\_username as **string**: the API username if set +* elastic\_password as **password**: the API password if set +* max\_buffer\_size as **number**: the number of events to stock before the next flush, by default *5000* +* max\_buffer\_age as **number**: the delay to wait before the next flush, by default *30* seconds +* skip\_anon\_events as **number**: skip events without name in broker cache, by default *1* +* log\_level as **number**: log level from 1 to 3, by default *3* +* log\_path as **string**: path to log file, by default */var/log/centreon-broker/stream-connector-elastic-neb.log* + +If one of max\_buffer\_size or max\_buffer\_age is reached, events are sent. + +Two indices need to be created on the Elasticsearch server: +``` +curl -X PUT "http://elasticsearch/centreon_metric" -H 'Content-Type: application/json' +-d '{"mappings":{"properties":{"host":{"type":"keyword"},"service":{"type":"keyword"}, +"instance":{"type":"keyword"},"metric":{"type":"keyword"},"value":{"type":"double"}, +"min":{"type":"double"},"max":{"type":"double"},"uom":{"type":"text"}, +"type":{"type":"keyword"},"timestamp":{"type":"date","format":"epoch_second"}}}}' + +curl -X PUT "http://elasticsearch/centreon_status" -H 'Content-Type: application/json' +-d '{"mappings":{"properties":{"host":{"type":"keyword"},"service":{"type":"keyword"}, +"output":{"type":"text"},"status":{"type":"keyword"},"state":{"type":"keyword"}, +"type":{"type":"keyword"},"timestamp":{"type":"date","format":"epoch_second"}}}}'' +``` + # InfluxDB ## InfluxDB from metrics events: *influxdb/influxdb-metrics.lua* diff --git a/stream-connectors/elasticsearch/elastic-neb.lua b/stream-connectors/elasticsearch/elastic-neb.lua new file mode 100644 index 00000000000..a1c453a4bf1 --- /dev/null +++ b/stream-connectors/elasticsearch/elastic-neb.lua @@ -0,0 +1,345 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Elasticsearch Connector +-- Tested with versions +-- 7.1.1 +-- +-- References: +-- https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Prerequisites: +-- You need an elasticsearch server +-- You can install one with docker: +-- docker pull elasticsearch +-- docker run -p 9200:9200 -p 9300:9300 -v $PWD:/var/lib/elasticsearch -d elasticsearch +-- You need to create two indices: +-- curl -X PUT "http://elasticsearch/centreon_metric" -H 'Content-Type: application/json' +-- -d '{"mappings":{"properties":{"host":{"type":"keyword"},"service":{"type":"keyword"}, +-- "instance":{"type":"keyword"},"metric":{"type":"keyword"},"value":{"type":"double"}, +-- 
"min":{"type":"double"},"max":{"type":"double"},"uom":{"type":"text"}, +-- "type":{"type":"keyword"},"timestamp":{"type":"date","format":"epoch_second"}}}}' +-- curl -X PUT "http://elasticsearch/centreon_status" -H 'Content-Type: application/json' +-- -d '{"mappings":{"properties":{"host":{"type":"keyword"},"service":{"type":"keyword"}, +-- "output":{"type":"text"},"status":{"type":"keyword"},"state":{"type":"keyword"}, +-- "type":{"type":"keyword"},"timestamp":{"type":"date","format":"epoch_second"}}}}'' +-- +-- The Lua-socket and Lua-sec libraries are required by this script. +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Access to the data: +-- curl "http://elasticsearch/centreon_status/_search?pretty" +-------------------------------------------------------------------------------- + +local http = require("socket.http") +local https = require("ssl.https") +local ltn12 = require("ltn12") +local mime = require("mime") + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- EventQueue:flush method +-- Called when the max number of events or the max age are reached +-------------------------------------------------------------------------------- + +function EventQueue:flush() + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + local http_result_body = {} + local url = self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. + "/_bulk" + local http_post_data = "" + for _, raw_event in ipairs(self.events) do + if raw_event.status then + http_post_data = http_post_data .. '{"index":{"_index":"' .. self.elastic_index_status .. '"}}\n' .. + broker.json_encode(raw_event) .. '\n' + end + if raw_event.metric then + http_post_data = http_post_data .. '{"index":{"_index":"' .. self.elastic_index_metric .. '"}}\n' .. + broker.json_encode(raw_event) .. '\n' + end + end + broker_log:info(2, "EventQueue:flush: HTTP POST url: \"" .. url .. "\"") + for s in http_post_data:gmatch("[^\r\n]+") do + broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + end + + http.TIMEOUT = self.http_timeout + local req + if self.http_server_protocol == "http" then + req = http + else + req = https + end + local hr_result, hr_code, hr_header, hr_s = req.request{ + url = url, + method = "POST", + -- sink is where the request result's body will go + sink = ltn12.sink.table(http_result_body), + -- request body needs to be formatted as a LTN12 source + source = ltn12.source.string(http_post_data), + headers = { + -- mandatory for POST request with body + ["content-type"] = "application/x-ndjson", + ["content-length"] = string.len(http_post_data), + ["authorization"] = "Basic " .. (mime.b64(self.elastic_username .. ":" .. self.elastic_password)) + } + } + + -- Handling the return code + local retval = false + if hr_code == 200 then + broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. hr_code) + -- now that the data has been sent, we empty the events array + self.events = {} + retval = true + else + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: return code is " .. 
hr_code) + for i, v in ipairs(http_result_body) do + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: message line " .. i .. + " is \"" .. v .. "\"") + end + end + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:add method +-- @param e An event +-------------------------------------------------------------------------------- + +local previous_event = "" + +local function get_hostname(host_id) + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. + " not found. Restarting centengine should fix this.") + hostname = host_id + end + return hostname +end + +local function get_service_description(host_id, service_id) + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. + " not found. Restarting centengine should fix this.") + service = service_id + end + return service +end + +function EventQueue:add(e) + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + if current_event == previous_event then + broker_log:info(3, "EventQueue:add: Duplicate event ignored.") + return false + end + previous_event = current_event + + broker_log:info(3, "EventQueue:add: " .. current_event) + + local type = "host" + local hostname = get_hostname(e.host_id) + if hostname == e.host_id then + if self.skip_anon_events == 1 then return false end + end + + local service_description = "" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events == 1 then return false end + end + end + + if string.match(self.filter_type, "status") then + self.events[#self.events + 1] = { + timestamp = e.last_check, + host = hostname, + service = service_description, + output = string.match(e.output, "^(.*)\n"), + status = e.state, + state = e.state_type, + type = type + } + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: timestamp = " .. + self.events[#self.events].timestamp) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: host = " .. + self.events[#self.events].host) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: service = " .. + self.events[#self.events].service) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: output = " .. + self.events[#self.events].output) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: status = " .. + self.events[#self.events].status) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: state = " .. + self.events[#self.events].state) + end + if string.match(self.filter_type, "metric") then + local perfdata, perfdata_err = broker.parse_perfdata(e.perfdata, true) + if perfdata_err then + broker_log:info(3, "EventQueue:add: No metric: " .. 
perfdata_err) + return false + end + + for m,v in pairs(perfdata) do + local instance = string.match(m, "(.*)#.*") + if not instance then + instance = "" + end + + local perfval = { + value = "", + min = "", + max = "", + uom = "" + } + for i,d in pairs(perfdata[m]) do + perfval[i] = d + end + self.events[#self.events + 1] = { + timestamp = e.last_check, + host = hostname, + service = service_description, + instance = instance, + metric = string.gsub(m, ".*#", ""), + value = perfval.value, + min = perfval.min, + max = perfval.max, + uom = perfval.uom, + type = type + } + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: timestamp = " .. + self.events[#self.events].timestamp) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: host = " .. + self.events[#self.events].host) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: service = " .. + self.events[#self.events].service) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: instance = " .. + self.events[#self.events].instance) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: metric = " .. + self.events[#self.events].metric) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: value = " .. + self.events[#self.events].value) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: min = " .. + self.events[#self.events].min) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: max = " .. + self.events[#self.events].max) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: uom = " .. + self.events[#self.events].uom) + end + end + + -- then we check whether it is time to send the events to the receiver and flush + if #self.events >= self.max_buffer_size then + broker_log:info(2, "EventQueue:add: flushing because buffer size reached " .. self.max_buffer_size .. + " elements.") + local retval = self:flush() + return retval + elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age then + if #self.events > 0 then + broker_log:info(2, "EventQueue:add: flushing " .. #self.events .. " elements because buffer age reached " .. + (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.") + local retval = self:flush() + return retval + end + return false + else + return false + end +end + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue.new(conf) + local retval = { + http_server_address = "", + http_server_port = 9200, + http_server_protocol = "http", + http_timeout = 5, + elastic_username = "", + elastic_password = "", + elastic_index_metric = "centreon_metric", + elastic_index_status = "centreon_status", + filter_type = "metric,status", + max_buffer_size = 5000, + max_buffer_age = 30, + log_level = 0, -- already proceeded in init function + log_path = "", -- already proceeded in init function + skip_anon_events = 1 + } + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + if i == "elastic_password" then + v = string.gsub(v, ".", "*") + end + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. 
v) + else + broker_log:warning(1, "EventQueue.new: ignoring parameter " .. i .. " => " .. v) + end + end + retval.__internal_ts_last_flush = os.time() + retval.events = {}, + setmetatable(retval, EventQueue) + -- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + local log_level = 3 + local log_path = "/var/log/centreon-broker/stream-connector-elastic-neb.log" + for i,v in pairs(conf) do + if i == "log_level" then + log_level = v + end + if i == "log_path" then + log_path = v + end + end + broker_log:set_parameters(log_level, log_path) + broker_log:info(2, "init: Beginning init() function") + queue = EventQueue.new(conf) + broker_log:info(2, "init: Ending init() function, Event queue created") +end + +-- Fonction write() +function write(e) + broker_log:info(3, "write: Beginning write() function") + local retval = queue:add(e) + broker_log:info(3, "write: Ending write() function, returning " .. tostring(retval)) + -- return true to ask broker to clear its cache, false otherwise + return retval +end + +-- Fonction filter() +-- return true if category NEB and elements Host or Service +-- return false otherwise +function filter(category, element) + return category == 1 and (element == 14 or element == 24) +end From c35b7b7383024a1d6921fa03f49b39acc21fb769 Mon Sep 17 00:00:00 2001 From: SAD Date: Sun, 13 Oct 2019 23:13:07 +0200 Subject: [PATCH 031/219] Update omi_connector.lua (#16) fix url value in post request --- stream-connectors/omi/omi_connector.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/omi/omi_connector.lua b/stream-connectors/omi/omi_connector.lua index f583ce34108..1132393b333 100644 --- a/stream-connectors/omi/omi_connector.lua +++ b/stream-connectors/omi/omi_connector.lua @@ -89,7 +89,7 @@ local function flush() local respbody = {} local body, code, headers, status = http.request { method = "POST", - url = "https://" .. my_data.ipaddr .. ":" .. my_data.port .. my_data.port, + url = "https://" .. my_data.ipaddr .. ":" .. my_data.port .. my_data.url, source = ltn12.source.string(buf), headers = { From bfb48ac7669c370a29ca2742e091cebfe68f10b0 Mon Sep 17 00:00:00 2001 From: CPbN <40244829+CPbN@users.noreply.github.com> Date: Sun, 13 Oct 2019 23:16:14 +0200 Subject: [PATCH 032/219] Escape / replace spec chars (#17) * Escape / replace spec chars * Typo --- stream-connectors/influxdb/influxdb-neb.lua | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index 65757678e65..8f73729fd86 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -145,12 +145,14 @@ function EventQueue:add(e) end -- message format : [,=...] -- =[,=...] [unix-nano-timestamp] + -- some characters [ ,=] must be escaped, let's replace them by _ for better handling -- consider space in service_description as a separator for an item tag local item = "" - if string.find(service_description, " ") then - item = ",item=" .. 
string.gsub(service_description, ".* ", "") - service_description = string.gsub(service_description, " .*", "") + if string.find(service_description, " [^ ]+$") then + item = ",item=" .. string.gsub(string.gsub(service_description, ".* ", "", 1), "[ ,=]+" ,"_") + service_description = string.gsub(service_description, " +[^ ]+$", "", 1) end + service_description = string.gsub(service_description, "[ ,=]+" ,"_") -- define messages from perfata, transforming instance names to inst tags, which leads to one message per instance local instances = {} for m,v in pairs(perfdata) do @@ -158,7 +160,7 @@ function EventQueue:add(e) if not inst then inst = "" else - inst = ",inst=" .. inst + inst = ",inst=" .. string.gsub(inst, "[ ,=]+" ,"_") end if not instances[inst] then instances[inst] = self.measurement .. service_description .. ",host=" .. host_name .. item .. inst .. " " From ac70a31b9a85beef602bad2377f406845d26fec2 Mon Sep 17 00:00:00 2001 From: Thomas Arnaud <38663853+Nohzoh@users.noreply.github.com> Date: Thu, 21 Nov 2019 10:07:50 +0100 Subject: [PATCH 033/219] feat(warp10): add host and srvc groups (#18) add host and service groups as labels of measures --- stream-connectors/warp10/export-warp10.lua | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/stream-connectors/warp10/export-warp10.lua b/stream-connectors/warp10/export-warp10.lua index f9a831de5a8..5878c0d8f1d 100644 --- a/stream-connectors/warp10/export-warp10.lua +++ b/stream-connectors/warp10/export-warp10.lua @@ -80,12 +80,27 @@ function write(d) broker_log:error(0, "You should restart engine to fill the cache") return true end + local labels = "hostname=" .. host .. ",service=" .. service .. ',' + local sgroups = broker_cache:get_servicegroups(d.host_id, d.service_id) + if sgroups and #sgroups > 0 then + grps = "" + for idx = 1, #sgroups do + grps = grps .. sgroups[idx].group_name .. ' ' + end + labels = labels .. "service_groups=" .. grps .. ',' + end + local hgroups = broker_cache:get_hostgroups(d.host_id) + if hgroups and #hgroups > 0 then + grps = "" + for idx = 1, #hgroups do + grps = grps .. hgroups[idx].group_name .. ' ' + end + labels = labels .. "host_groups=" .. grps .. ',' + end for metric,v in pairs(pd) do local line = tostring(d.last_update) .. "000000// " .. metric - .. "{" .. "host=" .. host - .. ",service=" .. service - .. "} " + .. "{" .. labels .. "} " .. tostring(v) table.insert(my_data.data, line) broker_log:info(0, "New line added to data: '" .. line .. "'") From 139921a7490bed30a77a3613a67c754dfeb46241 Mon Sep 17 00:00:00 2001 From: UrBnW <40244829+UrBnW@users.noreply.github.com> Date: Fri, 22 May 2020 10:27:43 +0200 Subject: [PATCH 034/219] InfluxDB enhancements (#21) * enh(influxdb): Add replacement_character option * enh(influxdb): Only consider new perfdata --- stream-connectors/influxdb/influxdb-neb.lua | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index 8f73729fd86..4562f9fd5fd 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -146,26 +146,30 @@ function EventQueue:add(e) -- message format : [,=...] -- =[,=...] 
[unix-nano-timestamp] -- some characters [ ,=] must be escaped, let's replace them by _ for better handling - -- consider space in service_description as a separator for an item tag + -- consider last space in service_description as a separator for an item tag local item = "" if string.find(service_description, " [^ ]+$") then - item = ",item=" .. string.gsub(string.gsub(service_description, ".* ", "", 1), "[ ,=]+" ,"_") + item = ",item=" .. string.gsub(string.gsub(service_description, ".* ", "", 1), "[ ,=]+", self.replacement_character) service_description = string.gsub(service_description, " +[^ ]+$", "", 1) end - service_description = string.gsub(service_description, "[ ,=]+" ,"_") + service_description = string.gsub(service_description, "[ ,=]+", self.replacement_character) -- define messages from perfata, transforming instance names to inst tags, which leads to one message per instance + -- consider new perfdata (dot-separated metric names) only (of course except for host-latency) local instances = {} for m,v in pairs(perfdata) do - local inst = string.match(m, "(.*)#.*") + local inst, metric = string.match(m, "(.+)#(.+)") if not inst then inst = "" + metric = m else - inst = ",inst=" .. string.gsub(inst, "[ ,=]+" ,"_") + inst = ",inst=" .. string.gsub(inst, "[ ,=]+", self.replacement_character) end - if not instances[inst] then - instances[inst] = self.measurement .. service_description .. ",host=" .. host_name .. item .. inst .. " " + if not e.service_id or string.match(metric, ".+[.].+") then + if not instances[inst] then + instances[inst] = self.measurement .. service_description .. ",host=" .. host_name .. item .. inst .. " " + end + instances[inst] = instances[inst] .. metric .. "=" .. v .. "," end - instances[inst] = instances[inst] .. string.gsub(m, ".*#", "") .. "=" .. v .. "," end -- compute final messages to push for _,v in pairs(instances) do @@ -208,6 +212,7 @@ function EventQueue.new(conf) max_buffer_size = 5000, max_buffer_age = 30, skip_anon_events = 1, + replacement_character = "_", log_level = 0, -- already proceeded in init function log_path = "" -- already proceeded in init function } From d9cf86555fd2deca7b2f23667325d0bf62d47737 Mon Sep 17 00:00:00 2001 From: omercier <32134301+omercier@users.noreply.github.com> Date: Mon, 8 Jun 2020 14:40:54 +0200 Subject: [PATCH 035/219] Ome fix omi doc and minor improvement (#19) * doc: wrong path for OMI in README.md * enh: lighter init log (one-liner) * enh: flush now called when data sizeor age is reached, not exceeded --- stream-connectors/README.md | 2 +- stream-connectors/omi/omi_connector.lua | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/stream-connectors/README.md b/stream-connectors/README.md index 32dfc2f2609..a3fa73d9d84 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -261,7 +261,7 @@ By default logs are in /var/log/centreon-broker/ndo-output.log # OMI -## stream connector for HP OMI : *ndo/ndo-output.lua* +## stream connector for HP OMI : *omi/omi_connector.lua* Create a broker output for HP OMI Connector diff --git a/stream-connectors/omi/omi_connector.lua b/stream-connectors/omi/omi_connector.lua index 1132393b333..c299f307fae 100644 --- a/stream-connectors/omi/omi_connector.lua +++ b/stream-connectors/omi/omi_connector.lua @@ -69,13 +69,13 @@ function init(conf) end broker_log:set_parameters(my_data.loglevel, my_data.logfile) broker_log:info(2, "init values :" .. - "\nlogfile = " .. my_data.logfile .. - "\nloglevel = " .. my_data.loglevel .. 
- "\nipaddr = " .. my_data.ipaddr .. - "\nurl = " .. my_data.url .. - "\nport = " .. my_data.port .. - "\nmax_size = " .. my_data.max_size .. - "\nmax_age = " .. my_data.max_age .. "\n") + " logfile = " .. my_data.logfile .. + " loglevel = " .. my_data.loglevel .. + " ipaddr = " .. my_data.ipaddr .. + " url = " .. my_data.url .. + " port = " .. my_data.port .. + " max_size = " .. my_data.max_size .. + " max_age = " .. my_data.max_age .. "\n") end --called when max_size or max_age is reached @@ -143,7 +143,7 @@ function write(d) end end end - if #my_data.data > my_data.max_size or os.time() - my_data.flush_time > my_data.max_age then + if #my_data.data >= my_data.max_size or os.time() - my_data.flush_time >= my_data.max_age then broker_log:info(2, "max size or flush time is reached, flushing data") return flush() end From fdfd3c07fc3bfaf92a1d43131a98df4318405723 Mon Sep 17 00:00:00 2001 From: omercier <32134301+omercier@users.noreply.github.com> Date: Mon, 8 Jun 2020 14:57:19 +0200 Subject: [PATCH 036/219] feat(pagerduty): adding a connector to PagerDuty based on lua-curl (#22) * feat(pagerduty): adding a connector to PagerDuty based on lua-curl * fix(pagerduty): making ifnil* functions local instead of global --- stream-connectors/README.md | 94 ++++++ stream-connectors/pagerduty/pagerduty.lua | 386 ++++++++++++++++++++++ 2 files changed, 480 insertions(+) create mode 100644 stream-connectors/pagerduty/pagerduty.lua diff --git a/stream-connectors/README.md b/stream-connectors/README.md index a3fa73d9d84..78cb00deb24 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -30,6 +30,7 @@ Here is a list of the available scripts: * [ServiceNow](#service-now) * [NDO](#NDO) * [HP OMI](#OMI) +* [PagerDuty](#PagerDuty) # Elasticsearch @@ -273,3 +274,96 @@ Parameters to specify in the broker output web ui are: * loglevel as **number** : the log level (0, 1, 2, 3) where 3 is the maximum level * max_size as **number** : how many events to store before sending them to the server * max_age as **number** : flush the events when the specified time (in second) is reach (even if max_size is not reach) + +# PagerDuty + +## Installation / prerequisites + +The `lua-curl` and `luatz` libraries are required by this script: + +```bash +yum install -y lua-curl epel-release +yum install -y luarocks +luarocks install luatz +``` + +Then copy the `pagerduty.lua` script to `/usr/share/centreon-broker/lua`. + +## Configuration + +### Minimal configuration + +Here are the steps to configure your stream connector: + +* Add a new "Generic - Stream connector" output to the central broker in the "Configuration / Poller / Broker configuration" menu. +* Name it as wanted and set the right path: + +| Name | pagerduty | +| ---- | -------------------------------------------- | +| Path | /usr/share/centreon-broker/lua/pagerduty.lua | + +* Add at least one string parameter containing your PagerDuty routing key/token. + +| Type | String | +| ----------------- | -------------------- | +| `pdy_routing_key` | `` | + +Thats all for now! 
+
+Then save your configuration, export it and restart the broker daemon:
+
+```bash
+systemctl restart cbd
+```
+
+### Advanced configuration
+
+#### Proxy
+
+If your Centreon central server has no direct access to PagerDuty but needs a proxy server, you will have to add a new string parameter:
+
+| Type                | String                          |
+| ------------------- | ------------------------------- |
+| `http_proxy_string` | `http://your.proxy.server:3128` |
+
+#### Centreon URL
+
+In order to have working links/URL in your PagerDuty events, you are encouraged to add this parameter:
+
+| Type               | String                        |
+| ------------------ | ----------------------------- |
+| `pdy_centreon_url` | `http://your.centreon.server` |
+
+#### Log level / file
+
+The default value of 2 is fine for initial troubleshooting, but generates a huge amount of logs if you have a lot of hosts. In order to get fewer log messages, you should add this parameter:
+
+| Type        | Number |
+| ----------- | ------ |
+| `log_level` | 1      |
+
+The default log file is `/var/log/centreon-broker/stream-connector-pagerduty.log`. If it does not suit you, you can set it with the `log_path` parameter:
+
+| Type       | String                                           |
+| ---------- | ------------------------------------------------ |
+| `log_path` | `/var/log/centreon-broker/my-custom-logfile.log` |
+
+#### Buffer size / age
+
+In case you want to tune the maximum number of events sent in a row for optimization purpose, you may add this parameter:
+
+| Type              | Number             |
+| ----------------- | ------------------ |
+| `max_buffer_size` | 10 (default value) |
+
+In case you want to shorten the delay (in seconds) between the reception of an event and its transmission to PagerDuty, you can set this parameter:
+
+| Type             | Number             |
+| ---------------- | ------------------ |
+| `max_buffer_age` | 30 (default value) |
+

diff --git a/stream-connectors/pagerduty/pagerduty.lua b/stream-connectors/pagerduty/pagerduty.lua
new file mode 100644
index 00000000000..693e1cdd7ca
--- /dev/null
+++ b/stream-connectors/pagerduty/pagerduty.lua
@@ -0,0 +1,386 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker PagerDuty Connector
+-- Tested with the public API on the developer platform:
+-- https://events.pagerduty.com/v2/enqueue
+--
+-- References:
+-- https://developer.pagerduty.com/api-reference/reference/events-v2/openapiv3.json/paths/~1enqueue/post
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Prerequisites:
+--
+-- You need a PagerDuty instance
+-- You need your instance's routing_key. According to the page linked above: "The GUID of one of your Events API V2 integrations. This is the "Integration Key" listed on the Events API V2 integration's detail page."
+--
+-- The lua-curl and luatz libraries are required by this script:
+-- yum install epel-release
+-- yum install lua-curl luarocks
+-- luarocks install luatz
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Parameters:
+-- [MANDATORY] pdy_routing_key: see above, this will be your authentication token
+-- [RECOMMENDED] pdy_centreon_url: in order to get links/url that work in your events
+-- [RECOMMENDED] log_level: verbosity level. Default is 2 but in production 1 is the recommended value. 
+-- [OPTIONAL] http_server_url: default "https://events.pagerduty.com/v2/enqueue"
+-- [OPTIONAL] http_proxy_string: default empty
+--
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require "cURL"
+local new_from_timestamp = require "luatz.timetable".new_from_timestamp
+
+-- Global variables
+local previous_event = ""
+-- Nagios states to Pagerduty severity conversion table
+local from_state_to_severity = { "info", "warning", "critical", "error" }
+
+-- Useful functions
+local function ifnil(var, alt)
+  if var == nil then
+    return alt
+  else
+    return var
+  end
+end
+
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == '' then
+    return alt
+  else
+    return var
+  end
+end
+
+local function get_hostname(host_id)
+  local hostname = broker_cache:get_hostname(host_id)
+  if not hostname then
+    broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. Restarting centengine should fix this.")
+    hostname = host_id
+  end
+  return hostname
+end
+
+local function get_service_description(host_id, service_id)
+  local service = broker_cache:get_service_description(host_id, service_id)
+  if not service then
+    broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. " not found. Restarting centengine should fix this.")
+    service = service_id
+  end
+  return service
+end
+
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+-- Constructor
+-- @param conf The table given by the init() function and returned from the GUI
+-- @return the new EventQueue
+--------------------------------------------------------------------------------
+
+function EventQueue.new(conf)
+  local retval = {
+    http_server_url = "https://events.pagerduty.com/v2/enqueue",
+    http_proxy_string = "",
+    http_timeout = 5,
+    pdy_routing_key = "Please fill pdy_routing_key in StreamConnector parameter",
+    pdy_centreon_url = "http://set.pdy_centreon_url.parameter",
+    filter_type = "metric,status",
+    max_buffer_size = 10,
+    max_buffer_age = 30,
+    log_level = 2, -- already processed in init function
+    log_path = "", -- already processed in init function
+    skip_anon_events = 1
+  }
+  for i,v in pairs(conf) do
+    if retval[i] then
+      retval[i] = v
+      broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v)
+    end
+  end
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {},
+  setmetatable(retval, EventQueue)
+  -- Internal data initialization
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add method
+-- @param e An event
+--------------------------------------------------------------------------------
+
+function EventQueue:add(e)
+
+  local type = "host"
+  local hostname = get_hostname(e.host_id)
+  if hostname == e.host_id then
+    if self.skip_anon_events ~= 1 then
+      broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. 
e.host_id .."'") + return false + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only") + end + end + + local service_description = "" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + end + end + end + + local pdy_dedup_key + if e.service_id then --to remain consistent in the alert handling even in the event of the loss of the broker cache, we should use the ids to link the events + pdy_dedup_key = e.host_id .. "_" .. e.service_id + else + pdy_dedup_key = e.host_id .. "_H" + end + + -- converting epoch timestamp to UTC time in RFC3339 + local pdy_timestamp = new_from_timestamp(e.last_update):rfc_3339() + broker_log:info(3, "EventQueue:add: Timestamp converted from " .. e.last_update .. " to \"" .. pdy_timestamp .. "\"") + + -- converting e.state into PagerDuty severity + -- from_state_to_severity maps between 'critical', 'warning', 'error' or 'info' and e.state. WARNING: if info then "event_action" is not "trigger" but "resolve" + local pdy_severity = ifnil_or_empty(from_state_to_severity[e.state + 1], 'error') + broker_log:info(3, "EventQueue:add: Severity converted from " .. e.state .. " to \"" .. pdy_severity .. "\"") + + -- handling empty output (empty "summary" cannot be sent to PagerDuty) + local pdy_summary = ifnil_or_empty(string.match(e.output, "^(.*)\n"), 'no output') + + -- basic management of "class" attribute + local pdy_class + if e.service_id then + pdy_class = "service" + else + pdy_class = "host" + end + + -- managing "event_action" (trigger/resolve) + local pdy_event_action + if pdy_severity == "info" then + pdy_event_action = "resolve" + else + pdy_event_action = "trigger" + end + broker_log:info(3, "EventQueue:add: Since severity is \"" .. pdy_severity .. "\", event_action is \"" .. pdy_event_action .. "\"") + + -- FIXME: managing perfdata + if e.perfdata then + broker_log:info(3, "EventQueue:add: Perfdata " .. broker.json_encode(e.perfdata) .. " ") + end + + -- FIXME: customize usage of "group" + + -- Appending the current event to the queue + self.events[#self.events + 1] = { + payload = { + summary = pdy_summary, + timestamp = pdy_timestamp, + severity = pdy_severity, + source = hostname, + component = service_description, + --group, FIXME: get hostgroup matching a filter for pdy? + class = pdy_class + --custom_details = { + --metric1 = value1, ... + --} + }, + routing_key = self.pdy_routing_key, + dedup_key = pdy_dedup_key, + event_action = pdy_event_action, + client = "Centreon Stream Connector", + client_url = self.pdy_centreon_url, + links = { + { + href = self.pdy_centreon_url .. "/centreon/main.php?p=20202&o=hd&host_name=" .. hostname, + text = "Link to host summary." 
+ } + } + --images = { + --{ + --src = "https://chart.googleapis.com/chart?chs=600x400&chd=t:6,2,9,5,2,5,7,4,8,2,1&cht=lc&chds=a&chxt=y&chm=D,0033FF,0,0,5,1", + --href = "https://google.com", + --alt = "An example link with an image" + --} + --} + } + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush method +-- Called when the max number of events or the max age are reached +-------------------------------------------------------------------------------- + +function EventQueue:flush() + + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + local http_post_data = "" + for _, raw_event in ipairs(self.events) do + http_post_data = http_post_data .. broker.json_encode(raw_event) + end + for s in http_post_data:gmatch("[^\r\n]+") do + broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + end + + broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"") + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.http_server_url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.http_timeout) + :setopt( + curl.OPT_HTTPHEADER, + { + "accept: application/vnd.pagerduty+json;version=2", + "content-type: application/json", + --"authorization: Token token=" .. self.pdy_routing_key, + "x-routing-key: " .. self.pdy_routing_key + } + ) + + -- setting the CURLOPT_PROXY + if self.http_proxy_string and self.http_proxy_string ~= "" then + broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'") + http_request:setopt(curl.OPT_PROXY, self.http_proxy_string) + end + + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + -- Handling the return code + local retval = false + if http_response_code == 202 then + broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code) + -- now that the data has been sent, we empty the events array + self.events = {} + retval = true + else + broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code) + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. "\n\"\n") + end + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + local log_level = 1 + local log_path = "/var/log/centreon-broker/stream-connector-pagerduty.log" + for i,v in pairs(conf) do + if i == "log_level" then + log_level = v + end + if i == "log_path" then + log_path = v + end + end + broker_log:set_parameters(log_level, log_path) + broker_log:info(0, "init: Starting PagerDuty StreamConnector (log level: " .. log_level .. 
")") + broker_log:info(2, "init: Beginning init() function") + queue = EventQueue.new(conf) + broker_log:info(2, "init: Ending init() function, Event queue created") +end + +-- Fonction write() +function write(e) + broker_log:info(3, "write: Beginning function") + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Here come the filters + -- Host/service status only + if not (e.category == 1 and (e.element == 24 or e.element == 14)) then + broker_log:info(3, "write: Neither host nor service status event. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + broker_log:info(3, "write: Raw event: " .. current_event) + + if e.state_type ~= 1 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Not HARD state type. Dropping.") + return true + end + + -- Ignore states different from previous hard state only + if e.last_hard_state_change and e.last_hard_state_change < e.last_check then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Last hard state change prior to last check => no state change. Dropping.") + return true + end + + -- Ignore objects in downtime + if e.scheduled_downtime_depth ~= 0 then --we keep only events in hard state and not in downtime + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Scheduled downtime. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + if current_event == previous_event then + broker_log:info(3, "write: Duplicate event ignored.") + return true + end + + -- Ignore pending states + if e.state and e.state == 4 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Pending state ignored. Dropping.") + return true + end + -- The current event now becomes the previous + previous_event = current_event + -- Once all the filters have been passed successfully, we can add the current event to the queue + queue:add(e) + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. 
") is reached, flushing data") + return queue:flush() + end + broker_log:info(3, "write: Ending function") + + return true +end + From 43bda970752df8f6c0f71b577452e2f15190329f Mon Sep 17 00:00:00 2001 From: UrBnW <40244829+UrBnW@users.noreply.github.com> Date: Sun, 5 Jul 2020 18:28:37 +0200 Subject: [PATCH 037/219] enh(influx) Support perfdate perfdata (#23) * enh(influx) Support perfdate perfdata * Typo --- stream-connectors/influxdb/influxdb-neb.lua | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index 4562f9fd5fd..14f8b16c452 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -156,6 +156,7 @@ function EventQueue:add(e) -- define messages from perfata, transforming instance names to inst tags, which leads to one message per instance -- consider new perfdata (dot-separated metric names) only (of course except for host-latency) local instances = {} + local perfdate = e.last_check for m,v in pairs(perfdata) do local inst, metric = string.match(m, "(.+)#(.+)") if not inst then @@ -169,11 +170,13 @@ function EventQueue:add(e) instances[inst] = self.measurement .. service_description .. ",host=" .. host_name .. item .. inst .. " " end instances[inst] = instances[inst] .. metric .. "=" .. v .. "," + elseif metric == "perfdate" then + perfdate = v end end -- compute final messages to push for _,v in pairs(instances) do - self.events[#self.events + 1] = v:sub(1, -2) .. " " .. e.last_check .. "000000000" .. "\n" + self.events[#self.events + 1] = v:sub(1, -2) .. " " .. perfdate .. "000000000" .. "\n" broker_log:info(3, "EventQueue:add: adding " .. self.events[#self.events]:sub(1, -2)) end -- then we check whether it is time to send the events to the receiver and flush From d195ee6f2baf7c4ada21f65dbc20fafa2315dbc9 Mon Sep 17 00:00:00 2001 From: omercier <32134301+omercier@users.noreply.github.com> Date: Tue, 21 Jul 2020 17:29:44 +0200 Subject: [PATCH 038/219] enh(pagerduty): improving various defaut param values + handling metrics (#24) --- stream-connectors/pagerduty/pagerduty.lua | 35 ++++++++++++++--------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/stream-connectors/pagerduty/pagerduty.lua b/stream-connectors/pagerduty/pagerduty.lua index 693e1cdd7ca..3219c316afb 100644 --- a/stream-connectors/pagerduty/pagerduty.lua +++ b/stream-connectors/pagerduty/pagerduty.lua @@ -96,10 +96,8 @@ function EventQueue.new(conf) pdy_routing_key = "Please fill pdy_routing_key in StreamConnector parameter", pdy_centreon_url = "http://set.pdy_centreon_url.parameter", filter_type = "metric,status", - max_buffer_size = 10, - max_buffer_age = 30, - log_level = 2, -- already processed in init function - log_path = "", -- already processed in init function + max_buffer_size = 1, + max_buffer_age = 5, skip_anon_events = 1 } for i,v in pairs(conf) do @@ -185,9 +183,20 @@ function EventQueue:add(e) end broker_log:info(3, "EventQueue:add: Since severity is \"" .. pdy_severity .. "\", event_action is \"" .. pdy_event_action .. "\"") - -- FIXME: managing perfdata + -- Managing perfdata + local custom_details = {} if e.perfdata then - broker_log:info(3, "EventQueue:add: Perfdata " .. broker.json_encode(e.perfdata) .. " ") + broker_log:info(3, "EventQueue:add: Perfdata list: " .. broker.json_encode(e.perfdata) .. 
" ") + -- Case when the perfdata name is delimited with simple quotes: spaces allowed + for metric_name, metric_value in e.perfdata:gmatch("%s?'(.+)'=(%d+[%a]?);?[%W;]*%s?") do + broker_log:info(3, "EventQueue:add: Perfdata " .. metric_name .. " = " .. metric_value) + custom_details[metric_name] = metric_value + end + -- Case when the perfdata name is NOT delimited with simple quotes: no spaces allowed + for metric_name, metric_value in e.perfdata:gmatch("%s?([^'][%S]+[^'])=(%d+[%a]?);?[%W;]*%s?") do + broker_log:info(3, "EventQueue:add: Perfdata " .. metric_name .. " = " .. metric_value) + custom_details[metric_name] = metric_value + end end -- FIXME: customize usage of "group" @@ -200,11 +209,9 @@ function EventQueue:add(e) severity = pdy_severity, source = hostname, component = service_description, - --group, FIXME: get hostgroup matching a filter for pdy? - class = pdy_class - --custom_details = { - --metric1 = value1, ... - --} + --group, FIXME: get hostgroup matching a filter for PagerDuty? + class = pdy_class, + custom_details = custom_details }, routing_key = self.pdy_routing_key, dedup_key = pdy_dedup_key, @@ -272,6 +279,7 @@ function EventQueue:flush() end -- adding the HTTP POST data + broker_log:info(3, "EventQueue:flush: POST data: '" .. http_post_data .. "'") http_request:setopt_postfields(http_post_data) -- performing the HTTP request @@ -288,8 +296,7 @@ function EventQueue:flush() self.events = {} retval = true else - broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code) - broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. "\n\"\n") + broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code .. " message is:\n\"" .. http_response_body .. "\n\"\n") end -- and update the timestamp self.__internal_ts_last_flush = os.time() @@ -303,7 +310,7 @@ local queue -- Fonction init() function init(conf) - local log_level = 1 + local log_level = 2 local log_path = "/var/log/centreon-broker/stream-connector-pagerduty.log" for i,v in pairs(conf) do if i == "log_level" then From 2503664113f371aeec36d15d502f8a3aa9b56678 Mon Sep 17 00:00:00 2001 From: omercier <32134301+omercier@users.noreply.github.com> Date: Thu, 23 Jul 2020 15:48:58 +0200 Subject: [PATCH 039/219] enh(pagerduty): Update pager duty (#26) * enh(pagerduty): improving various defaut param values + handling metrics * enh(pagerduty): handle the case when the event queue is already full before appending a new event --- stream-connectors/pagerduty/pagerduty.lua | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stream-connectors/pagerduty/pagerduty.lua b/stream-connectors/pagerduty/pagerduty.lua index 3219c316afb..f4091403691 100644 --- a/stream-connectors/pagerduty/pagerduty.lua +++ b/stream-connectors/pagerduty/pagerduty.lua @@ -337,6 +337,13 @@ function write(e) queue:flush() end + -- Then we check that the event queue is not already full + if (#queue.events >= queue.max_buffer_size) then + broker_log:warning(1, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") + os.execute("sleep " .. 
tonumber(1))
+ return queue:flush()
+ end
+
 -- Here come the filters
 -- Host/service status only
 if not (e.category == 1 and (e.element == 24 or e.element == 14)) then

From 771b45b4fa75ffd434c07be2a03097a1441888fc Mon Sep 17 00:00:00 2001
From: omercier <32134301+omercier@users.noreply.github.com>
Date: Thu, 23 Jul 2020 17:38:40 +0200
Subject: [PATCH 040/219] Ome add bsm connector wip (#20)

* doc: wrong path for OMI in README.md

* enh: lighter init log (one-liner)

* enh: flush now called when data size or age is reached, not exceeded

* (feat+doc): Add HP BSM connector, forked from OMI

* enh/refacto(bsm): a lot of changes
* use of luacurl
* improving reliability

* fix(merge): restoring OMI doc that was deleted during bad merge in github
 WUI
---
 stream-connectors/README.md | 57 +++-
 stream-connectors/bsm/bsm_connector.lua | 369 ++++++++++++++++++++++++
 2 files changed, 417 insertions(+), 9 deletions(-)
 create mode 100644 stream-connectors/bsm/bsm_connector.lua

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 78cb00deb24..2410f7faa5b 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -268,12 +268,54 @@ Create a broker output for HP OMI Connector

Parameters to specify in the broker output web ui are:

-* ipaddr as **string**: the ip address of the listening server
-* port as **number**: the listening server port
-* logfile as **string**: where to send logs
-* loglevel as **number** : the log level (0, 1, 2, 3) where 3 is the maximum level
-* max_size as **number** : how many events to store before sending them to the server
-* max_age as **number** : flush the events when the specified time (in second) is reach (even if max_size is not reach)
+* `ipaddr` as **string**: the ip address of the listening server
+* `port` as **number**: the listening server port
+* `logfile` as **string**: where to send logs
+* `loglevel` as **number**: the log level (0, 1, 2, 3) where 3 is the maximum level
+* `max_size` as **number**: how many events to store before sending them to the server
+* `max_age` as **number**: flush the events when the specified time (in seconds) is reached (even if `max_size` is not reached)
+
+# BSM
+
+## Installation
+
+Login as `root` on the Centreon central server using your favorite SSH client.
+
+In case your Centreon central server must use a proxy server to reach the Internet, you will have to export the `https_proxy` environment variable and configure `yum` to be able to install everything.
+
+```bash
+export https_proxy=http://my.proxy.server:3128
+echo "proxy=http://my.proxy.server:3128" >> /etc/yum.conf
+```
+
+Now that your Centreon central server is able to reach the Internet, you can run:
+
+```bash
+yum install -y lua-curl
+```
+
+These packages are necessary for the script to run. Now let's download the script:
+
+```bash
+wget -O /usr/share/centreon-broker/lua/bsm_connector.lua https://raw.githubusercontent.com/centreon/centreon-stream-connector-scripts/master/bsm/bsm_connector.lua
+chmod 644 /usr/share/centreon-broker/lua/bsm_connector.lua
+```
+
+The BSM StreamConnector is now installed on your Centreon central server!
+
+## Configuration
+
+Create a broker output for HP BSM Connector.
+
+Parameters to specify in the broker output WUI are:
+
+* `source_ci` (string): Name of the transmitter, usually Centreon server name
+* `http_server_url` (string): the full HTTP URL. Default: https://my.bsm.server:30005/bsmc/rest/events/ws-centreon/. 
+* `http_proxy_string` (string): the full proxy URL if needed to reach the BSM server. Default: empty.
+* `log_path` (string): the log file to use
+* `log_level` (number): the log level (0, 1, 2, 3) where 3 is the maximum level. 0 logs almost nothing. 1 logs only the beginning of the script and errors. 2 logs a reasonable amount of verbosity. 3 logs almost everything possible, to be used only for debug. Recommended value in production: 1.
+* `max_buffer_size` (number): how many events to store before sending them to the server.
+* `max_buffer_age` (number): flush the events when the specified time (in seconds) is reached (even if `max_buffer_size` is not reached).

 # PagerDuty

@@ -364,6 +406,3 @@ In case you want to shorten the delay (in seconds) between the reception of an e
 | ---------------- | ------------------ |
 | `max_buffer_age` | 30 (default value) |

-
-
-
diff --git a/stream-connectors/bsm/bsm_connector.lua b/stream-connectors/bsm/bsm_connector.lua
new file mode 100644
index 00000000000..a20f1b5d757
--- /dev/null
+++ b/stream-connectors/bsm/bsm_connector.lua
@@ -0,0 +1,369 @@
+--
+-- Copyright 2018 Centreon
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- For more information : contact@centreon.com
+--
+-- To work you need to provide to this script a Broker stream connector output configuration
+-- with the following information:
+--
+-- source_ci (string): Name of the transmitter, usually Centreon server name
+-- http_server_url (string): the full HTTP URL. Default: https://my.bsm.server:30005/bsmc/rest/events/ws-centreon/.
+-- http_proxy_string (string): the full proxy URL if needed to reach the BSM server. Default: empty.
+-- log_path (string): the log file to use
+-- log_level (number): the log level (0, 1, 2, 3) where 3 is the maximum level. 0 logs almost nothing. 1 logs only the beginning of the script and errors. 2 logs a reasonable amount of verbosity. 3 logs almost everything possible, to be used only for debug. Recommended value in production: 1.
+-- max_buffer_size (number): how many events to store before sending them to the server.
+-- max_buffer_age (number): flush the events when the specified time (in seconds) is reached (even if max_buffer_size is not reached).

-- Libraries
local curl = require "cURL"

-- workaround https://github.com/centreon/centreon-broker/issues/201
local previous_event = ""

-- Useful functions
local function ifnil(var, alt)
 if not var or var == nil then
 return alt
 else
 return var
 end
end

local function ifnil_or_empty(var, alt)
 if var == nil or var == '' then
 return alt
 else
 return var
 end
end

local function get_hostname(host_id)
 local hostname = broker_cache:get_hostname(host_id)
 if not hostname then
 broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. 
Restarting centengine should fix this.") + hostname = host_id + end + return hostname +end + +local function get_service_description(host_id, service_id) + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. " not found. Restarting centengine should fix this.") + service = service_id + end + return service +end + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue.new(conf) + local retval = { + source_ci = "Centreon", + http_server_url = "https://my.bsm.server:30005/bsmc/rest/events/ws-centreon/", + http_proxy_string = "", + http_timeout = 10, + filter_type = "metric,status", + filter_hostgroups = "", + max_output_length = 1024, + max_buffer_size = 1, + max_buffer_age = 5, + skip_anon_events = 1 + } + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v) + else + broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v) + end + end + retval.__internal_ts_last_flush = os.time() + retval.events = {} + -- Storing the allowed hostgroups in an array + retval.filter_hostgroups_array = {} + if retval.filter_hostgroups and retval.filter_hostgroups ~= "" then + filter_hostgroups_regex = "^(" + for hg in string.gmatch(retval.filter_hostgroups, "([^,]+)") do + table.insert(retval.filter_hostgroups_array, hg) + end + broker_log:info(3, "EventQueue.new: Allowed hostgroups are: " .. table.concat(retval.filter_hostgroups_array, ' - ')) + end + setmetatable(retval, EventQueue) + -- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:add method +-- @param e An event +-------------------------------------------------------------------------------- + +function EventQueue:add(e) + + local type = "host" + local hostname = "Meta" + if e.host_id then + hostname = get_hostname(e.host_id) + if hostname == e.host_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'") + return false + else + broker_log:info(1, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only") + end + end + end + + local service_description = "host" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + else + broker_log:info(1, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. 
e.service_id .."'") + end + end + elseif hostname == "Meta" then + service_description = e.output + end + + -- Getting the host extended information + local xml_url = '' + local xml_notes = '' + local xml_service_severity = '' + local xml_host_severity = '' + if e.host_id then + xml_host_severity = "" .. ifnil(broker_cache:get_severity(e.host_id), '0') .. "" + if e.service_id then + xml_url = ifnil(broker_cache:get_notes_url(e.host_id, e.service_id), 'no notes url for this service') + xml_service_severity = "" ..ifnil(broker_cache:get_severity(e.host_id, e.service_id), '0') .. "" + else + xml_url = ifnil(broker_cache:get_action_url(e.host_id), 'no action url for this host') + xml_notes = "" .. ifnil(broker_cache:get_notes(e.host_id), 'OS not set') .. "" + end + end + + -- Event to send + local event_to_send = "" + + -- Host and Service Status + event_to_send = "" .. + "" .. service_description .. "" .. + "" .. string.match(e.output, "^(.*)\n") .. "" .. + "" .. e.state .. "" .. + "" .. e.last_update .. "" .. + "" .. hostname .. "" --.. + xml_host_severity .. + xml_service_severity .. + xml_notes .. + "" .. xml_url .. "" .. + "" .. ifnil(self.source_ci, 'Centreon') .. "" .. + "" .. ifnil(e.host_id, '0') .. "" .. + "" .. ifnil(e.service_id, '0') .. "" .. + "" .. ifnil(e.scheduled_downtime_depth, '0') .. "" .. + "" + + -- Appending to the event queue + self.events[#self.events + 1] = event_to_send + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush method +-- Called when the max number of events or the max age are reached +-------------------------------------------------------------------------------- + +function EventQueue:flush() + + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + local http_post_data = table.concat(self.events, "") + for s in http_post_data:gmatch("[^\r\n]+") do + broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + end + + broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"") + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.http_server_url) + :setopt(curl.OPT_SSL_VERIFYPEER, 0) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.http_timeout) + :setopt( + curl.OPT_HTTPHEADER, + { + "Content-Type: Content-Type:text/xml", + } + ) + + -- setting the CURLOPT_PROXY + if self.http_proxy_string and self.http_proxy_string ~= "" then + broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'") + http_request:setopt(curl.OPT_PROXY, self.http_proxy_string) + end + + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + -- Handling the return code + local retval = false + if http_response_code == 202 or http_response_code == 200 then + broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code) + -- now that the data has been sent, we empty the events array + self.events = {} + retval = true + else + broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code .. " message is:\n\"" .. http_response_body .. 
"\"\n") + end + + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + local log_level = 1 + local log_path = "/var/log/centreon-broker/stream-connector-bsm.log" + for i,v in pairs(conf) do + if i == "log_level" then + log_level = v + end + if i == "log_path" then + log_path = v + end + end + broker_log:set_parameters(log_level, log_path) + broker_log:info(0, "init: Starting BSM StreamConnector (log level: " .. log_level .. ")") + broker_log:info(2, "init: Beginning init() function") + queue = EventQueue.new(conf) + broker_log:info(2, "init: Ending init() function, Event queue created") +end + +-- Fonction write() +function write(e) + broker_log:info(3, "write: Beginning function") + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check whether the event queue is already full + if (#queue.events >= queue.max_buffer_size) then + broker_log:warning(1, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, flushing data after a 1s sleep.") + os.execute("sleep " .. tonumber(1)) + return queue:flush() + end + + -- Here come the filters + -- Host Status/Service Status only + if not (e.category == 1 and (e.element == 24 or e.element == 14)) then + broker_log:info(3, "write: Neither host nor service status event. Dropping.") + return true + end + + -- on drop les meta services pour le moment + if not e.host_id then + return true + end + + if not e.host_id and not e.output:find("Meta-Service") == 1 then + broker_log:error(1, "write: Event has no host_id: " .. broker.json_encode(e)) + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + broker_log:info(3, "write: Raw event: " .. current_event) + + -- Ignore objects in downtime + if e.scheduled_downtime_depth ~= 0 then --we keep only events in hard state and not in downtime -- Modif du 18/02/2020 => Simon Bomm + broker_log:info(3, "write: Scheduled downtime. Dropping.") + return true + end + + -- Ignore SOFT + if e.state_type and e.state_type ~= 1 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Not HARD state type. Dropping.") + return true + end + + -- Ignore states different from previous hard state only + if e.last_hard_state_change and e.last_check and e.last_hard_state_change < e.last_check then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Last hard state change prior to last check => no state change. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + if current_event == previous_event then + broker_log:info(3, "write: Duplicate event ignored.") + return true + end + + -- Ignore pending states + if e.state and e.state == 4 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. 
" Pending state ignored. Dropping.") + return true + end + + -- The current event now becomes the previous + previous_event = current_event + -- Once all the filters have been passed successfully, we can add the current event to the queue + queue:add(e) + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + broker_log:info(3, "write: Ending function") + + return true +end + From 3797580cd08a1950928cac45309c1779b1a2562a Mon Sep 17 00:00:00 2001 From: omercier <32134301+omercier@users.noreply.github.com> Date: Fri, 31 Jul 2020 16:22:36 +0200 Subject: [PATCH 041/219] fix(bsm): some issues (typo + special chars handling) (#28) * doc: wrong path for OMI in README.md * enh: lighter init log (one-liner) * enh: flush now called when data sizeor age is reached, not exceeded * (feat+doc): Add HP BSM connector, forked from OMI * enh/refacto(bsm): a lot of changes * use of luacurl * improving reliability * fix(merge): restoring OMI doc that was deleted during bad merge in github WUI * fix(bsm): commented concatenation made the script fail * fix(bsm): messages were refused when containing non ASCII chars --- stream-connectors/bsm/bsm_connector.lua | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/stream-connectors/bsm/bsm_connector.lua b/stream-connectors/bsm/bsm_connector.lua index a20f1b5d757..82bb0ca22e4 100644 --- a/stream-connectors/bsm/bsm_connector.lua +++ b/stream-connectors/bsm/bsm_connector.lua @@ -28,6 +28,8 @@ -- Libraries local curl = require "cURL" +require("LuaXML") + -- workaround https://github.com/centreon/centreon-broker/issues/201 local previous_event = "" @@ -179,7 +181,7 @@ function EventQueue:add(e) "" .. string.match(e.output, "^(.*)\n") .. "" .. "" .. e.state .. "" .. "" .. e.last_update .. "" .. - "" .. hostname .. "" --.. + "" .. hostname .. "" .. xml_host_severity .. xml_service_severity .. xml_notes .. @@ -204,9 +206,10 @@ end function EventQueue:flush() broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") - local http_post_data = table.concat(self.events, "") - for s in http_post_data:gmatch("[^\r\n]+") do - broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + + local http_post_data = "" + for xml_i, xml_str in pairs(self.events) do + http_post_data = http_post_data .. tostring(xml.eval(xml_str)) end broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"") From b2c816a334307ae5e18c5b820d312ef5a1cc5453 Mon Sep 17 00:00:00 2001 From: omercier <32134301+omercier@users.noreply.github.com> Date: Mon, 3 Aug 2020 09:53:27 +0200 Subject: [PATCH 042/219] fix(README): BSM connector now needs luaxml (#29) --- stream-connectors/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stream-connectors/README.md b/stream-connectors/README.md index 2410f7faa5b..6b19c3a4631 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -291,7 +291,9 @@ echo "proxy=http://my.proxy.server:3128" >> /etc/yum.conf Now that your Centreon central server is able to reach the Internet, you can run: ```bash -yum install -y lua-curl +yum install -y lua-curl epel-release +yum install -y luarocks +luarocks install luaxml ``` These packages are necessary for the script to run. 
Now let's download the script:

From 8ef3270d30a023d43418d46513d126c259657457 Mon Sep 17 00:00:00 2001
From: omercier <32134301+omercier@users.noreply.github.com>
Date: Tue, 4 Aug 2020 17:17:11 +0200
Subject: [PATCH 043/219] fix(bsm): typo in content-type header definition
 (#30)

---
 stream-connectors/bsm/bsm_connector.lua | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stream-connectors/bsm/bsm_connector.lua b/stream-connectors/bsm/bsm_connector.lua
index 82bb0ca22e4..3b2cd9ad82c 100644
--- a/stream-connectors/bsm/bsm_connector.lua
+++ b/stream-connectors/bsm/bsm_connector.lua
@@ -227,7 +227,7 @@ function EventQueue:flush()
 :setopt(
 curl.OPT_HTTPHEADER,
 {
- "Content-Type: Content-Type:text/xml",
+ "Content-Type: text/xml",
 }
 )

From ac6f64c3e764900a1550f8611c88434b4293bb09 Mon Sep 17 00:00:00 2001
From: UrBnW <40244829+UrBnW@users.noreply.github.com>
Date: Thu, 3 Sep 2020 11:33:59 +0200
Subject: [PATCH 044/219] fix(influx) Ignore time metric (#34)

---
 stream-connectors/influxdb/influxdb-neb.lua | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua
index 14f8b16c452..274528ab938 100644
--- a/stream-connectors/influxdb/influxdb-neb.lua
+++ b/stream-connectors/influxdb/influxdb-neb.lua
@@ -165,7 +165,7 @@ function EventQueue:add(e)
 else
 inst = ",inst=" .. string.gsub(inst, "[ ,=]+", self.replacement_character)
 end
- if not e.service_id or string.match(metric, ".+[.].+") then
+ if (not e.service_id and metric ~= "time") or string.match(metric, ".+[.].+") then
 if not instances[inst] then
 instances[inst] = self.measurement .. service_description .. ",host=" .. host_name .. item .. inst .. " "
 end

From bf341cda1206fb706d59ac1239cc6ef8b7464ce4 Mon Sep 17 00:00:00 2001
From: Polo <ppremont@capensis.fr>
Date: Thu, 24 Sep 2020 19:57:22 +0200
Subject: [PATCH 045/219] Canopsis (#35)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add bbdo2canopsis.lua connector

* canopsis fix README

* fix README canopsis install part

* remove useless warning README canopsis

Co-authored-by: Paul PRÉMONT
---
 stream-connectors/README.md | 189 +++++
 stream-connectors/canopsis/bbdo2canopsis.lua | 526 ++++++++++++++++++
 .../centreon-configuration-screenshot.png | Bin 0 -> 45817 bytes
 3 files changed, 715 insertions(+)
 create mode 100755 stream-connectors/canopsis/bbdo2canopsis.lua
 create mode 100644 stream-connectors/pictures/centreon-configuration-screenshot.png

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 6b19c3a4631..c41a57ddf4b 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -31,6 +31,7 @@ Here is a list of the available scripts:
 * [NDO](#NDO)
 * [HP OMI](#OMI)
 * [PagerDuty](#PagerDuty)
+* [Canopsis](#Canopsis)

 # Elasticsearch
@@ -408,3 +409,191 @@ In case you want to shorten the delay (in seconds) between the reception of an e
 | ---------------- | ------------------ |
 | `max_buffer_age` | 30 (default value) |

+
+# Canopsis
+
+## Links
+
+**Canopsis**
+
+- https://doc.canopsis.net/guide-developpement/struct-event/
+
+## Description
+
+This script uses the stream-connector mechanism of Centreon to get events from
+the pollers. The event is then translated to a Canopsis event and sent to the
+HTTP REST API.
+
+## Technical description
+
+This connector follows the best practices of the Centreon documentation
+(see the listed links in the first section). 
+
+The script is written in Lua, as imposed by the stream-connector specification.
+
+It gets all the events from Centreon and converts these events into
+a Canopsis-compatible JSON format.
+
+Filtered events are sent to the HTTP API of Canopsis in chunks to reduce the number of
+connections.
+
+The filtered events are:
+
+- acknowledgment events (category 1, element 1)
+- downtime events (category 1, element 5)
+- host events (category 1, element 14)
+- service events (category 1, element 24)
+
+Extra information is added to the hosts and services as below:
+
+- action_url
+- notes_url
+- servicegroups (for service events)
+- hostgroups (for host events)
+
+### Acknowledgment
+
+Two kinds of ack are sent to Canopsis:
+
+- Ack creation
+- Ack deletion
+
+An ack is positioned on the resource/component reference
+
+### Downtime
+
+Two kinds of downtime are sent to Canopsis as "pbehavior":
+
+- Downtime creation
+- Downtime cancellation
+
+A unique ID is generated from the information of the downtime carried by Centreon.
+
+*Note: Recurrent downtimes are not implemented by the stream connector yet.*
+
+### Host status
+
+All HARD events with a state change from hosts are sent to Canopsis.
+
+Take care of the state mapping as below:
+
+```
+-- CENTREON // CANOPSIS
+-- ---------------------
+-- UP (0) // INFO (0)
+-- DOWN (1) // CRITICAL (3)
+-- UNREACHABLE (2) // MAJOR (2)
+```
+
+### Service status
+
+All HARD events with a state change from services are sent to Canopsis.
+
+Take care of the state mapping as below:
+
+```
+-- CENTREON // CANOPSIS
+-- ---------------------
+-- OK (0) // INFO (0)
+-- WARNING (1) // MINOR (1)
+-- CRITICAL (2) // CRITICAL (3)
+-- UNKNOWN (3) // MAJOR (2)
+```
+
+## Howto
+
+### Prerequisites
+
+* Lua version >= 5.1.4
+* install lua-socket library (http://w3.impa.br/~diego/software/luasocket/)
+  * >= 3.0rc1-2 (from sources; you also have to install the gcc + lua-devel packages), available in the Canopsis repository
+* centreon-broker version 19.10.5 or >= 20.04.2
+
+### Installation
+
+**Software deployment from sources (centreon-broker 19.10.5 or >= 20.04.2):**
+
+1. Copy the Lua script `bbdo2canopsis.lua` from the `canopsis` dir to `/usr/share/centreon-broker/lua/bbdo2canopsis.lua`
+2. Change the permissions of this file: `chown centreon-engine:centreon-engine /usr/share/centreon-broker/lua/bbdo2canopsis.lua`
+
+**Software deployment from packages (centreon-broker >= 20.04.2):**
+
+1. Install the Canopsis repository first
+
+```
+echo "[canopsis]
+name = canopsis
+baseurl=https://repositories.canopsis.net/pulp/repos/centos7-canopsis/
+gpgcheck=0
+enabled=1" > /etc/yum.repos.d/canopsis.repo
+```
+
+2. Install the connector with Yum
+```
+yum install canopsis-connector-centreon-stream-connector
+```
+
+**Enable the connector:**
+
+1. Add a new "Generic - Stream connector" output on the central-broker-master (see the official documentation)
+2. Export the poller configuration (see the official documentation)
+3. Restart the services: 'systemctl restart cbd centengine gorgoned'
+
+If you modify this script in development mode (directly on the Centreon host),
+you will need to restart the Centreon services (at least the centengine service).
+
+### Configuration
+
+All the configuration can be made through the Centreon interface as described in
+the official documentation. 
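+
+Under the hood, every parameter filled in this output form is handed to the
+script's `init()` function and merged over the defaults of its `settings`
+table. A minimal sketch of that merge (simplified from the `init()` function
+at the end of `bbdo2canopsis.lua`):
+
+```
+-- Simplified from init(): values coming from the Broker output
+-- configuration override the defaults declared in the settings table.
+function init(conf)
+  for k, v in pairs(conf) do
+    settings[k] = v
+  end
+  broker_log:set_parameters(3, settings.debug_log)
+end
+```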
+
+**The main parameters you have to set are:**
+
+```
+connector_name = "your connector source name"
+canopsis_user = "your Canopsis API user"
+canopsis_password = "your Canopsis API password"
+canopsis_host = "your Canopsis host"
+```
+
+**If you want to customize your queue parameters (optional):**
+
+```
+max_buffer_age = 60 -- retention queue time before sending data
+max_buffer_size = 10 -- buffer size in number of events
+```
+
+**The init spread timer (optional):**
+
+```
+init_spread_timer = 360 -- time to spread events in seconds at connector start
+```
+
+This timer is needed for the start of the connector.
+
+During this time, the connector sends all HARD state events (with state change or
+not) to update the event information from Centreon to Canopsis. In that way
+the level of information converges.
+
+*This implies a burst of events and a higher load for the server during this time.*
+
+**On the Centreon WUI you can set these parameters as below:**
+
+In Configuration > Pollers > Broker configuration > central-broker-master >
+Output > Select "Generic - Stream connector" > Add
+
+![centreon-configuration-screenshot](pictures/centreon-configuration-screenshot.png)
+
+### Check the output
+
+By default the connector uses the HTTP REST API of Canopsis to send events.
+
+Check your alarm view to see the events from Centreon.
+
+All logs are dumped into the default log file "/var/log/centreon-broker/debug.log"
+
+#### Advanced usage
+
+You can also use a raw log file to dump all Canopsis events and manage your
+own way of sending events (for example with logstash) by editing the "sending_method"
+variable and setting it to the "file" method.
diff --git a/stream-connectors/canopsis/bbdo2canopsis.lua b/stream-connectors/canopsis/bbdo2canopsis.lua
new file mode 100755
index 00000000000..59b3065281f
--- /dev/null
+++ b/stream-connectors/canopsis/bbdo2canopsis.lua
@@ -0,0 +1,526 @@
+#!/usr/bin/lua
+-----------------------------------------------------------------------------
+--
+-- DESCRIPTION
+--
+-- Centreon Broker Canopsis Connector
+-- Tested with Canopsis 3.42 and Centreon 20.04.6
+--
+-- References :
+-- * https://doc.canopsis.net/interconnexions/#connecteurs
+-- * https://docs.centreon.com/docs/centreon/en/19.10/developer/writestreamconnector.html
+-- * https://docs.centreon.com/docs/centreon-broker/en/latest/exploit/stream_connectors.html#the-broker-cache-object
+-- * https://docs.centreon.com/docs/centreon-broker/en/3.0/dev/mapping.html
+--
+-- Prerequisites :
+-- * install packages gcc + lua-devel
+-- * install lua-socket library (http://w3.impa.br/~diego/software/luasocket/)
+-- * Centreon version 19.10.5 or >= 20.04.2
+
+-----------------------------------------------------------------------------
+-- LIBS
+-----------------------------------------------------------------------------
+
+local http = require("socket.http")
+local ltn12 = require("ltn12")
+
+-----------------------------------------------------------------------------
+-- GLOBAL SETTINGS
+-----------------------------------------------------------------------------
+version = "1.0.0"
+
+settings = {
+  debug_log = "/var/log/centreon-broker/debug.log",
+  verbose = true,
+  connector = "centreon-stream",
+  connector_name = "centreon-stream-central",
+  stream_file = "/var/log/centreon-broker/bbdo2canopsis.log",
+  canopsis_user = "root",
+  canopsis_password = "root",
+  canopsis_event_route = "/api/v2/event",
+  canopsis_downtime_route = "/api/v2/pbehavior",
+  canopsis_host = "localhost",
+  canopsis_port = 8082, 
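+  -- The two fields below choose how events leave the connector: with
+  -- sending_method = "api" they are POSTed to the Canopsis HTTP REST API,
+  -- while "file" dumps the raw JSON into stream_file for an external
+  -- shipper (see the "Advanced usage" section of the README).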
+ sending_method = "api", -- methods : api = Canopsis HTTP API // file = raw log file + sending_protocol = "http", + timezone = "Europe/Paris", + init_spread_timer = 360 -- time to spread events in seconds at connector starts +} + + +----------------------------------------------------------------------------- +-- CUSTOM FUNCTIONS +----------------------------------------------------------------------------- +-- Write a debug log when verbose is true +local function debug(output) + if settings.verbose then broker_log:info(3, "[STREAM-CANOPSIS] " .. output) end +end + +-- Write an important log +local function log(output) + broker_log:info(1, "[STREAM-CANOPSIS] " .. output) +end + +-- Dump an error +local function fatal(output) + broker_log:error(1, "[STREAM-CANOPSIS] " .. output) +end + +local function getVersion() + log("VERSION : ".. version) +end + +-- Send an event to stream file +local function writeIntoFile(output) + local file,err = io.open(settings.stream_file, 'a') + if file == nil then + fatal("Couldn't open file: " .. err) + else + log("Writting to stream file : " .. settings.stream_file) + file:write(broker.json_encode(output)) + file:close() + end +end + +local function deleteCanopsisAPI(route) + local http_result_body = {} + + log("Delete data from Canopsis : " .. route) + + local hr_result, hr_code, hr_header, hr_s = http.request{ + url = settings.sending_protocol .. "://" .. settings.canopsis_user .. ":" .. settings.canopsis_password .. "@" .. settings.canopsis_host .. ":" .. settings.canopsis_port .. route, + method = "DELETE", + -- sink is where the request result's body will go + sink = ltn12.sink.table(result_body), + } + + -- handling the return code + if hr_code == 200 then + log("HTTP DELETE request successful: return code is " .. hr_code) + else + fatal("HTTP DELETE FAILED: return code is " .. hr_code) + for i, v in ipairs(http_result_body) do + fatal("HTTP DELETE FAILED: message line " .. i .. ' is "' .. v .. '"') + end + end + +end + +-- Send an event to Canopsis API +local function postCanopsisAPI(output, route) + local post_data = broker.json_encode(output) + local http_result_body = {} + + route = route or settings.canopsis_event_route + + log("Posting data to Canopsis " .. post_data .. " => To route : ".. route) + + local hr_result, hr_code, hr_header, hr_s = http.request{ + url = settings.sending_protocol .. "://" .. settings.canopsis_user .. ":" .. settings.canopsis_password .. "@" .. settings.canopsis_host .. ":" .. settings.canopsis_port .. route, + method = "POST", + -- sink is where the request result's body will go + sink = ltn12.sink.table(result_body), + -- request body needs to be formatted as a LTN12 source + source = ltn12.source.string(post_data), + headers = { + -- mandatory for POST request with body + ["content-length"] = string.len(post_data), + ["Content-Type"] = "application/json" + } + } + -- handling the return code + if hr_code == 200 then + log("HTTP POST request successful: return code is " .. hr_code) + else + fatal("HTTP POST FAILED: return code is " .. hr_code) + for i, v in ipairs(http_result_body) do + fatal("HTTP POST FAILED: message line " .. i .. ' is "' .. v .. 
'"') + end + end +end + + +-- Convert Centreon host state to a Canopsis state : +-- +-- CENTREON // CANOPSIS +-- --------------------- +-- UP (0) // INFO (0) +-- DOWN (1) // CRITICAL (3) +-- UNREACHABLE (2) // MAJOR (2) +-- +local function hostStateMapping(state) + local canostate = { 0, 3, 2 } + return canostate[state+1] -- state + 1 because in lua the index start to one +end + +-- Convert Centreon service state to a Canopsis state : +-- +-- CENTREON // CANOPSIS +-- --------------------- +-- OK (0) // INFO (0) +-- WARNING (1) // MINOR (1) +-- CRITICAL (2) // CRITICAL (3) +-- UNKNOWN (3) // MAJOR (2) +-- +local function serviceStateMapping(state) + local canostate = { 0, 1, 3, 2 } + return canostate[state+1] -- state + 1 because in lua the index start to one +end +-- **************** +-- GET BROKER_CACHE INFORMATIONS : + +-- Convert host_id to an hostname (need to restart centengine) +local function getHostname(host_id) + local host_name = broker_cache:get_hostname(host_id) + if not host_name then + debug("Unable to get name of host from broker_cache") + host_name = host_id + end + return host_name +end + +-- Convert service_id to a service name (need to restart centengine) +local function getServicename(host_id, service_id) + local service_description = broker_cache:get_service_description(host_id, service_id) + if not service_description then + debug("Unable to get service description from broker_cache") + service_description = service_id + end + return service_description +end + +-- Get a service groups list of a service +local function getServiceGroups(host_id, service_id) + local servicegroups = broker_cache:get_servicegroups(host_id, service_id) + local servicegroups_list = {} + + if not servicegroups then + debug("Unable to get servicegroups from broker_cache") + else + for servicegroup_id, servicegroup_name in pairs(servicegroups) do + table.insert(servicegroups_list, servicegroup_name["group_name"]) + end + end + + return servicegroups_list +end + +-- Get a hostgroups list of a host +local function getHostGroups(host_id) + local hostgroups = broker_cache:get_hostgroups(host_id) + local hostgroups_list = {} + + if not hostgroups then + debug("Unable to get hostgroups from broker_cache") + else + for hostgroup_id, hostgroup_name in pairs(hostgroups) do + table.insert(hostgroups_list, hostgroup_name["group_name"]) + end + end + + return hostgroups_list +end + +-- Get notes url list from a host or a service +local function getNotesURL(host_id, service_id) + local notes_url = '' + + if not service_id then + notes_url = broker_cache:get_notes_url(host_id) + else + notes_url = broker_cache:get_notes_url(host_id, service_id) + end + + if notes_url ~= "" and notes_url then + debug("extra information notes_url found for host_id "..host_id.." => "..notes_url) + return notes_url + else + debug("no extra information notes_url found for host_id "..host_id) + return "" + end +end + +-- Get action url list from a host or a service +local function getActionURL(host_id, service_id) + local action_url = '' + + if not service_id then + action_url = broker_cache:get_action_url(host_id) + else + notes_url = broker_cache:get_action_url(host_id, service_id) + end + + if action_url then + debug("extra information action_url found for host_id "..host_id.." 
=> "..action_url) + return notes_url + else + debug("no extra information action_url found for host_id "..host_id) + return "" + end +end + +-- **************** + +-- Translate Centreon event to Canopsis event +local function canopsisMapping(d) + event = {} + -- HOST STATUS + if d.element == 14 and stateChanged(d) then + event = { + event_type = "check", + source_type = "component", + connector = settings.connector, + connector_name = settings.connector_name, + component = getHostname(d.host_id), + resource = "", + timestamp = d.last_check, + output = d.output, + state = hostStateMapping(d.state), + -- extra informations + hostgroups = getHostGroups(d.host_id), + notes_url = getNotesURL(d.host_id), + action_url = getActionURL(d.host_id) + } + debug("Streaming HOST STATUS for host_id ".. d.host_id) + -- SERVICE STATUS + elseif d.element == 24 and stateChanged(d) then + event = { + event_type = "check", + source_type = "resource", + connector = settings.connector, + connector_name = settings.connector_name, + component = getHostname(d.host_id), + resource = getServicename(d.host_id, d.service_id), + timestamp = d.last_check, + output = d.output, + state = serviceStateMapping(d.state), + -- extra informations + servicegroups = getServiceGroups(d.host_id, d.service_id), + notes_url = getNotesURL(d.host_id, d.service_id), + action_url = getActionURL(d.host_id, d.service_id) + } + debug("Streaming SERVICE STATUS for service_id ".. d.service_id) + -- ACK + elseif d.element == 1 then + event = { + event_type = "ack", + crecord_type = "ack", + author = d.author, + resource = "", + component = getHostname(d.host_id), + connector = settings.connector, + connector_name = settings.connector_name, + timestamp = d.entry_time, + output = d.comment_data, + origin = "centreon", + ticket = "", + state_type = 1, + ack_resources = false + } + if d.service_id then + event['source_type'] = "resource" + event['resource'] = getServicename(d.host_id, d.service_id) + event['ref_rk'] = event['resource'] .. "/" .. event['component'] + event['state'] = serviceStateMapping(d.state) + else + event['source_type'] = "component" + event['ref_rk'] = "undefined/" .. event['component'] + event['state'] = hostStateMapping(d.state) + end + + -- send ackremove + if d.deletion_time then + event['event_type'] = "ackremove" + event['crecord_type'] = "ackremove" + event['timestamp'] = d.deletion_time + end + + debug("Streaming ACK for host_id ".. d.host_id) + + -- DOWNTIME (to change with Canopsis "planning" feature when available) + elseif d.element == 5 then + + local canopsis_downtime_id = "centreon-downtime-".. d.internal_id .. "-" .. d.entry_time + + debug("Streaming DOWNTIME for host_id ".. d.host_id) + + if d.cancelled then + deleteCanopsisAPI(settings.canopsis_downtime_route .. "/" .. canopsis_downtime_id) + else + event = { + _id = canopsis_downtime_id, + author = d.author, + name = canopsis_downtime_id, + tstart = d.start_time, + tstop = d.end_time, + type_ = "Maintenance", + reason = "Autre", + timezone = settings.timezone, + comments = { { ['author'] = d.author, + ['message'] = d.comment_data } }, + filter = { ['$and']= { { ['_id'] = "" }, } }, + exdate = {}, + } + if not d.service_id then + event['filter']['$and'][1]['_id'] = getHostname(d.host_id) + else + event['filter']['$and'][1]['_id'] = getServicename(d.host_id, d.service_id).."/"..getHostname(d.host_id) + end + -- This event is sent directly and bypass the queue process of a standard event. 
+ postCanopsisAPI(event, settings.canopsis_downtime_route) + end + + -- Note : The event can be duplicated by the Centreon broker + -- See previous commit to get the "duplicated" function if needed + + event = {} + end + return event +end + + +function stateChanged(d) + + if d.service_id then + debug("Checking state change for service_id event [".. d.service_id .. "]") + else + debug("Checking state change for host_id event [".. d.host_id .. "]") + end + + if d.state_type == 1 and -- if the event is in hard state + d.last_hard_state_change ~= nil then -- if the event has been in a hard state + + if d.last_check == d.last_hard_state_change then -- if the state has changed + + if d.service_id then + debug("HARD state change detected for service_id [" .. d.service_id .. "]") + else + debug("HARD state change detected for host_id [" .. d.host_id .. "]") + end + + return true + + elseif os.time() - connector_start_time <= settings.init_spread_timer then -- if the connector has just started + + if d.service_id then + debug("HARD event for service_id [" .. d.service_id .. "] spread") + else + debug("HARD for host_id [" .. d.host_id .. "] spread") + end + + return true + + end + + -- note : No need to send new event without last_hard_state_change because + -- there is no state either + + end + + return false +end + +----------------------------------------------------------------------------- +-- Queue functions +----------------------------------------------------------------------------- + +local event_queue = { + __internal_ts_last_flush = nil, + events = {}, + max_buffer_size = 10, + max_buffer_age = 60 +} + +function event_queue:new(o, conf) + o = o or {} + setmetatable(o, self) + self.__index = self + for i,v in pairs(conf) do + if self[i] and i ~= "events" and string.sub(i, 1, 11) ~= "__internal_" then + debug("event_queue:new: getting parameter " .. i .. " => " .. v) + self[i] = v + else + debug("event_queue:new: ignoring parameter " .. i .. " => " .. v) + end + end + self.__internal_ts_last_flush = os.time() + debug("event_queue:new: setting the internal timestamp to " .. self.__internal_ts_last_flush) + return o +end + +function event_queue:add(e) + -- we finally append the event to the events table + if next(e) ~= nil then + self.events[#self.events + 1] = e + debug("Queuing event : " .. broker.json_encode(e)) + end + -- then we check whether it is time to send the events to the receiver and flush + if #self.events >= self.max_buffer_size then + debug("event_queue:add: flushing because buffer size reached " .. self.max_buffer_size .. " elements.") + self:flush() + return true + elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age and + #self.events ~= 0 then + debug("event_queue:add: flushing " .. #self.events .. " elements because buffer age reached " .. (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.") + self:flush() + return true + else + return false + end +end + +function event_queue:flush() + --debug("DUMPING : " .. 
broker.json_encode(self.events)) + postCanopsisAPI(self.events) + -- now that the data has been sent, we empty the events array + self.events = {} + -- and update the timestamp + self.__internal_ts_last_flush = os.time() +end + +----------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +----------------------------------------------------------------------------- + +-- Init a stream connector +function init(conf) + connector_start_time = os.time() + + -- merge configuration from the WUI with default values + for k,v in pairs(conf) do settings[k] = v end + + broker_log:set_parameters(3, settings.debug_log) + getVersion() + debug("init : Beginning init() function") + debug("CONNECTOR:" .. settings.connector .. ";") + debug("CANOPSIS_HOST:" .. settings.canopsis_host .. ";") + + queue = event_queue:new(nil, conf) + debug("init : Ending init() function, Event queue created") +end + +-- Write events +function write(d) + debug("write : Beginning write() function") + if settings.sending_method == "api" then + --postCanopsisAPI(canopsisMapping(d)) -- for debug only + queue:add(canopsisMapping(d)) + elseif settings.sending_method == "file" then + writeIntoFile(canopsisMapping(d)) + --writeIntoFile(d) -- for debug only + end + debug("write : Ending write() function") + return true +end + +-- Filter events +function filter(category, element) + -- Filter NEB category types + if category == 1 and (element == 1 or -- Acknowledment + element == 5 or -- Downtime + element == 14 or -- Host status + element == 24) then -- Service status + return true + end + return false +end diff --git a/stream-connectors/pictures/centreon-configuration-screenshot.png b/stream-connectors/pictures/centreon-configuration-screenshot.png new file mode 100644 index 0000000000000000000000000000000000000000..7ba8f9f9f1fec3f1fe27796aeaaef0849f59175e GIT binary patch literal 45817 zcmb?@bzD?izxSv|K>?8vkkmmz8tDdcknV1g6a=I@3|c@yI)?7<9;LgxyStlr4W8%T z=ic|c_4&N}k8{S&%&xW8Z+&ZR9~miOj0c1dAP@+Ks0d6B0=bI^f!r=fy$$vRpt41R z|J|_?6jeY)MIE1&o`FD~LPTM&73^a+CLJ_z53f;njyD?VH*ObBg!}r=nI)9re+T9kf0G_Y&V~~Pf8W_6x!zB@?zsIZ51H_o z$cguQ<<D6FY*p-4(E3;y#b2w@URpkUQc{5APG zG&Hmq*gh~QmS3aY_7rRC)us;Z2lq45&j(F5!_vxXmB^QZ;B~O_C z@FRAU8T3oDkj0)*4^}gAt{Sy)rl=HN5baQ%odQ66M2TwxP za8^e~`~$8mos5*(F58M;l_NhcEAY1iG3*Al`+Ag?J2F z8VU*vv$L=4wlqvkOrXC0{{G(HFKKBvhV!N)Ug>(j&!azfl z&l+Q=zP>(sy+j;Og>G{IXr#`z zHYNh}+}vEN}e~towE!W_`Rh$!apRa(H&Szg+gA zKh^1YYkerkX+5Xx>~K9iB7y@nmn-rs)kk9{cTgTmNJ?U0U?3l<25FSCclP&nRh82Ev3eI;LpC=yyp6V}YR-?h$=J<^vGffL=(TGndV6<|r`@K( z^S?OVpP!qP0!^n=e=kF!(rmJl*Xigx8Gqb%#fN^10+ni$VfxyQ$tt_RdZrx46c3u{{7aSdyi!ImpWaX zor8meZ{NOsLINDZ8Uk`yu9{#FE1|}-@n2FZL;piQ2R8oE0){K z!{hF~duQk8`;YhZm?E>X)}vTWE-o)YGvq2zuOB+!JjFp{|DM~Q<1N{i_O0~v^gexJ ztY>siq=0_?{Z3zB@tDuCn&99t)5C14^Ca*Uid&(bwWI#P$xp;Q1L^hU)z(#{1zq}w zHJhFY(OfM_vblT+WbKyQWLAN!%IesQ>_t56{x$W|kFDL^+J$=eufyBcfwP8Y*5 zh}d4@`i_i@d_}?B+jOQ*H4a}{t70TTPrX}f0s85O4pK- z=b#@yJ}tI|sumg0Y1dXOW+~ak`~~`gyIYYO8Kr@)Zjw@# zmp7lTovgGmfsR!>SZQfZ1-}rNa46TTvdzfI7|zqCgeLW-gK5;bL@r}N7$F_QAuH?v zj_Kp$)4OU-+x}KqIGQ?J%htfapva)p^C1bBQBVBBf?hwKn7Fu~uP>MN3ZwBrx>Al> zN?IC!Yd;FI?0n3y!9B|jmCI`%Fk)e2W9#4tvlB^QjAE*f=fIx*T4GTP|nbA*fBAm4%+LzqI znsI75wl+4QBF`^6Hq+D7615zem!JaZ-)kjG76I4@mB0-N?9wSQ zU9SyGd!$CHzeXoSxG7R$adL4PF!_FW%tg*MV2hV^`1EaQ(R-tHC{8=~uz>B)Zb)}- z<(9|a;1GfXGx~dX(Nn=Ye?CUs#UlLk=@;!1pA;z~(;!Mayfg73{D7!e|hq3C0KifgJzo{6c!Gpz09%`B6%%R3b+%#OV9Q z`O!Q+5_?{yxgiApbuHo@KK-fs|A*868&3VtvqhZ4w)PTvO4K?T#>dC~Hhvx|-JoCy zL_qgo1~sQ05fT}hA@g#J#~nQiLJRbU0rLdiwC+Ut6r~VM?O7dlUGsE`TaNA 
zozYIpbxIhlP^E3Jtf~@Ek@M6^yx9OKeBxP9T54-$C7+?-8C*A})GZ+%&9pR$x!j?` zW49DoWmib6u3LuaYTQK&XNb8rp1;Su9@ZIQz-wDPTU*+Avt>wONPJ%DqJt7Q18K&ds$FOe|hNvB=B zfW>wjvLgU0s8~ucB4jaP$KqgPwCL8YThhtFTzi}3_^P9EyiO;hZ$qE>H#Ltgy>XB` zcf4TkF5C#EQRcGSx`3@^6fT0PcVnW$3beGDnVEW-naf-iqqc~MHyjSXiv}9mo^X&W zNIl$5IN=6KBV;Xz@rj8+?YzN{xYI@^Ceh*H&6(TxaV6JAycDPp4^5)scH#5oIYd|T zKj3K$3}ifdMl<^i#AArO{P-FEEagK7S9cX!rO=K85(VR45tPvGI9^u5&T@-+*RxehC=;Q7Ud@r3o?fz6o;JeO z#lRp{;OQq@Tdi8>)4?od1ON_zM&aV&)m|QrZ;qAt`TJwszn?J`;!RCWJ(Q;nkBOnh z0~|$dVRcniL7~{7lQ33FSyr|eplX1Gab#gIn5}JTuMsUhy%wnL+lXsY5|Yls?3|p< zY}FzS4Gjq)Y*{duh0&>7T3J2ue8TZ*W<3v~B_ors-yR0$8ed=E-=J5KaQ#FgA|A7e z^5Wv+jg1XZr$IwmfqGBdPOnk^Lnk7T@5~5P_#Cy8w$9FyqM~Rf{kF1c*XSQV-Z2t5 zySSuDMpNMV6%`iN)zu9cmk2y1U^k0FAl&?;b6ojA2c4UrSEL>sDbRH~Tx-lboO0R* zlWi6t0NuRD8i$>G-`~!MYrBfVz-@?$i6JKjFmK+y{rh|yF%3x6SXcFjLqfMNuwvWI zvFvQxufJ#3)<6f*@DO|r19LE)TJaB&=dholp(UV;CniEMF$2NR)YR37N74t4`L8eb zV`7K_QB_x0XJBBcRS&H%C@NZ?tYX)e19ZXdWEREs6pT7<+l`krG`a)^dkd{3+_pTQ zW`6nLP=0@#1cOHMxj6da(HV2s1H{eidclEpdUghIo+3qv5Db=_lH$`iS6IjtB*J3Y zMQmcS4%iGJKJqVd3DG_8<4{;{jURDW}6;vdHr}4l{uHj*gB7-97lgwnuntTI@1mFf( zg9ULtlnegEdbQcB3!0zZAxeURrq<5k+$xPG~z zP{C>H9>iYH!oos4Y=EbGu`fmZ@#DuA0*Ojli$wDB^2px0wDcB6=%1ROpATR!_~xKf zYI1T%#H;$|=H*|X?|{b_%kNgJL9bn5nIb`GP6~ZnK>M@l)hE zxOZNwr4F~NeX;@-=cA2LeKaC;-NjRU2Qnt!#*ozl0V8wuj0AOvm(vtvM zo`q##csQKQ{^01y^?b7oV7)hQ-hc<*+T0G%5UPh@El;de^Ky!}H~5T&hxZl+2qNNQ zuU*}|ro^B#YFNw3bmrG*P_#e^)^@vOg;!;MEIkt~Dkx9|gIG$c3)CV2eA@FCK^J`A zfC`NQJ;Ohmj*dfjZsj%s;jF(l?M2-rj+^m7kvAFW?F81 zl8ux%WG{&S4!G7xMjb(ae=IC4&@;rt>FKDbBH2s{$r4Ps#bFJF8wqc`PB=6*G^!mB z*B;X-ot&N)&}Q|odK>+6zmW6=Ra_^9XE&8j*f}q4#3W=@Jo1!gHxu_FoDElxRr$5S{ zh-sLb;@j}PlRvK|Gh2v8n+R|wjfn|`7cXqWwQB9D5He?rO_ff!Av{rEzT6I&kBN;n zsqQQX{i)1sa;*4+JSa_o2$>l#$P;5}mVZhB(?ZGN$KFJbz(7o_64PaWT{n6fJi1DGh`xFT#bJn>$Ad zl(u}QW3$>6P;MwGy#jkKAbD9?CZeJ~zu%>k#z;J2Vq;T!iQClNysvIFSqbo9S4#^4 z<2#6(5g#9)sHo^g_9}|mQA)TL6l!N@r>#A`KAgAjG<&VuYyf&B3I-vIUdtnBrR^pm zzecyY3@zUB;^Ls~64}Z?hCCoFu;l(#ViJAQXt#@?BryJAOEw!FX@)SZ$ zGt`DB{}Ta?uJmbq4+ZrPpA5lmw?&MHhetrLvc0W+8i>Xe*)ce1(;3AI>J}jO%G6|Y zDL_@T1zZ@2g^&FjjZ3(=xyOfxhc5;}XNY@S^493HfD9+m{wXID6B7qVgbo3pEKbnx zclm{dnWp4sq@?Y9mmLSY6OsKHRP1Tp+F%AT&?Z){L8w4q(J3QdU_#A^tV-Q5kR*BmR*Md5Fu4Hc8*1ygSOb!S0ST(DcNeJVl-=nVNl z-2l@mH0UJ{&&0ta+I}|90PyUQnw2qa;hBDjxoV9gD?F8&xr7m2eqM7Z)UHiBjLjt_ z_lRBQ>Kb*Bo#^r761QttDB-L3f04zRXk_h)W)-|ilZx$v^Ya_^JS|Y^u!so{9|n|$ z`k0oMHhZ@tdf)^{LQ3iis0wT!N8)8(zSq?SttA2jbOMZGXTjW@I6@*xH-_};JVGb4 z(ZUz}{BD3kl}zS=5e;O!p&a$)`T3+lZC?rJ0#j7kA+r~;v zywIQ2UGskN{_FPdf1=Za;+eaDfn%qD;eduu|2z@sL%emm;{YVZag|89mKGGqW(}r3 zGdeT8VuQm^`G}iWt+}oOrq*SJtqYnoI`D|F*ZIK%vAHRLklAoWy->R;#s7TxxKBKFv zD?k6vNkHINt;_jI1_E?3W@h<+pzG^|WlwN%ivf-RvvtJYK2l$KNl8h`$yp&8%@)c% zTrdje-r|p=mH@l}-4S#PacSvm0D?~Pa6kHhOlLGZ7^!rsg$=l#<0U5HQBiGV-n~W&S%Y9E zy!Z8YeSMOp4(l4286_mXOQq?mvT<=$20UP9W{zexVY6Q8dHwa??)B2nKL`Ujy(!}1 zCsz7%OG{yqk*(JhGN*sBnU4MR?(6HL#ta2Ckf+|5#G};Kvc03@f2kS7v~*2~YG2)| zsnw`*Z2aP2%581AzcWL0Ud^}@qEA-4x5gOV>=Z@|`4y5fXuQ9-cTzC~W6^K>2|#yc zMFp58znhDKr?eDp?(MBV3dB%D!{vzyjs7(-`bfB}XS|-QZ+&3WYxz-PGHjryhw4#B zNZDY!HE~*VJf&Xn-lNw@PZe4C_I7sCHS~;(R+c)Wlai7EoSXUm)eDoX_S0+~P}d}5 zID&KtxVfvirjl7djv=yU9T)3R!SKaq&P>H8D(dT&@F0J}iKpwlR41mbfV+!c5g zXn3^gySEfr->ceo#c&#zI4*TW4i0DP7U?#99|bc5kZ+W7H7dZU0qTX9mlu)$TQV{- zSA#0iMP_Z)L-;#TMXpZf1HlwPgZm&wP8EP<7{Dq3;W6d>cjaVwgac?d-Tc?igPDp- zN=j2xQzax_|Ch4JNWuI{ZxRw0Z6P|oMv8tAURjqf8R*qn|&_TyP|O#-&o!Qa|3KF-a^_`|!dzJA(iyQa-eZ0G1`YpTY{ z$S7Sd!}`;w86xC%(^ls-9(XB^4+Mgp@7}<$LYVWrR^*~e!j2330Q~^A zk7PIgET!c#*cKNns;X!-c!`n?z!{Za2;SjIx$&>!vfHrEZf)lp&5d?6HBpe0&x8A8 
ziu~R^u9gF4+tQDtaz^Vi+l}BXzv>0-fx3~-kLh}ulq(SHWBc%L7sN^t|fI}cR_qKmD&i-AH;CKZj%C8YPX>^)5KVs2B zzs_9#ufjs0IU)+ z#F(X?GqfvHrnfPzJ8757F;EIsxz=WV)So)6VSu>54VdK!@NQGRdF&9#l5D7fvzytw zir0fbxJ$4{12!zssR$ugi;OpyH$!DL*VK2fs~e_ZCKFp;Q2g2M0i^6=txAenbut$z3b&sKX4 zD=?JOu5wIi;yXXyq@$+IE*#nxP9)2dbB_{jlw3kYyox^*W_2YRdGE>qA5|or-aINw zb2&a^z?i|hyc!P@BI}5L<UW1X-gfumZ9 z>;_Fn$6X}veUkIze~pcg$6QTGM!S}Oyo;qNEkVF?61qmB5>^u%+?Q#y-sV4ygL5%f z>V$oGTVZf&(|^MWf24w~kY94&$;U`7QzOY0t`b^m%FDW=r z!l2O<@vd2hRN1x{Z;rEzxJ~722888F&u;{M3+QUjT z6_PN84Gaxj4_4Ix#4r44VX#yf+r^WY*rq04HN) zj5o|n5>D)wmnY+p!?^zY5q8&-9kG~>Z8!>kQA+`{DooGNJWn=@hML-S3SmC=MF{pY zB-CaN)=`rPRc2=o68RYxwYD(_4HXN?r7Ybw5r?Ve9PMf2@9*#BHfWRXt(m~Ad0#!V zUtx@tcncFukbwq8>>d5qeh44}{OAe*eef)Qz-M=DcI=n8H)>!%o!QTX5Yh`uxlwx) zscn3+Zr#rl`+O4@s*Rw*z8jnRoxzrv*s@Zs+NV*^+ruzqygoBvSXx+I&Yn+N6d2+= zmoGHz@`E;TyWT{_3dO}2Pw5+@&%F2b*;>zU8*S%S?Y`3|0v9D??-+^C7MHuaZTv@0 zf$9r1{3@4qB?nbifo?6>^H4F1JPZXdWNc!GpNic)bwa{yJlCb~5SAm=UP!x(rIvI` z$CWAEttFPSw{EKb0$cRmw5xJn!#?6$ zmYNR7UE9x}+0+sV=-&*f;{w_$ljfrz8U4|0CU0Tv!NIO$RbJj3o15k-$rH7qh1QG0 zKyV4C*BaS7w5`#)-W;C&orgi52nx!bd$kRPAH^*9ey%bfyOQ;mUGGmi9&lKl{nSXv z2dG|L5)`hBZ7T%n$o1Sz_^Hk#S32n4&H;!yuoC(ceDQ1^Zgcqbd8sK5t^Oo%%U09N zdIUH6OkZubRjKRi0~XQSZznWHuN}AY^gZ7zs+M}Wlx@tR*)o|63lX&|J(MAel`=Fn zm4prFYHjwF(D0Lu|9(AMDMuMwla8ok3V+2b@X)Sw>D1h9|pzJ6Y*gYbI&Ahhe^*YFRPSI5^lR zjOqWd-hvn_v?z=o*g-Q?IxK`XTp)dso&>5hP$Vu~>?nY|f?hvx0>!J*($><_I%c~i z!A4v(l6P2yvZQ7_ykp)}nEf^h_$j&0n)<3rDRQHuql$2Z{AFV1pfSiLsnAj&&DTL= zQ&ZE^JZr94q+h?yA2jujaplm_wTCF4>pQZQc!(2ZPJU&ppQdu-(9mR!orymWCQM?5 zKpq7NJY{?_)#eX)IO|cKt87U@!7Dm6kGeMXtl%&qsB9>|`)vh^eCxmA8AKZ8pHSH! z^KFJo_eEhx$TF^+wBH6kB!2f#p3A+53!$Ho3}he{ex}e_`ok_y13~y??Qed9{z{R$cXm#CNxGUA|%uZs3(xhz+e+ zWyCoW-@8-Z-3C6aeLj!~@1o&-^YjE#RQ)@~pFe*NOxJ>F)I(C9ku+&Cs3SOHPImU1 zxi%8<@5mq@HtEOftIOA~Ujvg1_8iZTgoMQBB?W~6NYxNG+iZ+j0AoQg;S=~T$Rv%| zIN1RoOW>(@U?7M%4X+;(v3)ED5^|G2kvL$8Y^G3kTp5&BjYNW9_CmKh3y@h7JP34T#1CW|LgN zpOr6@=D)NC6c_ZZ_5QSOpm&`%A>F)_Sub!-<*Y%s>r1;kG00?uyBnvKaDw=Qy#ZrZ z#7A>;2J^&hWN?v?Buqc6+LwZgDg($n7c^SHOK`b3*#!}kN4-GpHd^b?)#L_hD^xCP z5D0G|})}<;sdK@$b2=*KU^=YX5c1!i784MNE7ZkjW70YRry5)kbeFB#mCb(Sulz#fE;2C=A5O{JrpD+TK9oVU$ z3T*;aDmK>CD(_K9<*k{><@I$ULc+7tWjMHDV0!_HFFrp0$B!SFd?)Vn<-n+`0j`RT zxz6rfBk0$tn3(uJFf!+z&*9MIodQ==3>iT^x!?g(RC@Z$Fnw*8 zL&bo31!_fL6pu@38c$d|s*UY6`TyXGFiA5eS%930vgo zD&7I@NUv5b^7IiS`u_56$mdJ=85$OS8bpRnB3vFob8Y5zm2^b=hn%DyA-nCDxDRM( zX!vyZzjhNh13z9!NNCr;w6xU0!J#tPX~T@xzlUqlW4nd?u>Y%ZS zUytHP6@=MX$0)dASn}{X;`}K3dvUQezZR>(#yQwI{mq2-X`r1 z<^06NbB%}#D+LG=_@J1W-9GZE4?ODf%FL+fsidSOK}^Uzka&dBz z)B6mMdV@U@zG}HHE?@E=u94WEALZO7qkon-Qe;T4S=#Z-O6`akt&-LrG9tcOQimNJmeLh1 zb=g_kPF23z81Ny$$5-}ODw?T(p*%kyc?f49~>$~4jI{sje87euGZvZ=n7QKSQ$;rvTe*Jnn zoVC*nc*~CYiyT>7rJw!pU7l5OU&2R;?0>u&6yGa2jwChs`% zjp_Um7Jn5b3<2>INWLB(9*#9}-Y4PoFk#_-{yV*d{uDGc?e+EZLC2e`i~72{`#LKZ z1LI}W_U9yuqOH}c%uC&I4&Zrn4LR<|VtvRcTkeXv(phoNQQ|h*p*cTtMr_o3hzSex zjqnc-UW;8UJ{ITGQ?&)d93ey&$8ORzcb9Bos-j+65>Cow0YtTE#6?TiPfZxM z*7Yd1hoL*3KJIp#8ciXSem9&S-wCgK?wD>mnQ!g;%;BlR05xICVxeR-Pw^_oDKmJw|vp0}FM@Yr^`Tz=gm+sUQI-PKR!VR zmk{!cmZ9dL4WYd!OmU=UNy4}fQG%LNf=@Qo9{^e5&4!V0=ewxH4Rv9aq4H$XbEB*nU;PotN8)2k%R+tCzx#3 z6kK0JgFrZMuN~N3uWXR_*AdA~lgDJx5z$5>p$>63{GOAO1Ma&irwC${ zJhaG2rjg8Gb39~5yKZ>+7?{e6gys|FzxnmSc!+!OH@`{%u@gYtfg@&IGG@eL%n}85 zJVEy)<#kl41WS$>@sTu-C1z*m6O!NJkz@O4Ic=68IUm*7(9lqKH;DemH>Dllfe6fh z6Gui|1@2(^z(D86L`4N!EPDM7qxGcv+;qUb=0QYrCH2Z+fz3H6@a@Vb^;VBufq`d! 
zMs$kFEl9fXpNsPMesw9OGh<&0(_IxL4R}R991)boPm;$|Guc-HfeesOpdr(%J>Kbf zAD!3sitei?%F>M(0RRVC!@5}~B|t7`K{3_$f(Ng@1Q&VTGJ$^xGGS1f9ct7u?j!lGn_ueU<^nM+#f8JV8^H znYlFA-de=Ie1Ua;k~sFrdBb*(=D3{1w>3|FK?SU3x{ti|B6LEQ6%Z~0kq4#4w;*2% z5^7ZPsU6Ym4D82OSu&W!9FMTEnE-D|PJS+~@Bng+_r0U-EL3ak(hWQ4@bfnowyGN>fV0UXh)wKf}B z-p$P&6{NQ6$|(gajGr2MU85QJt%CG^$kE2rJ*T*}Znh^B42mbfayke+jjh{){bG zNTF2DMIiL_Dhl1gRKu_y_~>v1WQ`$;HSlf5mO4MflKr?kSKI!wwS=~ja0MPbK6H?P z00|7KsquUR0TR>c+E{&6`F0dC=X{I#CScs>XjYYqEw3S>K|bsC2CvQU0ajn1y!)Cz zD_59pt)}&}Q(({Y!~N8F?c>XNwxSfn+1_iLLxTfJ5)qam{XemTDCWGK2ij9rCqDY5 zUI->$)*YXefc|(pMU$8GF(??GK+D;A6a@lVAnFQM?(L9q+pHacEGQ0Gx&`2rv#y2) z$bD5>gIE{Zn;wWv9yPXn2|roc`8$Q(G0iiCc}48m*yWtrFu4xYybLARq=SYU^|ax` z@`Gc)DpM`DXKok5&Q7PJS+_k(1ave>&)KJgucsbZ(3l5`V{N%@+D?>u2dNIuKf48C zF^gycSM#YUfW+zI#G-!>8HuV3ijD3$c zV<}r0{cHhhwFWIPu?2YR<{Kc8t7U{+T)y>v$c{uC8zSp@zPQ7|>KRQ+tq*rGR72`k zd@WDIVsYHfdR9)8KWix5TOwT3;*ZZ`#J~>HySgUx_DkwI{V0e~Ft1a2{8^;x?+t4A ze584f)_vzn0~lJhMz+>{nCm@vL()HrBe?XAFS7spB@V|(74BwpuSDXfo9&n++?zmLx>zc$->778JjCod8OKiheCWWZ zTy5z>>-#c@#6q#nT4@Gm%RvO;#rdQzr+%pV++O>gq-_?3512XmG5)P|PM`R14wlY- zJ>@gu;UTolpiyl7c&FSf1H+41+~k~XF04tBg6C(3NlWlr>t`0aKg)jI^?lYpRP8F~ z0>9@T3JO*)z<~pc74)?^YH?gHOJ|44z7O6MrB1zm&7iymwM~-n-RP+m4m9iSMJz5&P1ZPdgV^r#@ODg6E)6+AFoV%uY`nqZq2467k+IRH zW^L2ay5&ZEl!1QLFU{c@tPKlc4|jtVuf;TfleAM!62yNmPjSeK#K2O_XW5nbefni@ z>49uP++tPZLYQPA&CdssxnfysM5hbJtye6j8A0pI)RRN05dTaNqw9;0u_3p7r9kfrH_Bi^L>R^ZtWrQ)>KIHHljvS7v)A zcipP0g?)p;770daIwSKq>8E5Uf=Ni z6yKL=dan?hsUoE**UJ)o#%7e>L%(4M?xJ4<1dmsyO#2#k<5Dx~X0!T)jWU4OX$u zHHbp{j93hHyZzm+YeaFT5!R=`02KvMWDQNtw}Gz1y}hp}mXhJx(cq8UBMFyo*WYK? zD_nU$)SVQxbo(o%4%ySDZR82Mcn*C6A1s4v#k}sUj`7j=B}@%bTD3oDQ-zLMsmsfl zNf0dUnwmF%n>)Xp=GWXYjh}CPXQys~(QD-i-%L@S#L?R42vWC|*H7Qu#87O^F0~cb zvUV7KQ+k;i0jiH5JqWW~SIEH6JeMQF_G)wck~mXf)-T_% zS7)&9F(XF_P$~0dR|qF1Dh`ha5x~9ya<`Q7+RV;%t&N=(P?=y0n~R~Lu^^Lc9(o0^ zC^RS?OjxXDonvETKniGW4e|zRCJ^p_(MdA?D+>Vh!7h-d$C5QrvK19=wd1ziU&jl_ z6>q@q&+YH)?`LLW&?pBMVvhchbCPy6+xvp@+V1;2g|xBHD9Ttgbz24qbuGo!$9$_e zM5ygXT*R4EJv1ov&Z^p6B?hHTk~p+w_!`=yo>>OnuX_0c_EWoQ4V{+%=|y3h%|};< z&>G^_A)$#r5f2HS$b$Tdk+thY41~B!_0rW!v2M!%x!2}~ ztLcf(0V@((qiSwmE7^S%N?*35li?F)VU|(*3`l~MU&c@V~mO0|kb5-~4TZ2V&Z6dCQ#>Ny&Kb0>R z6X}Q6g=?#Nk3-aLIpmLbtt2y@qjQ+!I_SjdGgMqpp`R)o=7PU6k|7sip7BUnKa4Hh@4QNk|6)BZ z+<)h8S!RD%j?kC#XJ)B^jgF;Fbjk+gmD*-M(mr;b);R$^!a~S=nQ$rbW`O<*k<-Sh zFkN7~>)`=Q9yBg0vhq$ZE|!1>ffpKtiiN_pWn4nDH#YkEfVOOwnrv_HI52trSv{++ zzn+(drYadG1eWy%=&Sm`vim39&=rePCFk=r7Q+XPJ9UZKSyp^G8QD@{Eru3^`?dMBh*JN!A9(`h#oEu<1)6Q$kF8+e&EatAeb19wbrK4kG zZ01wny)`sqG-G1x*vu>}Z1)aL6t4|kWxNHo;TNG4OR@#=e%1_x~S^&Ue> z-F9Ei#eXy4i>0LAyDX{+aorAt?<+f}qy^r<~US3Bsj%#I-3G67f@YqZQJy{T0&I~1bi$ij;FYi~?j!gFS{Fp+(WogY(i6f8VUUVDltOKLwYN9(Vx-5pljp}AEyy5RV`QO}tX<_V00d@HJ z<`BQIurP{HV~G&M&4m?HekCt1yAZLkC;)5ey}ch{N%n(DVhy~?LQbyZ)o#-fct;DE zbex!&1jF%N*N@M^(s8hgnpF}9OGp-k{A;w#frSHearw#H_-pItm`2i`LPljp4co`# zo3=u-xI&}(I$ynwqJVowLP{F<^QVHEnsI?bVfsyGbO%j;#P&UiMVF3#%*^x^0lKoc zp1yuYW>k21T4v_x#{fS+kjUa%EqP2IuG{v!`KKt(cC|}W4Gyt9#pYN=eR>^N4-oZS zug|Ba!ICGiVBiy1EOf6|n5=}abAI#Y|9TIDYgWRq5#$H@p9$SYp6a%t#5dcjNjyIx7>lGl?h9^ibr`M? 
z78YVU51#Z=OhqN4dA^|3wKsnKUCEu}gozc&Jb&dxW`9!OdA&2_RAXZ!Y+`&* zlij-K@Qu(yh@08@Q~tcs<#o5t_*q0n zeDotwV~4>)DMBJ5GN+A#4DgB!`%#>-Z=@gfYgO!EiHeiljQNQutQ z&deY?7J9-_q@*_q$FQ zpZ;_O!A?d+$7)RL$r#-UNN)A|aH1{wis%bfJkx#ELt;1a3S*u zEKw1W^xRy$Am9djdft=l2TgFN+UZswWz6A1_H{chkPjh{m@lu;D(LXU!J-Uj=W4$3 z?3cc^uIjS5E^pO9huL?mVT-2waW5w*$Vc?(&h*yw*v7ugbwY(|_rj#3 zmi6>kOd_UlY{5}cTwvK8c;Sc(Xi@e_>;7GS4^PihK7J04^2H+8bqc8%@LrS&5MP-3 zRF8pt13_suXxsOBQ35wwvbz8QGa3mgeBsb>eQj;Qi^@n*5iHnH$1d?67#J{$!aEVq zIn(6*bk1Kk$Ud;1RW7uU#bA4S>1tjk#%(9MIdu8!%AA^p8rGnxzJC7f9sEVL zmbs*)zc&jjD_Awq)YRlFVq|0leDn_`MIg8U)LihE6!QjeEK}LExR}EC+o5j-DU8va$k#K()ZR2XAHq>hcrDckZ((!8d*B>7(na9xeeX zL)D1QFCkY7nsO`FT${E|6PV^Hefw4p4i)W-g`mVXEcCCfV+Qi@=&arGv90p1J{o5A ziRHo|JzMWLPT1Ty$$z(U_;tK89As%gz-D1#0jy+pKAyA#^z{U+LL0B_pEsS7>scOS zBcniemJ28Wrzug$1b9;f@H-ThmBGtVR%a*XcOxWH!Q#(IsIGKYN&Sl#nZ{x~X7xG! zlIP3rdwYA}Ucf3(tc<+8z<_|Q6<_3`X8s3x?->?V)~$=uZL4h?5J3zGv}M2mC`gV5 z&^7>~7C9&)IY|};ObCL47(hTMK?E(5N+>cS5|pS&P9h>XQ)H<7u0s2}dw1CP+;je& zdiH*vuMsM&nrqH6-ZW;;4Zb(FJ9dfoaN;akFrg-f_hp^M6?sPEm%8XV9mlVOx@~;x zwVQV-L$U)O60h^;D{#Odtp!VfcmkFydu%t3UwG=uY3n4x$<#XSU3%6LusR-}9~}br zh{wO2p$3CT|N85{9ElcuoJabZ&;7{#k38ZJS$?@buX)T`Q_`eFpnEv77bbzAGY8ZK3j;`6n*v;{AsWD#{HDEjn@`CGG9|7!$P388oy$4 zfPemt|I7RRxV7OXcbrXHlhv%9y}iA{F&*tu<`$1LQDVRP1McmDf+<>YaI2CcOtmyM zHIW|zxs9Pt=1Zokgry?v)~$;C#o6@fS!id#b_7S=^orO9N0nXz4f2`n?76F7ITmCv z>w?F!>o~)USpz3#-7=cuw|PfwgLZ^Cl)*ve<>f($0pFBB)dT)+=CzpFzb+-Twl&Yw zQY?f@CEX1S9;)mVEY29f%f5Zh--4oai4knPPg)7b)0S+u_rZ( zR#h#CU#9^Vo?TMZ^Or;W3S$Qc2h-Ej*Mwb6c@;r|Gb-%xcmdz$wZgCWrs~AQ7c3E~ ztf+cpIhSLDCGEle`~2D|C4G)DR4I@|0KUEcIC!uC3a#IkE``tpo@Lq7uP^gTr^1E~ zB@R5)_)SQrfa>Tdh*>p0+jW<9#R|Ptn^xSQNDTODBO&F`Edl@?exn54jOu~$LkABk zhls_29E1VOt<_@<;ac0d6Y(n!I3alTM}Q$w+~+27!V>&D4yDnG0}ud`$z+65jJaef zDJdZs30&bLFSw1$y6oXA+#qTD_3lx@G=wFn$7zxE^yDD+5jtDY$#@C_I3naeeEM|X z-~aqbsW`16lqnt?J5!rj!91#a_Gzi!C&Y<uH#>9tUbHg1|JmisS>v9erf6* zV_A0pR8E3>7IgXx7VBycwrJ}y8g`B=<+MEeX)Y3$GS>AXCUY~BBy`fMvqb{5Mq$|7 z5VRn*!52KSoRpN70yx}*x7WAZOL4F83=7cawa;h*d8VIgbH>6VwzxeQ9}yRt@TKQ4 z5DPFjLV<%0wWf{DAo3;Pr-Im6A(BcpgRxOXW=O%-*4E9<4es)O)g;)a+{6UbxWe%3?W`5o(pVpCGaZrA9=#n@ex z348o_Ag1s92Sw5g%=G>S*IV)^l0bmD2;0m*mV1BR4)-dcCl% z4si&OG}05W5Ugk!&z_ke1b~Ec0JUHZs8xh5`FP23hdzAhjIV-H9_`n2{Tfta59GLF z#n*1w&}*9p>8G4U?QU6^1U)Io^9VK=89#{LAc2}A64G3nF=$>dbr|o)vO@K#AEyR?NCv^)QUrKa-r*K;HVfB zbGVSln1_Uouh7>0+WY}%Hzp>Wl`2PH$D$%1Np$m#GSy2^S22&i6Eqv5xrgUiUp<+T zK3A2K&$szj$nNts#uV4Vxr~-Ivj+mif{w*$ry76E3eB(=GSG+)Kld>DF@t-b!qscn zw0)EK?d4k^N13kCa}Jlwn%RS*DC|p3fSdN*ieY^QU0+mG^k@M8lHY!F=}+rGoknLb zF|sQT5}?o@ESwr#BdAmRk%_xMx_GNjnSbo+=7bt;b#*@ZWBX#L!9hWFcb_RnMnxU0 zOrl(R{-Dkxd)8OF#cgEyY)pf0j69r&KDt*<*{UWq^RBj7jhzsCs9V89WfdIv@NJ`6 zv79wWg;Zsxbog)`vIx93$T$xoA_5ZKWtq7UbWT)+AHdrDl!L89T>&s0QWZ|=>I#r} z{Gy*fe`oNrlcuYH6a>Pp?bgU*oU~Tjk~T$yGQ$P`Z}{i<+ZWCv>*I{T2-IWj2fSUaVhOS~XC>+m*}KG&VM3 z{W50C8JXAstTXyjg7z5i@LmN42&}Wvkn(NasvIn`T~yQozSliQB^sH|FUvc8e0|-Z zrY*Pl9fuaa*X=uY2nQXLbs7D2kc!N;IGwcNu#5ew99wqmc;9OCNRxTX+EkzTS@X&z zndn6Rt*xl)>?5C5blKPUjisgzLjrX(+gf_(i|YuuCGJRMjlKoSe-N3}m5}Jf@?GBn4DLiQ`*{3Q8HPIdG4rKf*F7sw zz9x;tEK84HUBBH}##1|Ap7p*;KCOKykmd36mv@+0z`wqI3qHr#_w9}u^m6!H_o<$@ z{J<MY0xlb_IpXBvjT@Z{FO$&tZ4$zm#F^9xYR|b4dBAs5q`v zsi2^GzqS@(QV_H8sOs~cQxSS0&HPOD_s+1@_wf%+&pKx40>My)#N>Ap3L#@-?32hU zc@Pkw#s9xpg(qgX%5Wjx;fyW%+N}0Nv1;a!(4%#%OhQnHQYG@_ z$mA;4g9i^%sohTMv}_rvtx+e3Js_HU>OMPD*{4oFDjxY_G}x< z_FQ9C9`3j$);C&WQJ?g#%RWV}xy+p@^s!m7+h`-Lp)W;)9~EwJ#5PEiND^3u zp}1t}(h+AFHS=RVobkOi44+VG!L(jmw#hGv-0ab^8k(vvr-IHcFI+E@!{}dfFT-BR zT=GJ7(nao(xwTwt)O9pc$0i^63F^d})=vfMPmMRr21ngq>)_d}ENv&TVU4c+2jo0p 
za}a{C$R~feOnLxY(XmGoL&4_p7hpet^=%+yK#uKsd^{k4VQekHrnCi}(sXci0S5Ap zSjHvRo6=wv^X!?%O9P0G6_L_|%CA(sg?YlE#oLYq3jqOc>=T!NT7b%8){TMqC=SKR zHeQAV6kXVRr9;bbMgEvt#(ya6}A-OHQ=t-Mf{aJV-ET@&A93o#@x6ZWx!*N z9xVAWv9TLO%|1c2ru>Y`J?BnzIiCZIUl@new=}v~R9{Pq*fx+k;>#0FmppOr-RRlG z`|6{8IfipjoNldiicA8M^7TH4Qe{->BW@_;-^tS~U7Xq`rlUe5#>?fK^z;Wp6K!r5 zca==$H9p;`woWv@@&uVo|9VeFjZD7HEAIv@pg2Ha8o>)Zz^u&U(~KqtTgMyi%Axqk zd-Dc|QbI<5dfP~%7r$PnbH(F*$c9EpFd|I$AmWk{ zrKN3^b`F_Us3+|uHV_F2=m2|+{1W;S%jL|?%v>PhSA5$x^9jTWnt!i-x^UhClWEbJ&*Lj_!TXEP8?`mPu4_TCx4 z(jNjdUKA1jhj86*#rS%E_{?p+`T?E+dOa<#>h4r^$hY@!p1!sx~{Lo_y!5W(eO7 z+;G%Fv}pvDT55z)P1O8%yok&G>ea`N5v(ob)>)vo##acKt~o-{$2X^IxOA)Y3-wn0qSWnGBVTc=LdS%k5S}cZ^n|Kbo#GAgp*@JJE-n(}%XSzS5kmuqth4{?j_vn)zRor)L zAq*oMHe7n6B5+8ne`JZ)H=3vt7vkL70~rIa9BB=%Z{EFU6(6~7guKs*0xoN>B79TU zsjWA#+^rxFrnru2T3N-*al3w0R?$jn8DQk`&fNi?4QrHN;YdTv+!zO~*iaZ3(W~hC z5_Z6}$l;(|_!d;%l+07V=IumyneGl$V__k0R zSG`iv07agCc`_1lf^H)!cf!Kv5C`3J^HZNAS zK5%(PL+n0=hxV6BCQ^yj@~C{-Mb3IFn!1`Aq$(k$MqmQA9YN0uca7c+)cIVFL@o9E;c0Q%x`#`CYXxFAUc?G zx8dOH%B*g*M2M?0Dun>zb()!ZZi-yZivG?WEJ>^LjKUQ+Z%y;bH$eYDjgMckbZKQ? zT49?|&@ohJdlU=S*VgP(K6mbEUaQyb+p=c_nL?<+qh}1SbF%$+U^NAxV&FSEDf6#!bXR_^O6_-XYSGR)u!0=*f@{$vaDSBec-}ROlLUE#Veat9B$yc0&z{c$GnKpS)*=<-Fscm?m&Y4cw+lE zfsSH*<{|W%@BfxU-=nzwq3}DQb06PITT{*t`~uc8!?Axi^~H)Li2I4M3-O`T*hn8K zM|)iYYZIq}Iyz^%X`ns}He!4+T|!2j_1bQ4&5}{yb>d`sXUr_`v4KA$kXW<`ZcfQ& zwnO_TSMF&(pzy6EmC?MI zXU=9sK9i1JzUf17@V=Vm-1f7_*(SeI8sv(~&BrFE^!J!dSX&7h@*hMm1u`c`0JSjb zm1Cb)U~#*R1jPMTVj-wo9ROT`BEq?5fsGvC0Kx`CfUs@})H$K5+B)RS18VE~_3Nl_ zI=+1q6Bmb1l?J*JrrIRc)!_TW%{r(jd6_pv(r$~5wCwBJex(8zZ$ic2bzTcpW#zH0 zCjlG&9!KYuFezkQ;wPOu2zjJVRH|y{SkLHWP z4<1M5rUpt69XWb?z0bxHqY+dCL_q`OJ}PP}+d9CtR4R;FHM&lrUg({==B5YSyPrq< z=1?Oe4YAiEQ>(jyHh^d7ZQd7mgv(Z`>lNPPr&rZ%bQRxw$mHb zxf+Ke#h(+PR;FG;snOENeddOS26ty))25x=Q%1_px0L&eOA9w}F?HfrtDMp~#DGR0(9|j0!lBxN^YjR+F(U=?I(!!b3x0DBfsu0O_>11rqSx z*2ZjQ3O!hUUs+JHA34mZ-2t&op7{+`ZAnQ8o(;Y!(>;;lApe39^4|M5w(~XBQ*OW38ZQ9`aPs6P5^6|<`hl7u zty;ex@wpeZ`g#G0qO3*##wBE+p#AANpYgm9Z)S5|^HNJ%1MG3|E0!81whX9qz(p>P ztUiiYd`!&F?b|;DZd=^xojt3kPaB)!@}zCJ>Fw)wsjPR(Wgu?Tp+g51Zx)4~9hfYp zaEgkF-SE^lDhcA{Fs!w4lcP1Dl<6%fI7lWVMY(=-)SeBArH_z1jKdff$psdCQc^Cc zqzNY4(C{m$43sISou7YYuBBh!G6a)W+^N*$G|KoKDNhdMkP21CK|7wO{e1LKP%A@J zXfkPPDsL03%EPI>5lw<5fDuXt(aP66=cbahOr)jd_}9A3eopB}45$SVbF6a|SuLoN zkyB5c)+zm?&fxM!(^5)3nRH&xuLqSI3N|~v)Zj<6jm{GwyNkrQv&`0e&w))w71dUn z_1PT0Ney1**I?6QUpXiFI5Sg5N=k}E+ON>v(*vAeI-NwUI|=u7nZtD(*jog)&S^X- z?xA-dvJlh$TrN*@9(yhy$SrxPX5wZ+z_?E(AZtXLF6MFm9T29W@VdDZJBcUQ`}vkm zk*l65#iNnicm6o6c=M2ZUUwtW!G^p7JpL-c`4c-^J98ptL$Q{BeLv-ZKv0NNPyooaaF$xR^NNQQ(MP6~@rl zm=L1_Xv_MQ|HzL1`F@k$IT{?=sbfXcW^6$80vme`XBPA2E5cB8kk1|R_VI~RGgnZ! 
z4ml2z{M4XBxqEljx;54o-;qhaO`Gh8u2|2yxoc@@Ay_ZU6j09*a56PL0=sr4BqZeg zF8Tk~`E(BFZNa@|MZ>1B?LF8w0)2zu9ON9h0*DBYI&AStXI_h(<2Nk%9MR;3ltxjh zUSHeWZ~g%v2bBu_5S1ES`VELDFb&>}nd4_#b!mPxQ!xpn35;giK)IyPZED{JQ1%H~ z0AS{^SC&U>(0}1G{VN0}t2KM}_FKNqnMqKJ zLBPGVoqPqEM^1~p+n2BLrlvjB@k1_VaV*8WavK>wTJ7}ql3ck9+8F-t9AOAz$T0yUu{wxex`0L*|R zZQ1fPDQWa3Uzmx7#pvWENS4yue5C;{ejRd#w__RwaV%&|Lt*qa2%OY+s3P;-A5UvKFx zeD$AW369}!DM_h(5rSRo=aHWCv*T6DnF%cA7KJZ(y{jSX+j z^)wEU4BgXDNwIlHIU(hwNALAkk)NdG*61?E=?UXqHIN~hFP*zs+~a<+J-ch1OY}8` z2>nOoXemWrU%{70b_WUh%vrd=3SWsVc+^PuROFshg8nN`bev31w(t7L7v@aDmWMqZ zydUyCIQ{X*r9pf6goMtcA%Z7R7mgkJ&C2cQX?c zWS0R{vXD%J6N_~B2Xjc&S^z?kE$c%N=UA*b_q}kKZ{51py9lYjdM8eN8FCg(`+671 zVpVmuo@3p)KwZbWp@lkblkI@@xFl>&0~n45R8>i%(`aR^`OI)2)Y= zENR<%tdU-4%$v#RA6I1Dd8)MaWDjS5kiB(>pzf`;)utxvjF#rKFg)dFHuCXBV=xwv zUk5E`*0NXy>vqaBfXm^893|oD5biid)p&%Aph|Irli3}*LSixnAXVGpiJ>;+eiA6~ zmZJesDJHw^otn4^Dmc>JqzObLfy#lhQc0D;hq^P(+Vyrg0rBfIcGk>w`)KF?jBKp>{BxUIS69f~Q1;y9{}z4N=uo5U zCo}AYhNYn|I(4_0m?KnLgn(l55_lmDT6^I(6X7F>Y#TpZN0<8if z`1@fy7x~Q8xxU^-`RKB6__<{0IiV$M9*stCQuKfi%8FK)?w1&1m zK;g}a1u1b04=iG}$QZ-Dlm*=ujNd6e#2J61LF0eI9Q1%I%YHTrWZ=<~G+g7Blsf>x71j_9rrZAo$;%!Gs7 zEN+aUZwoAOAN3V#^SvKn798;CZiH!lQcxns7syX`q$mwoZE$Z%IypKw8QlQ!sU4@_ zdC5>o_lFtw`8%EzIA~?+4IuJL!4TN+)_YyZ45PbbVUnObDhKSS8Gl1B&7h0Jhov4_{~PI%jP0koh&)@FGc%JYn~^i&ezw4;NIQX1%1If_ z7-DyFpQ{TPZ<&>k-r+r$b6e@QK;aFeMiKtDtp{${6@a=`anYF!|4fnjmH2uj%-1D` zYB*oIThkL46LXI@>M@yIimZ#npGc@8uq3wYnK_(mnWGHU>Zx}qA68oJpI)z}ed(%K z@1#M^5@V3ylB|K=MQfy;IuE&wWjhxZGR!P2IQ+Y*I}R+O+v%LoUkwU3KFXBGk1)rp z>zw!aJ1q_e2E#_Qh!bz=wPqRUP-uR^r`p)OsIwTGo<&@1M~AVIQ8)Vs2L}fnnmE;? zO*YP5UCKeuiU2^>Q*kTAR$XqyZ)UmcVx7Jnc0BGz)m{u|$#IW66&1ztsYm2(&*jW* zQS{_22@W^aqVdQUDu)?-N-ZnFicB-e_eO}nSJ8_zc~hfZn@9w9kE#px*FM|H#|#?o(L(7EodWfI1eCBuB$`%}`#q3(k{zW33N$FnJ$5gqP@VUlwH!@W%SR+>m zm5ZKxzj1M?c3EH!lOTAlVz809g#HE!_<^P05vx<~1=7*y*h9!2uSI;Rk66QeOZE41 z*#HJdrl!`KB_C8!@bZc>#S)}@ctB(K?V|2gPM&D_B`ojvUz`AdsxFL-P}}%A4>Iwvc|A=gHb& zAx0)EZr-&^MFA?CQ)Eo(JxQ*z5G%~jN5*gHX#H=2#X{%q!Z&l%sadYAd5Q1#dyajL zXHK2rHbEOe$T_Vs&;?5x0$sLA#Or8L05H3aXS5_=WO5IeE6O%+6m!Td}j6vvjuH z->E6II?L0mB%cgDcR!dsFf z;@PuCeIK5nuHgOJA^Yld>yxE{mquGc{O$*=7v((gu=E=y>S1(Q762Onr6ggiMhP+* zLR=_frPv@~tehQhVSGBN!tG9mhuDj46Ve<%(X3nG>`_sGvW8D3EE7pZdRZ8w>#&j%k-iEggdD!n5UcdA2?`-sqVR6m&^WA_O*28xThTwO3q7>Ns`>i046@Dti1i<7loX3Ht5CxpTO)Uj8GcysTr87^}X5)EM zXDJQcT-|hXBWbv5O$HPr87V0f$6Gx@Ga?aWcbXxN0*v$}K8`-S@5P^-6hmQl)GjU7k36BWun7KxRkmQe9E8+I2cB z+p3Q)zQS5^)oSk9iAQyGdb9Q+U*B8atNf)4n@0lW1#l)WW2Kcm+#|oO=tb+yB`X?` z?>c-RR4NgwgdkJW-Jl>zfMDpypt4_7t88yS3f;3lkcFb6B4{8$(7zr$G4CN->BZ1B z;r{(EoN&Jn(D7Q3C`n%Kgv4@Z} z)33u~Q?}`!+AV_LZ98$Y<%br4c&?v>CZ4KwJSf%vO=^DX>E7wq`B#DV^Iuu!rCXes zC2W~|wD{~mfC1Vo5S!~Dbm(>0Tvyc$X|DDhCgCep&EOT2VcHefNE~-dCEKPYP_^r=s2SDDJZ+D|Ki^?;}%* zIG~}E#KNmtkJ(r08@w@~ajxyTKYy~%(Q;V6+#_uk`FsU%L219+_5c9vvM zuKSO-RYYLt&ZCt{P_m%4y9PTOmFnWB#?aUVuxwXcDQbip_wBs_3 zqP4#U7AG6zGpq-c7d)Z%GyL2KN(^5e8#~6iXxa5&e)i&DrMB<*iKm-QcWSXw$sFgm z|GxUx-le4KpZ(3kFME%9ZX>0d|NiXUQWELa^%tB9`~Nt7_l|?`y*tOg&GG6Sn_TdQ z<({H*4o_!Es9pJN7#HP|Oh;Hoky-K%ZvyKimS;$Odh4al5kQ70@dJx#CC$G>JZS?$`n6ACN~+}>_( zbA*rRV*TjI`pGVh6KJhANaT!MtvER| zQl2akr=-wBpMnX1($hNHRS8GXlt-6)U*$l+)HTo$_qM9ghlc8gOTX>fqb4t8mFe{; zKHhCA$araN^H$uz5(jT}P+d7CCAyyS20nF5YaXOcfD<|Lb_2q%N89kbk5^Pu2C^8U^ zOq>E3EPf7yW}iOAQKg~)hZBg6BuI8~Z$lu9v32g)u!$M>J@FX|Z>?w)S2}!A#&GcB z>lF-)Nl%};x;m^S9QlxFf(LqfdTi4Olpa-h~ zKnxY@?nlp4Ts>EiR;^lZwtH=aHjR@lu}()-btAk_z4D_i*UWQf-gMb7`el)ep?&|0 zhCUeV!=#NcZt|@>R=m_!4$h)TZ{)~SUgF70tTlqF`hB{g2MbU_~ z$oktv#SiH-ha$w*GA^Sgu-lb&VLk^oI(+le8#jgnx5k13Kpp%1IfPeOsq3LdhCG(A zf4~9-^*1!zIyU?eUhiIin`Db2rTCsxOg`*afEqL%Ooj@B8Hn&W+jRZ4 
zh@984aV^!uiPfOeJ@d|ph3sl3pLlP*RmDXr%2KoVmZ5e ztsW386k$sp>5%^RVtGUNts+bRL^xt_f$@KcmnTju&`y|c*qCAoKD|lM*8S3@OWpGN zGvx$%!crPwoWj2JwneLr1j@Pg=fY`Kja6 zW{G5z@;$}_Wwz0GS8z+ahNh`1oFZf7YMQr@E|0rAV`5?g7{;k-;Du`1+CBDC7`C~c zgdK8?Bn@;2)>U=NPJPUzMUDzaQhQ;KgpKLA0&uLBC?zg=d0;^^h(*V0APjiek&K;e z5n7KsbY_f}lQmK%T|W7|d6K){A_Y2nj0^7W?glK=e^nRGNEkO8?JJcC*bGYyU-(uM zRCo|FodeIQ5DBv-d=lB5;qF%a9m~|FRn7s&3B1U(Yd>Ccp;yxcx)mLrUeIYG>yeHS zu?=EEoXGVG_Gq6m8@gOzx7?uwIoImBj-!1ep$BdVx;eD2!2s<3hJ?qjUEbLZtaEG8 zw$2tBdVZcX+!Oirt#p58@IGBQ{XcmuU4!v|o%ZR--zRJaR~GX;6h4R1b%5^IND7IF z5OM$DY8<6PK{1ufjNEd}1b>Kkz?iDZS+*$e?M-$?L(r>u&y`zWf)N`^l)eBO(U(`g zAG)QU9_cf0JSZ&kIeECKST{&I=%!C0J5~S1PS3m{uvc;E>9Bah=#4wh=MO9gFppaV z#84EvVq|Ikn_&u9aosi85=w5Z+XHeviQG;slEI_lk8*yV{1RJz^^RB3(m8u!@j|m? z zPkz>y$4|nnh(JJ*fzQH?!xKINGKET^CkG=QWT$%*l{8oZyE$CdH2GZr?VV<)%f4@3 zk8gCYvD&noD5owLUiiUV#Q)GvHhvFsc`fi=O!=Q5EQfZ|!Qkw-Z&Ba1lg{hEX3g7} zbS`Qj=#Y3y?e9QTs>i&){IYhuq4Z0$z`|nUpIIKKo#kZi;4HN7i7hruzh~*ublxRa ztn}{-5e{Kits;k(&{kM_V??vpN zKL}U^P{{spc&SmP942^kU466u3xUw#DsE*xWOC@kyS75V#lQSAF+J_$MWuoTBuh$y z2`ji`Q?AE!NL?=Y`tDcV`yck)EE$uJ+-XdNC`zl7?AcH8{ z=MP)kSIW^CG+NVTfAEPA>^0)z<5xQZ9t6nIJ7L9m?kbC6XLiq3C@mK44c3-A(+{G5 zv)W3SXl&Dh=bh-UpI+FyU)o!F(J#MX9)x)hjZ9WjR#qd6Zr^?wAm-u)3l`wziq%}^ zx^g#e6wq$6v1!rk>xmzr(L1XOH+hKOgg9)RYnOhuPR2Z8VN|wfX&OAN?+l4gvsw|m z02@r{3XyIgEI9P_^`VYhxzg5+ReZcV>tf+pnQq3-qW;hZUM`0H(DRcFJ&Bwt`{Zt0 zS&45q#VpVEaH;Sx47=@iUBBVMUH(pUs=(ac^?V9k@|n|R?yS8h$qu!H{#x?4`9(#O zaXf-}*|yCDXD#G_Q%JHrMb5%hW9*)SB=mqVve4=*q(ZGeJyXR|4D;goG#w}fK}tYO zjX9TAGq0GurZMs|xDx&7hTmq+_!L@mFKrulGQG}8&pZ?!oV zBBrFkxl0d%A7Y$@XPyw0?;uankrVSZ2!-k@c6OZKII5;#B)A3V$u(BFhq4z_^b)M^ zUS+<^{*hzXtGn!tjn4|mN8=G0iG{SqjP1NssJ!@x^+sZgJ+FV8asK+cS+;(Mm9}Dj zG;i#)q`D1`8M!t$r5G9RExE228FH3XwOcT|fPF8j5IowF?V8MrLtwqUTV}*IOn8R@ z2>;oQ6UEA^AEQ+v+uj<5q8##t-pY867fNU^iA%@=R>9 zD!HoF2A~$DC-TsM+(lrNfmze6PY(JN@eSk{ukf2_>nwcb)ThH}4`w7hk1%1xKaYDZ zFXmiS@pj_@S>Z=JzqPg5<+i<7S*Sn!JMl`7>xN4l4cuA<;P8Szd`4tpTxA!^_-@#2 z?qgIi)x|$htEn+@Jif1C$4<)+LPCU6^UMmrmHw_7iDjni)~+>__1{IxaI&cJee0Bd zewwBYTt^`?De~>6Yw7DztmB*oD&m;oirFgoPh?4Z`TFWX{^JG9(I|k4TYi3P*zUcH zY+_=~d-rs_G!6&g1s58$@E=Sd8upGbG&J12Ws52M2S^>U_TXw?)3s4X_{4{1s?@O| zR(YZ7lWpn3IHegtx4_45BEc|2)Re+d>|^@7`(WE4l7LUwj<;ueHp^H`pLHnD=V>Lc~`JHz*-rSON)>GY~MAOtaB z2)*^x>*K87_r-(X-XWB$;Lc#`ox#d29xFT#NE@I$9G+ov6D&C#JiZm{2P%HGj(Wsg z)-{~vz4K?pYf--+XTx6(iyuFDy3g@zwz5S@W88KT?eo zn5w(IXNL{13%;MsZMVgs?Wu{0XVe`~-N9#rpooYF5`#+p{bH&gdo%JP;so(FkZI6_uBZ2YLTVR}3Rn40tNSrJ#Hzyz`2iB_%bZE0KwS zBKFA#%-0DB3AvbSr?Q$e%GXuKp|{Pi$-O0#uP_q-WnSp0vby>@idY8ZBj`k3;S@!Z z0cjZhJ*A}t)&G#6yTxcX^AiUa?n$ihjkt$Orn$Mfe#+8vSd&}@0c2H-x;b%HK{TYB zazb}+kz{npCelaF%&ny7XL%_zGd|&GYM|Dy?9xG#3wa|a8=I3XDRKON@?~(|uqE~VI$NFvFUwV4XX$=d?d@4FZ zL}z!6%MG4};)En@`SCg*W2Pj^fpPwnx=Q4b!Q^;>`lf>C>=8%)>JcsgyJ%YT>|%?SsW>l=M>3|n*J>r9{}7E;jQBg^Vi(3tA zkZU$M)Vl@-+c8fdvy?@u1XD30efJxyINj45uK(%JA_nHP?u zsCdGC%aV5&>EM$OAG>cfK6)b0BX`QS`&9i%Wn~yekdz)t0>){`UJw0Fo6Wa!Y}xeio09scj`CCnrO?1`h!VV?Iu*s-D-JH8eH`O7{g`UZf{!9QgEn%XiJb&fP+2_Q7KN z_+({f-a7cCxcvmj7N}oPR)%=R$Hta`GYi<3@%&=B-oP$xm+8=)c==Tv8)n8$PsUI; zI5kO=b^|}@cc1v+o+T0!6;)7PE@E20>{{;GyfhcF5KA=s_&xyZRBK!JRYv=H;iSEd zdj&p_D_5>qjj?aoAcG_}>?h*8ciW)agSd|VL$q&G$l%n@2Gz1>)lL-$ELhCwe&u2!K^%?Q zuBQ*^KYjf8Gv<@2K!-X$E)UeP1&{@yXI$?3Au&#y5@ch3?FV%sgM_hvIWsGYyZaRM z!jz-^q|Ec;ij}5oE~A}9GMKx&JKJ+A^*6XTy1j{;`SVnJ>x1(DeyBFN&!siTFlG+r zzGG*-cG!C5-=w80#PTy1&2YNE`oGE8F>iuFEcAKcX(o zzeipC%hBZHsc+_>t4Wi?VMn|r77-pjGv<+VS^J+4NHlpuytd4`>GRZq^-Kxd-?WhG z!~7x&h9d}ZBf z66=!6ffUSb{4YnT{o&ldw0ICDqs6;&rZp^Ueh}2{clX$)Cj8{VcO%h)mzNhDJ<=@@ zbAh{rnKre$Zb$vKKqdw8Sm*-({xzXRc4;~Ys3ZhMkCdLxGNdn|PmiJyICLaknLs7^_{2xa 
z)$zI*U&A%U?2Ri*E`Y=+HG+AH1cQ3SHdF~jW`)LRoeXgdJje3Uh;vZKd=xUKIC zAULAEiC@rHByVZdvZ%g_R0zbHrEx|l-!#e6gLq-wPWBNJ)^a@$4PcF!e1TVsh4~ss7bV5&0kOby~;AItw_%0A1&g~D> zlLBymK&?X4ih~>r9}9?P5I#3}m_{l&2L4Hez@QYlSp@e128wd72C)7>6k9X86?pT~ z-5GNT9}V=X?Hfx>m_zUgQb&Zu#H!nEk-w<_;tiOaGl(fgyGcxlLKd44X1{8W?Qmk zx#-$uAL6uqyy`6CpFLaY4?7_5)Ie^szFVuk)PkQE$O=n2;d9_}l!r>IPp{|T*i1MP zF^=m5DhRyJ>8>^q(oMDfcrHoK)@p1VQ7r{VtVC+a=#2dlQL1>VA+>wy^QSMXAv!%9D zq4dz2s;ceYAECWO5rdE7im|=`YJo4uS%a|XgjO5VL(!gwn;r*K{;A06-U6M!$oc`U3yf%9*JrNDbvu$ef)bsK(@q}t^bO4} zYutt{IN<($o&1M_emOb7^=G0fZd6zBxUhNv?!x8c@Q2sY=TQSzExu0kh6f3niCYB) z%gW24u(7qpXx?okxM!^^qi$0u`e=;)_~Q=)gD*g6oZ+xdl$LaxamH9Qx9NtKW())F zn6YQW=W0Rzab7DTjq#D|PxOssAUCU?cMU$(om?wz7M4{GhZXjnVu?5AL52ZBNY|`E z<+twk!NB6D+m1kHiRc0`HeJkq3=Hh}M*XbkIu$!x9$4QY?YT^Mru)qL*BaKD=hE- z8b@oEa+^s-g@Ik|`jURRpPHcw#`hT==5j;h6W1sHv)0-Da}S_ zQ5x)9FfFdb0RIoc8`34sQ;9O67X|fupkE)Kbt=pdnTOw<5Zc?CfkU z33wV@vgGi6a%||%#q2LUDvD$Xng4G7Ogjzd$>;kmA@fH@F%a{09X&{psXDd~%Q70~ zxjAzUjamfL^xRA*{*WldevMlqeCQWZg&Z5ME6l6`nX)}UL?ET#MIe6#9{$9jP4950 zC9ret5f^uu&uT4r@{H=D$)~}*7RygD@p6{ZA2R>{;NU+%g5n1jd}@8e&dk?l5pnu1 z^Z)Bh2JB?Y{9R8PeMiW*wN_tIf}H=x{_EMc*A|Ug14YNh$o!6{nC$(Y-1;{a+7HzK zT;qVwd=*&mwe{D&ZBiFNLtVbiLX@zo5|{aR?|y8T_dO!LO@4l0Z5Hw)<0LB^8x5{@{5482~Y5AoWFP z{cTX9SRIzu>3gpW?i6qpq<6VzPTj~oz0!Y^Cu`Phfq8j!)+Ob{2i7^OO z^kOwgSh<RnEg{e_8nt^D@jdF5s zNO1>8?YMvcRj&d=(Z}d!_Y02S zK7Go{NR$zNR=k>>y)xaw-15`<=*pmwkeK`?ploQ*af;n60xX`~SB(P<`007b#}lVN z^w6>6*R1~poiD0|wL%8hp*ZNFLm*T?1kywx0L3q|Kr_xXOl}G7gZ>Zo;uuIHj;Fvx zh%{n^o@hY#Ur&a_cNrDg|A51eqLIhJ<&=)2a8V<~K?)O$2CF%jM3 z;_4s0$l(Ftj$_Ae1?*?VlxTfjD^F8M3LnJ1y8h62t*2e zB*;VUHZD2&>Fk zIpUuFy#8B|F+NXIU7bdQ>apT-?}%qqs-iiwK6B0Gc~kQ3^PP%EynY*MGY)CzTgE(_ z7`?XofmleMRZZB3EH6fXi<3=DbB;;~b#aUM9bhIDsp8GfZFLJU zA_2x;9Okba1aQ9)h$$ZI4&@6Z*py*|myakq38^tD)_7hv=M zcz2cGviF;Q!zk14?s?}mWVp$tG;tIi&?f`u3Y{GthS=G6gXFN?EJ(XND;i=)yo(TS z3t|59MF$XI1j?O=#Kj*xMS2wJgtj)J{X$eArH4AVANn!17_tH;6QK2d4=K?h1>Ami zNJGQEe6t8?eT$c`|LQ+)5v^hI%lh>G-wP|JvYmltY&`3pciK0xyO`=PW?h&5HQ9sl zPNOQ(I_J&LJ#_;2Jex-P?}}%+UP^Zy2~dMHF0oXq4LMhE+6NkK9HIsQr`TiHVIJnW_Z)0#?E%`TOC_Kd&(g!h z-Qm$n6(6s>R%d+S=u!{h2w56%$~_z#1UBh-NZyN!dr*c|rN{!dq9Az-cxY0fmT4N3 zG5MXu&^GDY8k79GX2XSC%8?P-mkX(MNwnWVPd43o-q#lg7jnWpUdsvmsoxIh?Kem?RTli5K`^L932l zLojP-?L1sWJf8@UBDtzqDwl}^iX9gve#RoA{m%uD`m0cll^5?YtB`p4A2n7$ar5_d z*dF%pMwkC68h$`ee@Yzw@`Ghcy2R@S`wvqa9T=f{8ZQ$vr<~jLWs5p)c)jLYTv)zr z__kwoR#Sg%P;y3WQc1tf|5MnNMKyIrQLt7mRw^P!1lk&^MGz@SWeA`MQ7aPA5JFTy z7N|@ifQ(YcRKOrms(@t{ZDSIF8XyqJpaKRFDFH+hAk0%j2r`8sNN>_EfBfj*`FZcH zd){5=?sN9O=a`0JA=l^Tx2t}^qF+MaeO2LSpxj;(Wdu5lDYfZUw7Ab@bvAr~%A+pUT2| zY91gu-6iYF6+RqA>w}KV~#+}IIF*r=^tiYJw10PADvAZQ1n(4}Kd9&~ zhh)w4*EQB;!ekAZ@5LiF4xZvr(oLpdEL1by!&*%!2bxeF zIyaE?77uG(SX4p5nft3GWah}7n;SXLt#MfClkV(Xfl-&ypK8zr$3j^6^j^t?C}x(O zq91rn&5uXDCAHhfT6kS`}L_AK*#n~-v?h#7uDmo&< z%Ztl<(_P+sMfK=u`(HMM=o%nJ+;jdOL8aG1y$vAtbc{v9_-)0FxMzm8EN+?IT0k*A zw)Z|mw}q0i7WH|r6U5h`4o0h`u-+HRvdu8b13sFp@8fT!7DTv$AILLWnp*9QWuDY3 z17#P&H0Uzl7xpl9wAh?WvICtYAZkEX3ot7RzQwM^{%e zsC<8uyy_gVpEP$ULw`fl9AR_38O|;kV|LOoA9bGM@6b_M{Dh2Uv}9^q>>DU|r??tQ z#mA$N6Iv36_g{~PL+Mniw5!{4O)3ibVXu7`Siwm23OM0^_l)mk2IS_~hoi#IV*Ge! 
z19518K{0+Ci-l}`1EY-y5$&ac&WRfUHuAgVfxHBBe;(z6%$+^{x4?@Th1Zb8cbR8j zE?=8r*2~!f=l^F56d^m|C}W4Rqx%cZM4PfByqB*j7OcyFi>gpC4vJWAu50Vw!Rkb5~{;}8hnk---ev7Yd74{!>!6H(yBco<{nsFoN3iqcPSpk4hi^oAkdNxr-u zW%Ql-zjw;*Kkg!D;_)Y02IKsaC I Date: Mon, 5 Oct 2020 15:00:14 +0200 Subject: [PATCH 046/219] (enh): Refacto of splunk connector (#27) * (enh): change host/hostname field * (fix): change log level to avoid to much log * (enh): using neb instead of storage + rename the connector for more consistency * (fix): remove useless lib * (enh): Change the documentation for Splunk * (enh): remove mandatory in the commentary * Update README.md * Update README.md --- stream-connectors/README.md | 116 +++++- .../splunk/splunk-events-http.lua | 318 ++++++++++++++++ .../splunk/splunk-events-luacurl.lua | 336 ++++++++++++++++ .../splunk/splunk-metrics-http.lua | 359 +++++++++++++----- .../splunk/splunk-metrics-luacurl.lua | 310 +++++++++++++++ 5 files changed, 1322 insertions(+), 117 deletions(-) create mode 100755 stream-connectors/splunk/splunk-events-http.lua create mode 100755 stream-connectors/splunk/splunk-events-luacurl.lua mode change 100644 => 100755 stream-connectors/splunk/splunk-metrics-http.lua create mode 100755 stream-connectors/splunk/splunk-metrics-luacurl.lua diff --git a/stream-connectors/README.md b/stream-connectors/README.md index c41a57ddf4b..177fd3577e4 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -152,7 +152,7 @@ Parameters to specify in the stream connector configuration are: # Splunk There are two ways to use our stream connector with Splunk. The first and probably most common way uses Splunk Universal Forwarder. The second -method uses Splunk API. +method uses Splunk HEC (HTTP Event Collector). ## The Splunk Universal Forwarder method @@ -162,33 +162,115 @@ In that case, you're going to use "Centreon4Splunk", it comes with: Thanks to lkco! -## The Splunk API method +## The Splunk HEC method There are two Lua scripts proposed here: -1. *splunk-states-http.lua* that sends states to Splunk. -2. *splunk-metrics-http.lua* that sends metrics to Splunk. -In the first case, follow the instructions below: +* splunk-events-luacurl.lua that sends states to Splunk. +* splunk-metrics-luacurl.lua that sends metrics to Splunk. -* Copy them into the */usr/share/centreon-broker/lua/* -* Add a new broker output of type *stream connector* -* Fill it as shown below +### Splunk Configuration -![alt text](pictures/splunk-conf1.png "stream connector configuration") +An HTTP events collector has be configured in data entries. -In the second case, follow those instructions: +![alt text](pictures/splunk.png "Splunk configuration") -* Copy them into the */usr/share/centreon-broker/lua/* -* Add a new broker output of type *stream connector* -* Fill it as shown below +### Installation -![alt text](pictures/splunk-conf2.png "stream connector configuration") +Login as `root` on the Centreon central server using your favorite SSH client. -## The Splunk configuration +In case your Centreon central server must use a proxy server to reach the Internet, you will have to export the `https_proxy` environment variable and configure `yum` to be able to install everything. -An HTTP events collector has be configured in data entries. 
+```bash
+export https_proxy=http://my.proxy.server:3128
+echo "proxy=http://my.proxy.server:3128" >> /etc/yum.conf
+```
 
-![alt text](pictures/splunk.png "Splunk configuration")
+Now that your Centreon central server is able to reach the Internet, you can run:
+
+```bash
+yum install -y lua-curl
+```
+
+This package is necessary for the scripts to run.
+
+Then copy the `splunk-events-luacurl.lua` and `splunk-metrics-luacurl.lua` scripts to `/usr/share/centreon-broker/lua`.
+
+### Minimal configuration
+
+Here are the steps to configure your stream connector for the Events:
+
+* Add a new "Generic - Stream connector" output to the central broker in the "Configuration / Poller / Broker configuration" menu.
+* Name it as wanted and set the right path:
+
+| Name | Splunk Events |
+| ---- | -------------------------------------------------------- |
+| Path | /usr/share/centreon-broker/lua/splunk-events-luacurl.lua |
+
+* Add at least 3 string parameters containing your Splunk configuration:
+
+| Type              | String                                    |
+| ----------------- | ----------------------------------------- |
+| `http_server_url` | `http://x.x.x.x:8088/services/collector`  |
+| `splunk_token`    | `your hec token`                          |
+| `splunk_index`    | `your event index`                        |
+
+Here are the steps to configure your stream connector for the Metrics:
+
+* Add a new "Generic - Stream connector" output to the central broker in the "Configuration / Poller / Broker configuration" menu.
+* Name it as wanted and set the right path:
+
+| Name | Splunk Metrics |
+| ---- | --------------------------------------------------------- |
+| Path | /usr/share/centreon-broker/lua/splunk-metrics-luacurl.lua |
+
+* Add at least 3 string parameters containing your Splunk configuration:
+
+| Type              | String                                    |
+| ----------------- | ----------------------------------------- |
+| `http_server_url` | `http://x.x.x.x:8088/services/collector`  |
+| `splunk_token`    | `your hec token`                          |
+| `splunk_index`    | `your metric index`                       |
+
+That's all for now!
+
+Then save your configuration, export it and restart the broker daemon:
+
+```bash
+systemctl restart cbd
+```
+
+### Advanced configuration
+
+#### Splunk Host
+
+If you want to change the `host` value in the HTTP POST data, to identify which Centreon platform the data is sent from:
+
+| Type | String |
+| ------------- | ------------ |
+| `splunk_host` | `Poller-ABC` |
+
+#### Proxy
+
+If your Centreon central server has no direct access to Splunk but needs a proxy server, you will have to add a new string parameter:
+
+| Type | String |
+| ------------------- | ------------------------------- |
+| `http_proxy_string` | `http://your.proxy.server:3128` |
+
+#### Log level / file
+
+The default value of 2 is fine for initial troubleshooting, but generates a huge amount of logs if you have a lot of hosts. To get fewer log messages, you should add this parameter:
+
+| Type | Number |
+| ----------- | ------ |
+| `log_level` | 1 |
+
+The default log file is `/var/log/centreon-broker/stream-connector-splunk-*.log`. If it does not suit you, you can set it with the `log_path` parameter:
+
+| Type | String |
+| ---------- | ---------------------------------------------- |
+| `log_path` | `/var/log/centreon-broker/my-custom-logfile.log` |
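+
+For reference, each queued event is serialized with `broker.json_encode()` from a Lua table like the one below before being POSTed to the HEC endpoint. This is a sketch trimmed to the main fields built by the events connector; all values are illustrative:
+
+```lua
+-- Illustrative only: shape of one queued event in splunk-events-luacurl.lua
+local example_event = {
+  sourcetype = "_json",          -- splunk_sourcetype parameter
+  source = "http:centreon",      -- splunk_source parameter (hypothetical value)
+  index = "my_event_index",      -- splunk_index parameter (hypothetical value)
+  host = "Central",              -- splunk_host parameter
+  time = 1601902814,             -- event timestamp (e.last_check)
+  event = {
+    event_type = "service",
+    state = 2,                         -- CRITICAL
+    hostname = "srv-db-01",            -- hypothetical host
+    service_description = "MySQL",     -- hypothetical service
+    output = "CRITICAL: connection refused"
+  }
+}
+```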
 
 # Service Now
 
diff --git a/stream-connectors/splunk/splunk-events-http.lua b/stream-connectors/splunk/splunk-events-http.lua
new file mode 100755
index 00000000000..23cb677b5e3
--- /dev/null
+++ b/stream-connectors/splunk/splunk-events-http.lua
@@ -0,0 +1,318 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Splunk Connector Events
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Prerequisites
+-- You need a Splunk instance
+-- You need to create a new HTTP events collector with an events index and get a token
+--
+-- The lua-curl and luatz libraries are required by this script:
+-- yum install epel-release
+-- yum install lua-curl
+-- yum install luarocks
+-- luarocks install luatz
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Parameters:
+-- [MANDATORY] http_server_url: your splunk API url
+-- [MANDATORY] splunk_token: see above, this will be your authentication token
+-- [MANDATORY] splunk_index: index where you want to store the events
+-- [MANDATORY] splunk_source: source of the HTTP events collector, must be http:something
+-- [OPTIONAL] splunk_sourcetype: sourcetype of the HTTP events collector, default _json
+-- [OPTIONAL] splunk_host: host field for the HTTP events collector, default Centreon
+-- [OPTIONAL] http_proxy_string: default empty
+--
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require "cURL"
+local new_from_timestamp = require "luatz.timetable".new_from_timestamp
+-- Global variables
+local previous_event = ""
+
+-- Useful functions
+local function ifnil(var, alt)
+  if var == nil then
+    return alt
+  else
+    return var
+  end
+end
+
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == '' then
+    return alt
+  else
+    return var
+  end
+end
+
+local function get_hostname(host_id)
+  local hostname = broker_cache:get_hostname(host_id)
+  if not hostname then
+    hostname = host_id
+  end
+  return hostname
+end
+
+local function get_service_description(host_id, service_id)
+  local service = broker_cache:get_service_description(host_id, service_id)
+  if not service then
+    service = service_id
+  end
+  return service
+end
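+
+-- Batching note: Splunk HEC accepts several JSON event objects simply
+-- concatenated in one POST body, which is why EventQueue:flush() below joins
+-- the queued events without any separator. Raising max_buffer_size above its
+-- default of 1 therefore batches events into fewer HTTP requests.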
+
+--------------------------------------------------------------------------------
+-- event_queue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(conf)
+  local retval = {
+    http_server_url = "",
+    http_proxy_string = "",
+    http_timeout = 5,
+    splunk_sourcetype = "_json",
+    splunk_source = "",
+    splunk_token = "",
+    splunk_index = "",
+    splunk_host = "Centreon",
+    filter_type = "metric,status",
+    max_buffer_size = 1,
+    max_buffer_age = 5,
+    skip_anon_events = 1
+  }
+  for i,v in pairs(conf) do
+    if retval[i] then
+      retval[i] = v
+      broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v)
+    end
+  end
+  -- Internal data initialization
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:add method
+---- @param e An event
+----------------------------------------------------------------------------------
+
+function EventQueue:add(e)
+
+  local type = "host"
+  local hostname = get_hostname(e.host_id)
+  if hostname == e.host_id then
+    if self.skip_anon_events ~= 1 then
+      broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'")
+      return false
+    else
+      broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only")
+    end
+  end
+
+  local service_description = ""
+  if e.service_id then
+    type = "service"
+    service_description = get_service_description(e.host_id, e.service_id)
+    if service_description == e.service_id then
+      if self.skip_anon_events ~= 1 then
+        broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'")
+      else
+        broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'")
+      end
+    end
+  end
+
+  local event_data = {
+    event_type = type,
+    state = e.state,
+    hostname = hostname,
+    service_description = service_description,
+    output = string.gsub(e.output, "\n", "")
+  }
+
+  self.events[#self.events + 1] = {
+    sourcetype = self.splunk_sourcetype,
+    source = self.splunk_source,
+    index = self.splunk_index,
+    host = self.splunk_host,
+    time = e.ctime,
+    event = event_data
+  }
+
+  return true
+
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:flush method
+---- Called when the max number of events or the max age are reached
+----------------------------------------------------------------------------------
+
+function EventQueue:flush()
+
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")
+  local http_post_data = ""
+  for _, raw_event in ipairs(self.events) do
+    http_post_data = http_post_data .. broker.json_encode(raw_event)
+  end
+  for s in http_post_data:gmatch("[^\r\n]+") do
+    broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "")
+  end
+
+  broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"")
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+  :setopt_url(self.http_server_url)
+  :setopt_writefunction(
+    function (response)
+      http_response_body = http_response_body .. tostring(response)
+    end
+  )
+  :setopt(curl.OPT_TIMEOUT, self.http_timeout)
+  :setopt(
+    curl.OPT_HTTPHEADER,
+    {
+      "content-type: application/json",
+      "content-length:" .. string.len(http_post_data),
+      "authorization: Splunk " .. self.splunk_token,
+    }
+  )
+
+  -- setting the CURLOPT_PROXY
+  if self.http_proxy_string and self.http_proxy_string ~= "" then
+    broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'")
+    http_request:setopt(curl.OPT_PROXY, self.http_proxy_string)
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(http_post_data)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  -- Handling the return code
+  local retval = false
+  if http_response_code == 200 then
+    broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code)
+    -- now that the data has been sent, we empty the events array
+    self.events = {}
+    retval = true
+  else
+    broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code)
+    broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. "\n\"\n")
+  end
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  local log_level = 2
+  local log_path = "/var/log/centreon-broker/stream-connector-splunk-events.log"
+  for i,v in pairs(conf) do
+    if i == "log_level" then
+      log_level = v
+    end
+    if i == "log_path" then
+      log_path = v
+    end
+  end
+  broker_log:set_parameters(log_level, log_path)
+  broker_log:info(0, "init: Starting Splunk StreamConnector (log level: " .. log_level .. ")")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = EventQueue.new(conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- write() function
+function write(e)
+  broker_log:info(3, "write: Beginning function")
+
+  -- First, are there some old events waiting in the flush queue ?
+  if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then
+    broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  -- Here come the filters
+  -- Host/service status only
+  if not (e.category == 1 and (e.element == 24 or e.element == 14)) then
+    broker_log:info(3, "write: Neither host nor service status event. Dropping.")
+    return true
+  end
+
+  -- workaround https://github.com/centreon/centreon-broker/issues/201
+  current_event = broker.json_encode(e)
+  broker_log:info(3, "write: Raw event: " .. current_event)
+
+  if e.state_type ~= 1 then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Not HARD state type. Dropping.")
+    return true
+  end
+
+  -- Ignore states different from previous hard state only
+  if e.last_hard_state_change and e.last_hard_state_change < e.last_check then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Last hard state change prior to last check => no state change. Dropping.")
+    return true
+  end
+
+  -- Ignore objects in downtime
+  if e.scheduled_downtime_depth ~= 0 then --we keep only events in hard state and not in downtime
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Scheduled downtime. Dropping.")
+    return true
+  end
+
+  -- workaround https://github.com/centreon/centreon-broker/issues/201
+  if current_event == previous_event then
+    broker_log:info(3, "write: Duplicate event ignored.")
+    return true
+  end
+
+  -- Ignore pending states
+  if e.state and e.state == 4 then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Pending state ignored. Dropping.")
+    return true
+  end
+  -- The current event now becomes the previous
+  previous_event = current_event
+  -- Once all the filters have been passed successfully, we can add the current event to the queue
+  queue:add(e)
+
+  -- Then we check whether it is time to send the events to the receiver and flush
+  if (#queue.events >= queue.max_buffer_size) then
+    broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data")
+    return queue:flush()
+  end
+  broker_log:info(3, "write: Ending function")
+
+  return true
+end
diff --git a/stream-connectors/splunk/splunk-events-luacurl.lua b/stream-connectors/splunk/splunk-events-luacurl.lua
new file mode 100755
index 00000000000..aa867f1519a
--- /dev/null
+++ b/stream-connectors/splunk/splunk-events-luacurl.lua
@@ -0,0 +1,336 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Splunk Connector Events
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Prerequisites
+-- You need a Splunk instance
+-- You need to create a new HTTP events collector with an events index and get a token
+--
+-- The lua-curl library is required by this script:
+-- yum install epel-release
+-- yum install lua-curl
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Parameters:
+-- [MANDATORY] http_server_url: your splunk API url
+-- [MANDATORY] splunk_token: see above, this will be your authentication token
+-- [MANDATORY] splunk_index: index where you want to store the events
+-- [OPTIONAL] splunk_source: source of the HTTP events collector, must be http:something
+-- [OPTIONAL] splunk_sourcetype: sourcetype of the HTTP events collector, default _json
+-- [OPTIONAL] splunk_host: host field for the HTTP events collector, default Central
+-- [OPTIONAL] http_proxy_string: default empty
+--
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require "cURL"
+-- Global variables
+local previous_event = ""
+
+-- Useful functions
+local function ifnil(var, alt)
+  if var == nil then
+    return alt
+  else
+    return var
+  end
+end
+
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == '' then
+    return alt
+  else
+    return var
+  end
+end
+
+local function get_hostname(host_id)
+  local hostname = broker_cache:get_hostname(host_id)
+  if not hostname then
+    hostname = host_id
+  end
+  return hostname
+end
+
+local function get_service_description(host_id, service_id)
+  local service = broker_cache:get_service_description(host_id, service_id)
+  if not service then
+    service = service_id
+  end
+  return service
+end
+
+local function get_hostgroups(host_id)
+  local hostgroups = broker_cache:get_hostgroups(host_id)
+  if not hostgroups then
+    hostgroups = "No hostgroups"
+  end
+  return hostgroups
+end
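+
+-- A sketch, not called by this connector: broker_cache:get_hostgroups()
+-- returns a table of hostgroup entries rather than a plain string. If Splunk
+-- should receive a flat, comma-separated list instead of a JSON-encoded
+-- table, the entries could be joined on their group_name field (assuming the
+-- usual Broker cache shape where each entry carries one):
+local function get_hostgroups_as_string(host_id)
+  local hostgroups = broker_cache:get_hostgroups(host_id)
+  if not hostgroups then
+    return "No hostgroups"
+  end
+  local names = {}
+  for _, group in ipairs(hostgroups) do
+    names[#names + 1] = group.group_name
+  end
+  return table.concat(names, ",")
+end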
+
+--------------------------------------------------------------------------------
+-- event_queue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(conf)
+  local retval = {
+    http_server_url = "",
+    http_proxy_string = "",
+    http_timeout = 5,
+    splunk_sourcetype = "_json",
+    splunk_source = "",
+    splunk_token = "",
+    splunk_index = "",
+    splunk_host = "Central",
+    filter_type = "metric,status",
+    max_buffer_size = 1,
+    max_buffer_age = 5,
+    skip_anon_events = 1
+  }
+  for i,v in pairs(conf) do
+    if retval[i] then
+      retval[i] = v
+      broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v)
+    end
+  end
+  -- Internal data initialization
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:add method
+---- @param e An event
+----------------------------------------------------------------------------------
+
+function EventQueue:add(e)
+
+  local type = "host"
+  local hostname = get_hostname(e.host_id)
+  if hostname == e.host_id then
+    if self.skip_anon_events ~= 1 then
+      broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'")
+      return false
+    else
+      broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only")
+    end
+  end
+
+  local service_description = ""
+  if e.service_id then
+    type = "service"
+    service_description = get_service_description(e.host_id, e.service_id)
+    if service_description == e.service_id then
+      if self.skip_anon_events ~= 1 then
+        broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'")
+      else
+        broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'")
+      end
+    end
+  end
+
+  local event_data = {
+    event_type = type,
+    state = e.state,
+    state_type = e.state_type,
+    hostname = hostname,
+    service_description = ifnil_or_empty(service_description, hostname),
+    output = string.gsub(e.output, "\n", ""),
+    hostgroups = get_hostgroups(e.host_id),
+    acknowledged = e.acknowledged,
+    acknowledgement_type = e.acknowledgement_type,
+    check_command = e.check_command,
+    check_period = e.check_period,
+    event_handler = e.event_handler,
+    event_handler_enabled = e.event_handler_enabled,
+    execution_time = e.execution_time
+  }
+
+  self.events[#self.events + 1] = {
+    sourcetype = self.splunk_sourcetype,
+    source = self.splunk_source,
+    index = self.splunk_index,
+    host = self.splunk_host,
+    time = e.last_check,
+    event = event_data
+  }
+
+  return true
+
+end
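+
+-- A sketch, not called by this script: Splunk HEC normally answers with a
+-- small JSON body such as {"text":"Success","code":0}. Decoding it (the
+-- Broker Lua API exposes broker.json_decode) would let flush() log the
+-- collector's own status code next to the HTTP one:
+local function log_hec_reply(body)
+  local ok, reply = pcall(broker.json_decode, body)
+  if ok and type(reply) == "table" and reply.code then
+    broker_log:info(2, "HEC reply: code=" .. tostring(reply.code) .. ", text=" .. tostring(reply.text))
+  end
+end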
+
+--------------------------------------------------------------------------------
+---- EventQueue:flush method
+---- Called when the max number of events or the max age are reached
+----------------------------------------------------------------------------------
+
+function EventQueue:flush()
+
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")
+  local http_post_data = ""
+  for _, raw_event in ipairs(self.events) do
+    http_post_data = http_post_data .. broker.json_encode(raw_event)
+  end
+  for s in http_post_data:gmatch("[^\r\n]+") do
+    broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "")
+  end
+
+  broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"")
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+  :setopt_url(self.http_server_url)
+  :setopt_writefunction(
+    function (response)
+      http_response_body = http_response_body .. tostring(response)
+    end
+  )
+  :setopt(curl.OPT_TIMEOUT, self.http_timeout)
+  :setopt(
+    curl.OPT_HTTPHEADER,
+    {
+      "content-type: application/json",
+      "content-length:" .. string.len(http_post_data),
+      "authorization: Splunk " .. self.splunk_token,
+    }
+  )
+
+  -- setting the CURLOPT_PROXY
+  if self.http_proxy_string and self.http_proxy_string ~= "" then
+    broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'")
+    http_request:setopt(curl.OPT_PROXY, self.http_proxy_string)
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(http_post_data)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  -- Handling the return code
+  local retval = false
+  if http_response_code == 200 then
+    broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code)
+    -- now that the data has been sent, we empty the events array
+    self.events = {}
+    retval = true
+  else
+    broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code)
+    broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. "\n\"\n")
+  end
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  local log_level = 1
+  local log_path = "/var/log/centreon-broker/stream-connector-splunk-events.log"
+  for i,v in pairs(conf) do
+    if i == "log_level" then
+      log_level = v
+    end
+    if i == "log_path" then
+      log_path = v
+    end
+  end
+  broker_log:set_parameters(log_level, log_path)
+  broker_log:info(0, "init: Starting Splunk StreamConnector (log level: " .. log_level .. ")")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = EventQueue.new(conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- write() function
+function write(e)
+  broker_log:info(3, "write: Beginning function")
+
+  -- First, are there some old events waiting in the flush queue ?
+  if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then
+    broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  -- Here come the filters
+  -- Host/service status
+  if not (e.category == 1 and (e.element == 24 or e.element == 14)) then
+    broker_log:info(3, "write: Neither host nor service status event. Dropping.")
+    return true
+  end
+
+  -- workaround https://github.com/centreon/centreon-broker/issues/201
+  current_event = broker.json_encode(e)
+  broker_log:info(3, "write: Raw event: " .. current_event)
+
+  -- Ignore non-HARD state types
+  if e.state_type ~= 1 then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Not HARD state type. Dropping.")
+    return true
+  end
+
+  -- Ignore states different from previous hard state only
+  if e.last_hard_state_change and e.last_hard_state_change < e.last_check then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Last hard state change prior to last check => no state change. Dropping.")
+    return true
+  end
+
+  -- Ignore objects in downtime
+  if e.scheduled_downtime_depth ~= 0 then --we keep only events in hard state and not in downtime
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Scheduled downtime. Dropping.")
+    return true
+  end
+
+  -- workaround https://github.com/centreon/centreon-broker/issues/201
+  if current_event == previous_event then
+    broker_log:info(3, "write: Duplicate event ignored.")
+    return true
+  end
+
+  -- Ignore pending states
+  if e.state and e.state == 4 then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Pending state ignored. Dropping.")
+    return true
+  end
+
+  -- The current event now becomes the previous
+  previous_event = current_event
+  -- Once all the filters have been passed successfully, we can add the current event to the queue
+  queue:add(e)
+
+  -- Then we check whether it is time to send the events to the receiver and flush
+  if (#queue.events >= queue.max_buffer_size) then
+    broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data")
+    return queue:flush()
+  end
+  broker_log:info(3, "write: Ending function")
+
+  return true
+end
diff --git a/stream-connectors/splunk/splunk-metrics-http.lua b/stream-connectors/splunk/splunk-metrics-http.lua
old mode 100644
new mode 100755
index 0b23ad83223..74ee52c29b4
--- a/stream-connectors/splunk/splunk-metrics-http.lua
+++ b/stream-connectors/splunk/splunk-metrics-http.lua
@@ -1,130 +1,289 @@
 #!/usr/bin/lua
-local http = require("socket.http")
-local ltn12 = require("ltn12")
+--------------------------------------------------------------------------------
+-- Centreon Broker Splunk Connector Metrics
+--------------------------------------------------------------------------------
 
 --------------------------------------------------------------------------------
--- Classe event_queue
+-- Prerequisites
+-- You need a Splunk instance
+-- You need to create a new HTTP events collector with a metrics index and get a token
+--
+-- The lua-curl and luatz libraries are required by this script:
+-- yum install epel-release
+-- yum install lua-curl
+-- yum install luarocks
+-- luarocks install luatz
 --------------------------------------------------------------------------------
 
-local event_queue = {
-  receiver_address = "",
-  receiver_port = 8088,
-  receiver_proto = "http",
-  splunk_sourcename = "",
-  splunk_sourcetype = "",
-  splunk_auth_var = "",
-  events = {},
-  buffer_size = 50
-}
-
--- Constructeur event_queue:new
-function event_queue:new(o, conf)
-  o = o or {}
-  setmetatable(o, self)
-  self.__index = self
-  for i,v in pairs(conf) do
-    broker_log:info(1, "event_queue:new: getting parameter " .. i .. " => " .. v)
-    if self[i] and i ~= "events" then
-      self[i] = v
-    end
-  end
-  return o
+--------------------------------------------------------------------------------
+-- Parameters:
+-- [MANDATORY] http_server_url: your splunk API url
+-- [MANDATORY] splunk_index: index where you want to store the events
+-- [MANDATORY] splunk_token: see above, this will be your authentication token
+-- [MANDATORY] splunk_source: source of the HTTP events collector, must be http:something
+-- [OPTIONAL] splunk_sourcetype: sourcetype of the HTTP events collector, default _json
+-- [OPTIONAL] splunk_host: host field for the HTTP events collector, default Centreon
+-- [OPTIONAL] http_proxy_string: default empty
+--
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require "cURL"
+local new_from_timestamp = require "luatz.timetable".new_from_timestamp
+-- Global variables
+local previous_event = ""
+
+-- Useful functions
+local function ifnil(var, alt)
+  if var == nil then
+    return alt
+  else
+    return var
+  end
 end
 
--- Méthode event_queue:flush
-function event_queue:flush()
-  broker_log:info(2, "event_queue:flush: Concatenating all the events as one JSON string")
-  -- we concatenate all the events as a serie of json objects separated by a whitespace
-  local post_data = ""
-  for i, json_event in ipairs(self.events) do
-    post_data = post_data .. json_event
-  end
-  broker_log:info(2, "event_queue:flush: HTTP POST request \"" .. self.receiver_proto .. "://" .. self.receiver_address .. ":" .. self.receiver_port .. "/services/collector\"")
-  broker_log:info(2, "event_queue:flush: HTTP POST data are: '" .. post_data .. "'")
-  local hr_result, hr_code, hr_header, hr_s = http.request{
-    url = self.receiver_proto .. "://" .. self.receiver_address .. ":" .. self.receiver_port .. "/services/collector",
-    method = "POST",
-    --sink = ltn12.sink.file("/dev/null"), -- sink is where the request result's body will go
-    headers = {
-      ["Authorization"] = "Splunk " .. self.splunk_auth_var, -- Splunk HTTP JSON API needs this header field to accept input
-      ["content-length"] = string.len(post_data) -- mandatory for POST request with body
-    },
-    source = ltn12.source.string(post_data) -- request body needs to be formatted as a LTN12 source
-  }
-  if hr_code == 200 then
-    broker_log:info(2, "event_queue:flush: HTTP POST request successful: return code is " .. hr_code)
-  else
-    broker_log:error(1, "event_queue:flush: HTTP POST request FAILED: return code is " .. hr_code)
-  end
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == '' then
+    return alt
+  else
+    return var
+  end
+end
 
-  -- now that the data has been sent, we flush the events array
-  self.events = {}
+local function get_hostname(host_id)
+  local hostname = broker_cache:get_hostname(host_id)
+  if not hostname then
+    hostname = host_id
+  end
+  return hostname
 end
 
--- Méthode event_queue:add
-function event_queue:add(e)
-  local splunk_event_data = {}
-  local event_data = {
-    metric = e.name,
-    value = e.value,
-    ctime = e.ctime,
-    host_name = broker_cache:get_hostname(e.host_id),
-    service_description = broker_cache:get_service_description(e.host_id, e.service_id)
-  }
-  if not event_data.host_name then
-    broker_log:warning(1, "event_queue:add: host_name for id " .. e.host_id .. " not found")
-    event_data.host_name = e.host_id
-  end
-  if not event_data.service_description then
-    broker_log:warning(1, "event_queue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found")
-    event_data.service_description = e.service_id
+local function get_service_description(host_id, service_id)
+  local service = broker_cache:get_service_description(host_id, service_id)
+  if not service then
+    service = service_id
+  end
+  return service
+end
+
+--------------------------------------------------------------------------------
+-- event_queue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(conf)
+  local retval = {
+    http_server_url = "",
+    http_proxy_string = "",
+    http_timeout = 5,
+    splunk_sourcetype = "_json",
+    splunk_source = "",
+    splunk_token = "",
+    splunk_index = "",
+    splunk_host = "Centreon",
+    filter_type = "metric,status",
+    max_buffer_size = 1,
+    max_buffer_age = 5,
+    skip_anon_events = 1
+  }
+  for i,v in pairs(conf) do
+    if retval[i] then
+      retval[i] = v
+      broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v)
     end
-    splunk_event_data = {
-      sourcetype = self.splunk_sourcetype,
-      source = self.splunk_sourcename,
-      time = e.ctime,
-      event = event_data
-    }
-    local json_splunk_event_data = broker.json_encode(splunk_event_data)
-    broker_log:info(3, "event_queue:add: Adding event #" .. #self.events)
-    broker_log:info(3, "event_queue:add: event json: " .. json_splunk_event_data)
-    self.events[#self.events + 1] = json_splunk_event_data
-
-    if #self.events < self.buffer_size then
-      return false
-    else
-      self:flush()
-      return true
-    end
-end
+  end
+  -- Internal data initialization
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
json_splunk_event_data) - self.events[#self.events + 1] = json_splunk_event_data - - if #self.events < self.buffer_size then - return false + end + retval.__internal_ts_last_flush = os.time() + retval.events = {}, + setmetatable(retval, EventQueue) +-- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + return retval +end + +-------------------------------------------------------------------------------- +---- EventQueue:add method +---- @param e An event +---------------------------------------------------------------------------------- + +function EventQueue:add(e) + + local type = "host" + local hostname = get_hostname(e.host_id) + if hostname == e.host_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'") + return false else - self:flush() - return true + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only") end + end + + local service_description = "" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + end + end + end + + local event_data = { + service_description = service_description, + hostname = hostname, + ctime = e.ctime + } + + event_data["metric_name:" .. e.name] = e.value + + self.events[#self.events + 1] = { + time = e.ctime, + source = self.splunk_source, + sourcetype = self.splunk_sourcetype, + index = self.splunk_index, + host = self.splunk_host, + fields = event_data + } + + return true + end + -------------------------------------------------------------------------------- +---- EventQueue:flush method +---- Called when the max number of events or the max age are reached +---------------------------------------------------------------------------------- + +function EventQueue:flush() + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + local http_post_data = "" + for _, raw_event in ipairs(self.events) do + http_post_data = http_post_data .. broker.json_encode(raw_event) + end + for s in http_post_data:gmatch("[^\r\n]+") do + broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + end + + broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"") + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.http_server_url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.http_timeout) + :setopt( + curl.OPT_HTTPHEADER, + { + "content-type: application/json", + "content-length:" .. string.len(http_post_data), + "authorization: Splunk " .. self.splunk_token, + } + ) + + -- setting the CURLOPT_PROXY + if self.http_proxy_string and self.http_proxy_string ~= "" then + broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. 
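
-- For reference, the lua-curl request pattern used throughout this flush
-- method reduces to the following standalone sketch (the endpoint URL and
-- token below are placeholders, not this connector's real configuration):
--[[
local curl = require "cURL"

local post_data = '{"event": "test"}'          -- payload to send
local response_body = ""

local request = curl.easy()
  :setopt_url("https://splunk.example.com:8088/services/collector")
  :setopt_writefunction(function (chunk)       -- accumulates the response body
    response_body = response_body .. tostring(chunk)
  end)
  :setopt(curl.OPT_TIMEOUT, 5)
  :setopt(curl.OPT_HTTPHEADER, {
    "content-type: application/json",
    "authorization: Splunk 00000000-0000-0000-0000-000000000000",
  })

request:setopt_postfields(post_data)           -- attach the POST body
request:perform()                              -- blocking send
local code = request:getinfo(curl.INFO_RESPONSE_CODE)
--]]
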
"'") + http_request:setopt(curl.OPT_PROXY, self.http_proxy_string) + end + + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + -- Handling the return code + local retval = false + if http_response_code == 200 then + broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code) + -- now that the data has been sent, we empty the events array + self.events = {} + retval = true + else + broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code) + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. "\n\"\n") + end + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end -------------------------------------------------------------------------------- --- Fonctions requises pour Broker StreamConnector +-- Required functions for Broker StreamConnector -------------------------------------------------------------------------------- +local queue + -- Fonction init() function init(conf) - broker_log:set_parameters(1, "/var/log/centreon-broker/stream-connector.log") - broker_log:info(2, "init: Beginning init() function") - queue = event_queue:new(nil, conf) - broker_log:info(2, "init: Ending init() function, Event queue created") + local log_level = 1 + local log_path = "/var/log/centreon-broker/stream-connector-splunk-metrics.log" + for i,v in pairs(conf) do + if i == "log_level" then + log_level = v + end + if i == "log_path" then + log_path = v + end + end + broker_log:set_parameters(log_level, log_path) + broker_log:info(0, "init: Starting Splunk Metrics StreamConnector (log level: " .. log_level .. ")") + broker_log:info(2, "init: Beginning init() function") + queue = EventQueue.new(conf) + broker_log:info(2, "init: Ending init() function, Event queue created") end -- Fonction write() function write(e) - broker_log:info(3, "write: Beginning write() function") + broker_log:info(3, "write: Beginning function") + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Here come the filters + -- Host/service status only + if not (e.category == 3 and e.element == 1) then + broker_log:info(3, "write: Not a metric event. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + broker_log:info(3, "write: Raw event: " .. current_event) + -- + + -- Once all the filters have been passed successfully, we can add the current event to the queue queue:add(e) - broker_log:info(3, "write: Ending write() function") - return true -end --- Fonction filter() -function filter(category, element) - --broker_log:info(3, "category: ".. category .. " - element: " .. element) - if category == 3 and element == 1 then - return true + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. 
") is reached, flushing data") + return queue:flush() end - return false + broker_log:info(3, "write: Ending function") + + return true end diff --git a/stream-connectors/splunk/splunk-metrics-luacurl.lua b/stream-connectors/splunk/splunk-metrics-luacurl.lua new file mode 100755 index 00000000000..06c80ade68a --- /dev/null +++ b/stream-connectors/splunk/splunk-metrics-luacurl.lua @@ -0,0 +1,310 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Splunk Connector Metrics +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Prerequisites +-- You need a Splunk instance +-- You need to create a new HTTP events collector with a metrics index and get a token +-- +-- The lua-curl and luatz libraries are required by this script: +-- yum install lua-curl epel-release +-- yum install luarocks +-- luarocks install luatz +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Parameters: +-- [MANDATORY] http_server_url: your splunk API url +-- [MANDATORY] splunk_index: index where you want to store the events +-- [MANDATORY] splunk_token: see above, this will be your authentication token +-- [OPTIONAL] splunk_source: source of the HTTP events collector, must be http:something +-- [OPTIONAL] splunk_sourcetype: sourcetype of the HTTP events collector, default _json +-- [OPTIONAL] splunk_host: host field for the HTTP events collector, default Central +-- [OPTIONAL] http_proxy_string: default empty +-- +-------------------------------------------------------------------------------- + +-- Libraries +local curl = require "cURL" +-- Global variables +local previous_event = "" + +-- Useful functions +local function ifnil(var, alt) + if var == nil then + return alt + else + return var + end +end + +local function ifnil_or_empty(var, alt) + if var == nil or var == '' then + return alt + else + return var + end +end + +local function get_hostname(host_id) + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + hostname = host_id + end + return hostname +end + +local function get_service_description(host_id, service_id) + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + service = service_id + end + return service +end + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(conf) + local retval = { + http_server_url = "", + http_proxy_string = "", + http_timeout = 5, + splunk_sourcetype = "_json", + splunk_source = "Centreon", + splunk_token = "", + splunk_index = "", + splunk_host = "Central", + filter_type = "metric,status", + max_buffer_size = 1, + max_buffer_age = 5, + skip_anon_events = 1 + } + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. 
v) + else + broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v) + end + end + retval.__internal_ts_last_flush = os.time() + retval.events = {}, + setmetatable(retval, EventQueue) +-- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + return retval +end + +-------------------------------------------------------------------------------- +---- EventQueue:add method +---- @param e An event +---------------------------------------------------------------------------------- + +function EventQueue:add(e) + + local type = "host" + local hostname = get_hostname(e.host_id) + if hostname == e.host_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'") + return false + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only") + end + end + + local service_description = "" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + end + end + end + + local event_data = { + service_description = ifnil_or_empty(service_description,hostname), + hostname = hostname, + ctime = e.last_check + } + + -- Managing perfdata + local metrics = "" + if e.perfdata then + local perf, err_str = broker.parse_perfdata(e.perfdata, true) + if perf then + for key,v in pairs(perf) do + event_data["metric_name:" .. key] = tostring(v.value) + end + end + end + + self.events[#self.events + 1] = { + time = e.last_time, + source = self.splunk_source, + sourcetype = self.splunk_sourcetype, + index = self.splunk_index, + host = self.splunk_host, + fields = event_data + } + + return true + +end + +-------------------------------------------------------------------------------- +---- EventQueue:flush method +---- Called when the max number of events or the max age are reached +---------------------------------------------------------------------------------- + +function EventQueue:flush() + + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + local http_post_data = "" + for _, raw_event in ipairs(self.events) do + http_post_data = http_post_data .. broker.json_encode(raw_event) + end + for s in http_post_data:gmatch("[^\r\n]+") do + broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + end + + broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"") + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.http_server_url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.http_timeout) + :setopt( + curl.OPT_HTTPHEADER, + { + "content-type: application/json", + "content-length:" .. string.len(http_post_data), + "authorization: Splunk " .. 
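
-- A note on the constructor above: 'retval.events = {},' ends with a comma,
-- which is not valid Lua in statement position; each initialisation must be
-- a statement of its own. A minimal sketch of the intended sequence:
--[[
retval.__internal_ts_last_flush = os.time()
retval.events = {}                 -- no trailing comma
setmetatable(retval, EventQueue)   -- attach the class metatable
--]]
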
self.splunk_token, + } + ) + + -- setting the CURLOPT_PROXY + if self.http_proxy_string and self.http_proxy_string ~= "" then + broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'") + http_request:setopt(curl.OPT_PROXY, self.http_proxy_string) + end + + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + -- Handling the return code + local retval = false + if http_response_code == 200 then + broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code) + -- now that the data has been sent, we empty the events array + self.events = {} + retval = true + else + broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code) + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. "\n\"\n") + end + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + local log_level = 1 + local log_path = "/var/log/centreon-broker/stream-connector-splunk-metrics.log" + for i,v in pairs(conf) do + if i == "log_level" then + log_level = v + end + if i == "log_path" then + log_path = v + end + end + broker_log:set_parameters(log_level, log_path) + broker_log:info(0, "init: Starting Splunk Metrics StreamConnector (log level: " .. log_level .. ")") + broker_log:info(2, "init: Beginning init() function") + queue = EventQueue.new(conf) + broker_log:info(2, "init: Ending init() function, Event queue created") +end + +-- Fonction write() +function write(e) + broker_log:info(3, "write: Beginning function") + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Here come the filters + -- Host/service status only + if not (e.category == 1 and (e.element == 14 or e.element == 24)) then + broker_log:info(3, "write: Neither host nor service status event. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + broker_log:info(2, "write: Raw event: " .. current_event) + -- + + -- Ignore pending states + if e.state and e.state == 4 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Pending state ignored. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + broker_log:info(2, "write: Raw event: " .. 
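
-- The category/element pair tested in write() above comes from the BBDO
-- protocol: NEB events are category 1, and within NEB, element 14 is
-- host_status and element 24 is service_status. The same filter written as a
-- standalone predicate, as a sketch:
--[[
local function is_status_event(e)
  -- NEB category only
  if e.category ~= 1 then
    return false
  end
  -- host_status (14) or service_status (24)
  return e.element == 14 or e.element == 24
end
--]]
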
current_event) + -- + + -- The current event now becomes the previous + previous_event = current_event + -- Once all the filters have been passed successfully, we can add the current event to the queue + queue:add(e) + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + broker_log:info(3, "write: Ending function") + + return true +end From 71f67c1ce8d975915ccca5ebd988eea378939ddb Mon Sep 17 00:00:00 2001 From: omercier <32134301+omercier@users.noreply.github.com> Date: Wed, 7 Oct 2020 09:57:55 +0200 Subject: [PATCH 047/219] enh(pagerduty): various improvements to PagerDuty connector (#36) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Allow source field configuration => if user set a value then use this value, else use hostname like it is now if pdy_source is configured in the output, then the string is used as the source field. If not, the host name is used. * Add hostname in the title “Hostname/Servicedescription:” added at the beginning of the title (“Summary”) * Remove metrics (it will avoid the parsing bug also) Done. * Add all possible dimensions in customer details (*Groups / *Categories) Categories are not available if they are not severities. Severity numbers have been added. --- stream-connectors/pagerduty/pagerduty.lua | 82 ++++++++++++++++++----- 1 file changed, 66 insertions(+), 16 deletions(-) diff --git a/stream-connectors/pagerduty/pagerduty.lua b/stream-connectors/pagerduty/pagerduty.lua index f4091403691..4d25734e1a9 100644 --- a/stream-connectors/pagerduty/pagerduty.lua +++ b/stream-connectors/pagerduty/pagerduty.lua @@ -93,6 +93,7 @@ function EventQueue.new(conf) http_server_url = "https://events.pagerduty.com/v2/enqueue", http_proxy_string = "", http_timeout = 5, + pdy_source = "", pdy_routing_key = "Please fill pdy_routing_key in StreamConnector parameter", pdy_centreon_url = "http://set.pdy_centreon_url.parameter", filter_type = "metric,status", @@ -164,7 +165,7 @@ function EventQueue:add(e) broker_log:info(3, "EventQueue:add: Severity converted from " .. e.state .. " to \"" .. pdy_severity .. "\"") -- handling empty output (empty "summary" cannot be sent to PagerDuty) - local pdy_summary = ifnil_or_empty(string.match(e.output, "^(.*)\n"), 'no output') + local pdy_summary = hostname .. "/" .. service_description .. ": " .. ifnil_or_empty(string.match(e.output, "^(.*)\n"), 'no output') -- basic management of "class" attribute local pdy_class @@ -184,34 +185,83 @@ function EventQueue:add(e) broker_log:info(3, "EventQueue:add: Since severity is \"" .. pdy_severity .. "\", event_action is \"" .. pdy_event_action .. "\"") -- Managing perfdata - local custom_details = {} - if e.perfdata then - broker_log:info(3, "EventQueue:add: Perfdata list: " .. broker.json_encode(e.perfdata) .. " ") - -- Case when the perfdata name is delimited with simple quotes: spaces allowed - for metric_name, metric_value in e.perfdata:gmatch("%s?'(.+)'=(%d+[%a]?);?[%W;]*%s?") do - broker_log:info(3, "EventQueue:add: Perfdata " .. metric_name .. " = " .. metric_value) - custom_details[metric_name] = metric_value + local pdy_custom_details = {} +-- if e.perfdata then +-- broker_log:info(3, "EventQueue:add: Perfdata list: " .. broker.json_encode(e.perfdata) .. 
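
-- For reference, the two gmatch patterns in the disabled block here split a
-- Nagios-style perfdata string; an illustration of what they would capture
-- (the sample perfdata string is invented for the example):
--[[
local perfdata = "'used space'=73%;80;90;0;100 free=27%;;;;"
-- quoted form   -> metric_name = "used space", metric_value = "73"
-- unquoted form -> metric_name = "free",       metric_value = "27"
--]]
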
" ") +-- -- Case when the perfdata name is delimited with simple quotes: spaces allowed +-- for metric_name, metric_value in e.perfdata:gmatch("%s?'(.+)'=(%d+[%a]?);?[%W;]*%s?") do +-- broker_log:info(3, "EventQueue:add: Perfdata " .. metric_name .. " = " .. metric_value) +-- pdy_custom_details[metric_name] = metric_value +-- end +-- -- Case when the perfdata name is NOT delimited with simple quotes: no spaces allowed +-- for metric_name, metric_value in e.perfdata:gmatch("%s?([^'][%S]+[^'])=(%d+[%a]?);?[%W;]*%s?") do +-- broker_log:info(3, "EventQueue:add: Perfdata " .. metric_name .. " = " .. metric_value) +-- pdy_custom_details[metric_name] = metric_value +-- end +-- end + + -- Hostgroups + local host_hg_array = broker_cache:get_hostgroups(e.host_id) + local pdy_hostgroups = "" + -- case when no filter has been set for hostgroups + for i = 1, #host_hg_array do + if pdy_hostgroups ~= "" then + pdy_hostgroups = pdy_hostgroups .. ", " .. ifnil_or_empty(host_hg_array[i].group_name, "empty host group") + else + pdy_hostgroups = ifnil_or_empty(host_hg_array[i].group_name, "empty host group") + end + end + + -- Servicegroups + if e.service_id then + local service_hg_array = broker_cache:get_servicegroups(e.host_id, e.service_id) + local pdy_servicegroups = "" + -- case when no filter has been set for servicegroups + for i = 1, #service_hg_array do + if pdy_servicegroups ~= "" then + pdy_servicegroups = pdy_servicegroups .. ", " .. ifnil_or_empty(service_hg_array[i].group_name, "empty service group") + else + pdy_servicegroups = ifnil_or_empty(service_hg_array[i].group_name, "empty service group") + end end - -- Case when the perfdata name is NOT delimited with simple quotes: no spaces allowed - for metric_name, metric_value in e.perfdata:gmatch("%s?([^'][%S]+[^'])=(%d+[%a]?);?[%W;]*%s?") do - broker_log:info(3, "EventQueue:add: Perfdata " .. metric_name .. " = " .. metric_value) - custom_details[metric_name] = metric_value + end + + local pdy_custom_details = {} + + local host_severity = broker_cache:get_severity(e.host_id) + if host_severity ~= nil then + pdy_custom_details["Hostseverity"] = host_severity + end + + if e.service_id then + local service_severity = broker_cache:get_severity(e.host_id, e.service_id) + if service_severity ~= nil then + pdy_custom_details["Serviceseverity"] = service_severity end end - -- FIXME: customize usage of "group" + if pdy_hostgroups ~= "" then + pdy_custom_details["Hostgroups"] = pdy_hostgroups + end + if pdy_servicegroups ~= "" then + pdy_custom_details["Servicegroups"] = pdy_servicegroups + end + local pdy_source_field = hostname + if self.pdy_source and self.pdy_source ~= "" then + pdy_source_field = self.pdy_source + end -- Appending the current event to the queue self.events[#self.events + 1] = { payload = { summary = pdy_summary, timestamp = pdy_timestamp, severity = pdy_severity, - source = hostname, + source = pdy_source_field, component = service_description, - --group, FIXME: get hostgroup matching a filter for PagerDuty? 
+      group = pdy_hostgroups,
       class = pdy_class,
-      custom_details = custom_details
+      custom_details = pdy_custom_details
     },
     routing_key = self.pdy_routing_key,
     dedup_key = pdy_dedup_key,

From fe9a8f17129cc998cc7848e055dda7a7bd583ba3 Mon Sep 17 00:00:00 2001
From: ppremont-capensis <72454406+ppremont-capensis@users.noreply.github.com>
Date: Wed, 14 Oct 2020 11:28:10 +0200
Subject: [PATCH 048/219] Fix hostgroups for services + state change condition
 (#37)

* add hostgroups for service events and fix state change condition

* fix service extra hostgroups information
---
 stream-connectors/README.md                  | 2 +-
 stream-connectors/canopsis/bbdo2canopsis.lua | 7 +++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 177fd3577e4..c17c209a71d 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -530,8 +530,8 @@ Extra information is added to the hosts and services as below:

 - action_url
 - notes_url
+- hostgroups
 - servicegroups (for service events)
-- hostgroups (for host events)

 ### Acknowledgment

diff --git a/stream-connectors/canopsis/bbdo2canopsis.lua b/stream-connectors/canopsis/bbdo2canopsis.lua
index 59b3065281f..2e516f621ee 100755
--- a/stream-connectors/canopsis/bbdo2canopsis.lua
+++ b/stream-connectors/canopsis/bbdo2canopsis.lua
@@ -297,7 +297,8 @@ local function canopsisMapping(d)
       -- extra information
       servicegroups = getServiceGroups(d.host_id, d.service_id),
       notes_url = getNotesURL(d.host_id, d.service_id),
-      action_url = getActionURL(d.host_id, d.service_id)
+      action_url = getActionURL(d.host_id, d.service_id),
+      hostgroups = getHostGroups(d.host_id)
     }
     debug("Streaming SERVICE STATUS for service_id ".. d.service_id)
 -- ACK
@@ -390,7 +391,9 @@ function stateChanged(d)
   if d.state_type == 1 and -- if the event is in hard state
      d.last_hard_state_change ~= nil then -- if the event has been in a hard state

-    if d.last_check == d.last_hard_state_change then -- if the state has changed
+    -- if the state has changed
+    -- (as noted in the OMI connector, there can be a slight delta between last_check and last_hard_state_change)
+    if math.abs(d.last_check - d.last_hard_state_change) < 10 then

       if d.service_id then
         debug("HARD state change detected for service_id [" .. d.service_id .. 
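
-- The tolerance test introduced above can be read as a small helper; a sketch
-- of the same idea (the 10-second window is the value used by this patch):
--[[
-- true when the current check is the one that produced the hard state change
local function state_just_changed(d)
  if d.last_check == nil or d.last_hard_state_change == nil then
    return false
  end
  return math.abs(d.last_check - d.last_hard_state_change) < 10
end
--]]
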
"]") From 9dd540ba10116fe5edf40f54d4667ad47097183c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duret?= Date: Thu, 26 Nov 2020 18:01:09 +0100 Subject: [PATCH 049/219] fix(doc) - add prerequisites for OMI --- stream-connectors/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stream-connectors/README.md b/stream-connectors/README.md index c17c209a71d..be223830be3 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -347,6 +347,14 @@ By default logs are in /var/log/centreon-broker/ndo-output.log ## stream connector for HP OMI : *omi/omi_connector.lua* +### Prerequisites + +* lua version >= 5.1.4 +* install lua-socket library (http://w3.impa.br/~diego/software/luasocket/) + * from sources, you have to install also gcc + lua-devel packages + +### Configuration + Create a broker output for HP OMI Connector Parameters to specify in the broker output web ui are: From aa3f88dfacb4b79c0c23b1393d3f1e112fbf0076 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duret?= Date: Tue, 26 Jan 2021 15:35:49 +0100 Subject: [PATCH 050/219] avoid "attempt to compare number with nil" --- stream-connectors/pagerduty/pagerduty.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/pagerduty/pagerduty.lua b/stream-connectors/pagerduty/pagerduty.lua index 4d25734e1a9..20bbf6bac5a 100644 --- a/stream-connectors/pagerduty/pagerduty.lua +++ b/stream-connectors/pagerduty/pagerduty.lua @@ -411,7 +411,7 @@ function write(e) end -- Ignore states different from previous hard state only - if e.last_hard_state_change and e.last_hard_state_change < e.last_check then + if e.last_hard_state_change and e.last_check and e.last_hard_state_change < e.last_check then broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Last hard state change prior to last check => no state change. 
Dropping.") return true end From 740c3fa52be3618568289ec4b3f192042f83f376 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 11 Feb 2021 09:52:33 +0100 Subject: [PATCH 051/219] add prometheus gateway stream connector (#41) * create prom gateway stream connector * fix hostgroup filter --- .../prometheus-gateway/prometheus-gateway.lua | 948 ++++++++++++++++++ 1 file changed, 948 insertions(+) create mode 100644 stream-connectors/prometheus-gateway/prometheus-gateway.lua diff --git a/stream-connectors/prometheus-gateway/prometheus-gateway.lua b/stream-connectors/prometheus-gateway/prometheus-gateway.lua new file mode 100644 index 00000000000..7784635d3e4 --- /dev/null +++ b/stream-connectors/prometheus-gateway/prometheus-gateway.lua @@ -0,0 +1,948 @@ +#!/usr/bin/lua + +-- libraries +local curl = require "cURL" + +-- Global variables + +-- Useful functions + +-------------------------------------------------------------------------------- +-- ifnil_or_empty: change a nil or empty variable for a specified value +-- @param var, the variable that needs to be checked +-- @param alt, the value of the variable if it is nil or empty +-- @return alt|var, the alternate value or the variable value +-------------------------------------------------------------------------------- +local function ifnil_or_empty (var, alt) + if var == nil or var == '' then + return alt + else + return var + end +end + + +-------------------------------------------------------------------------------- +-- ifnumber_not_nan: check if a number is a number (and not a NaN) +-- @param {number} number, the number to check +-- @return {boolean} +-------------------------------------------------------------------------------- +local function ifnumber_not_nan (number) + if (number ~= number) then + return false + elseif (type(number) ~= 'number') then + return false + else + return true + end +end + +-------------------------------------------------------------------------------- +-- convert_to_openmetric: replace unwanted characters in order to comply with the open metrics format +-- @param {string} string, the string to convert +-- @return {string} string, a string that matches [a-zA-Z0-9_\.]+ +-------------------------------------------------------------------------------- +local function convert_to_openmetric (string) + if string == nil or string == '' or type(string) ~= 'string' then + return false + end + + return string.gsub(string, '[^a-zA-Z0-9_:]', '_') +end + +-------------------------------------------------------------------------------- +-- unit_mapping: convert perfdata units to openmetrics standard +-- @param {string} unit, the unit value +-- @return {string} unit, the openmetrics unit name +-- @reuturn {boolean}, true if the unit is found in the mapping or empty +-------------------------------------------------------------------------------- +local function unit_mapping (unit) + local unitMapping = { + s = 'seconds', + m = 'meters', + B = 'bytes', + g = 'grams', + V = 'volts', + A = 'amperes', + K = 'kelvins', + ratio = 'ratios', + degres = 'celsius' + } + + local unhandledUnit = nil + + if unit == nil or unit == '' or type(unit) ~= 'string' then + unit = '' + end + + if unit == '%' then + unit = unitMapping['ratio'] + elseif unit == '°' then + unit = unitMapping['degres'] + else + if (unitMapping[unit] ~= nil) then + unit = unitMapping[unit] + end + end + + return unit, true +end + +-------------------------------------------------------------------------------- +-- boolean_to_number: convert boolean variable to 
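
-- A note on ifnumber_not_nan above: in Lua, NaN is the only value that is not
-- equal to itself, so 'number ~= number' is the usual NaN test. A short
-- illustration of the behaviour this helper relies on:
--[[
local nan = 0 / 0     -- produces NaN
print(nan ~= nan)     --> true: NaN never equals itself, which the helper detects
print(1 ~= 1)         --> false for any ordinary number
--]]
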
number +-- @param {boolean} boolean, the boolean that will be converted +-- @return {number}, a number according to the boolean value +-------------------------------------------------------------------------------- +local function boolean_to_number (boolean) + return boolean and 1 or 0 +end + +-------------------------------------------------------------------------------- +-- check_boolean_number_option_syntax: make sure the number is either 1 or 0 +-- @param {number} number, the boolean number that must be validated +-- @param {number} default, the default value that is going to be return if the default number is not validated +-- @return {number} number, a boolean number +-------------------------------------------------------------------------------- +local function check_boolean_number_option_syntax (number, default) + if number ~= 1 and number ~= 0 then + number = default + end + + return number +end + +-------------------------------------------------------------------------------- +-- get_hostname: retrieve hostname from host_id +-- @param {number} host_id, +-- @return {string} hostname, +-------------------------------------------------------------------------------- +local function get_hostname (host_id) + if host_id == nil then + broker_log:warning(1, "get_hostname: host id is nil") + hostname = 0 + return hostname + end + + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. Restarting centengine should fix this.") + hostname = host_id + end + + return hostname +end + +-------------------------------------------------------------------------------- +-- get_hostgroups: retrieve hostgroups from host_id +-- @param {number} host_id, +-- @return {array} hostgroups, +-------------------------------------------------------------------------------- +local function get_hostgroups (host_id) + if host_id == nil then + broker_log:warning(1, "get_hostgroup: host id is nil") + return false + end + + local hostgroups = broker_cache:get_hostgroups(host_id) + if not hostgroups then + broker_log:warning(1, "get_hostgroups: no hostgroup for host id " .. host_id .. " found.") + return false + end + + return hostgroups +end + +-------------------------------------------------------------------------------- +-- get_service_description: retrieve the service name from its host_id and service_id +-- @param {number} host_id, +-- @param {number} service_id, +-- @return {string} service, the name of the service +-------------------------------------------------------------------------------- +local function get_service_description (host_id, service_id) + if host_id == nil or service_id == nil then + service = 0 + broker_log:warning(1, "get_service_description: host id or service id has a nil value") + + return service + end + + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. " not found. 
Restarting centengine should fix this.") + service = service_id + end + + return service +end + +-------------------------------------------------------------------------------- +-- split: convert a string into a table +-- @param {string} string, the string that is going to be splitted into a table +-- @param {string} separatpr, the separator character that will be used to split the string +-- @return {table} table, +-------------------------------------------------------------------------------- +local function split (text, separator) + local hash = {} + -- https://stackoverflow.com/questions/1426954/split-string-in-lua + for value in string.gmatch(text, "([^" .. separator .. "]+)") do + table.insert(hash, value) + end + + return hash +end + +-------------------------------------------------------------------------------- +-- find_in_mapping: check if item type is in the mapping and is accepted +-- @param {table} mapping, the mapping table +-- @param {string} reference, the accepted values for the item +-- @param {string} item, the item we want to find in the mapping table and in the reference +-- @return {boolean} +-------------------------------------------------------------------------------- +local function find_in_mapping (mapping, reference, item) + for mappingIndex, mappingValue in pairs(mapping) do + for referenceIndex, referenceValue in pairs(split(reference, ',')) do + if item == mappingValue and mappingIndex == referenceValue then + return true + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- find_hostgroup_in_list: check if hostgroups from hosts are in an accepted list from the stream connector configuration +-- @param {table} acceptedHostgroups, the table with the name of accepted hostgroups +-- @param {table} hostHostgroups, the hostgroups associated to an host +-- @return {boolean} +-- @return {string} [optional] acceptedHostgroupsName, the hostgroup name that matched +-------------------------------------------------------------------------------- +local function find_hostgroup_in_list (acceptedHostgroups, hostHostgroups) + for _, acceptedHostgroupsName in ipairs(acceptedHostgroups) do + for _, hostHostgroupsInfo in pairs(hostHostgroups) do + if acceptedHostgroupsName == hostHostgroupsInfo.group_name then + return true, acceptedHostgroupsName + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- check_neb_event_status: check the status of a neb event (ok, critical...) 
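
-- (usage sketch for the split helper defined above, with assumed inputs)
--[[
local accepted = split("0,1,2", ",")   -- -> { "0", "1", "2" }
-- check_neb_event_status, defined below, walks such a list and compares the
-- event state against each accepted value as a string:
-- check_neb_event_status(2, "0,1,2") -> true
-- check_neb_event_status(3, "0,1,2") -> false
--]]
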
+-- @param {number} eventStatus, the status of the event +-- @param {string} acceptedStatus, the event statuses that are going to be accepted +-- @return {boolean} +-------------------------------------------------------------------------------- +local function check_neb_event_status (eventStatus, acceptedStatuses) + for i, v in ipairs(split(acceptedStatuses, ',')) do + if tostring(eventStatus) == v then + return true + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- compare_numbers: compare two numbers, if comparison is valid, then return true +-- @param {number} firstNumber +-- @param {number} secondNumber +-- @param {string} operator, the mathematical operator that is used for the comparison +-- @return {boolean} +-------------------------------------------------------------------------------- +local function compare_numbers (firstNumber, secondNumber, operator) + if type(firstNumber) ~= 'number' or type(secondNumber) ~= 'number' then + return false + end + + if firstNumber .. operator .. secondNumber then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue:new (conf) + local retval = { + host_status = "0,1,2", -- = ok, down, unreachable + service_status = "0,1,2,3", -- = ok, warning, critical, unknown + hard_only = 0, + acknowledged = 1, + element_type = "service_status", -- could be: metric,host_status,service_status,ba_event,kpi_event" (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#neb) + category_type = "storage", -- could be: neb,storage,bam (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#event-categories) + accepted_hostgroups = '', + in_downtime = 1, + max_buffer_size = 1, + max_buffer_age = 5, + skip_anon_events = 1, + skip_nil_id = 1, + enable_threshold_metrics = 0, + enable_status_metrics = 0, + disable_bam_host = 1, + add_hostgroups = 1, + enable_extended_metric_name = 0, + prometheus_gateway_address = 'http://localhost', + prometheus_gateway_port = '9091', + prometheus_gateway_job = 'monitoring', + prometheus_gateway_instance = 'centreon', + http_timeout = 60, + proxy_address = '', + proxy_port = '', + proxy_username = '', + proxy_password = '', + current_event = nil, + element_mapping = {}, + category_mapping = {} + } + + retval.category_mapping = { + neb = 1, + bbdo = 2, + storage = 3, + correlation = 4, + dumper = 5, + bam = 6, + extcmd = 7 + } + + retval.element_mapping = { + [1] = {}, + [3] = {}, + [6] = {} + } + + retval.element_mapping[1].acknowledgement = 1 + retval.element_mapping[1].comment = 2 + retval.element_mapping[1].custom_variable = 3 + retval.element_mapping[1].custom_variable_status = 4 + retval.element_mapping[1].downtime = 5 + retval.element_mapping[1].event_handler = 6 + retval.element_mapping[1].flapping_status = 7 + retval.element_mapping[1].host_check = 8 + retval.element_mapping[1].host_dependency = 9 + retval.element_mapping[1].host_group = 10 + retval.element_mapping[1].host_group_member 
= 11 + retval.element_mapping[1].host = 12 + retval.element_mapping[1].host_parent = 13 + retval.element_mapping[1].host_status = 14 + retval.element_mapping[1].instance = 15 + retval.element_mapping[1].instance_status = 16 + retval.element_mapping[1].log_entry = 17 + retval.element_mapping[1].module = 18 + retval.element_mapping[1].service_check = 19 + retval.element_mapping[1].service_dependency = 20 + retval.element_mapping[1].service_group = 21 + retval.element_mapping[1].service_group_member = 22 + retval.element_mapping[1].service = 23 + retval.element_mapping[1].service_status = 24 + retval.element_mapping[1].instance_configuration = 25 + + retval.element_mapping[3].metric = 1 + retval.element_mapping[3].rebuild = 2 + retval.element_mapping[3].remove_graph = 3 + retval.element_mapping[3].status = 4 + retval.element_mapping[3].index_mapping = 5 + retval.element_mapping[3].metric_mapping = 6 + + retval.element_mapping[6].ba_status = 1 + retval.element_mapping[6].kpi_status = 2 + retval.element_mapping[6].meta_service_status = 3 + retval.element_mapping[6].ba_event = 4 + retval.element_mapping[6].kpi_event = 5 + retval.element_mapping[6].ba_duration_event = 6 + retval.element_mapping[6].dimension_ba_event = 7 + retval.element_mapping[6].dimension_kpi_event = 8 + retval.element_mapping[6].dimension_ba_bv_relation_event = 9 + retval.element_mapping[6].dimension_bv_event = 10 + retval.element_mapping[6].dimension_truncate_table_signal = 11 + retval.element_mapping[6].bam_rebuild = 12 + retval.element_mapping[6].dimension_timeperiod = 13 + retval.element_mapping[6].dimension_ba_timeperiod_relation = 14 + retval.element_mapping[6].dimension_timeperiod_exception = 15 + retval.element_mapping[6].dimension_timeperiod_exclusion = 16 + retval.element_mapping[6].inherited_downtime = 17 + + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v) + else + broker_log:info(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v) + end + end + + retval.hard_only = check_boolean_number_option_syntax(retval.hard_only, 1) + retval.acknowledged = check_boolean_number_option_syntax(retval.acknowledged, 0) + retval.in_downtime = check_boolean_number_option_syntax(retval.in_downtime, 0) + retval.skip_anon_events = check_boolean_number_option_syntax(retval.skip_anon_events, 1) + retval.skip_nil_id = check_boolean_number_option_syntax(retval.skip_nil_id, 1) + retval.enable_threshold_metrics = check_boolean_number_option_syntax(retval.enable_threshold_metrics, 1) + retval.enable_status_metrics = check_boolean_number_option_syntax(retval.enable_status_metrics, 1) + retval.disable_bam_host = check_boolean_number_option_syntax(retval.disable_bam_host, 1) + retval.enable_extended_metric_name = check_boolean_number_option_syntax(retval.enable_extended_metric_name, 0) + retval.add_hostgroups = check_boolean_number_option_syntax(retval.add_hostgroups, 1) + + retval.__internal_ts_last_flush = os.time() + retval.events = {} + setmetatable(retval, EventQueue) + -- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. 
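
-- A note on the compare_numbers helper defined earlier in this file: the
-- expression 'firstNumber .. operator .. secondNumber' only builds a string
-- such as "1>=0", and any string is truthy in Lua, so that test passes for
-- every input. A real comparison needs an explicit dispatch on the operator;
-- a minimal sketch of such a helper:
--[[
local function compare_numbers(first, second, operator)
  if type(first) ~= 'number' or type(second) ~= 'number' then
    return false
  end
  if operator == '>=' then return first >= second end
  if operator == '<=' then return first <= second end
  if operator == '>'  then return first > second end
  if operator == '<'  then return first < second end
  if operator == '==' then return first == second end
  return false
end
--]]
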
retval.__internal_ts_last_flush) + + return retval +end + +-------------------------------------------------------------------------------- +-- is_valid_category: check if the event category is valid +-- @param {number} category, the category id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_category (category) + return find_in_mapping(self.category_mapping, self.category_type, category) +end + + +-------------------------------------------------------------------------------- +-- is_valid_element: check if the event element is valid +-- @param {number} category, the category id of the event +-- @param {number} element, the element id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_element (category, element) + return find_in_mapping(self.element_mapping[category], self.element_type, element) +end + +-------------------------------------------------------------------------------- +-- is_valid_neb_event: check if the neb event is valid +-- @param {table} event, the event data +-- @return {table} validNebEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_neb_event () + if self.current_event.element == 14 or self.current_event.element == 24 then + self.current_event.hostname = get_hostname(self.current_event.host_id) + + -- can't find hostname in cache + if self.current_event.hostname == self.current_event.host_id and self.skip_anon_events == 1 then + return false + end + + -- can't find host_id in the event + if self.current_event.hostname == 0 and self.skip_nil_id == 1 then + return false + end + + -- host is a BA + if (string.find(tostring(self.current_event.hostname), '_Module_BAM_') and self.disable_bam_host == 1) then + return false + end + + self.current_event.hostname = tostring(self.current_event.hostname) + + -- output isn't required, we only need perfdatas + -- self.current_event.output = ifnil_or_empty(string.match(self.current_event.output, "^(.*)\n"), 'no output') + end + + if self.current_event.element == 14 then + if not check_neb_event_status(self.current_event.state, self.host_status) then + return false + end + elseif self.current_event.element == 24 then + self.current_event.service_description = get_service_description(self.current_event.host_id, self.current_event.service_id) + + -- can't find service description in cache + if self.current_event.service_description == self.current_event.service_id and self.skip_anon_events == 1 then + return false + end + + if not check_neb_event_status(self.current_event.state, self.service_status) then + return false + end + + -- can't find service_id in the event + if self.current_event.service_description == 0 and self.skip_nil_id == 1 then + return false + end + end + + -- check hard state + if not compare_numbers(self.current_event.state_type, self.hard_only, '>=') then + return false + end + + -- check ack + if not compare_numbers(self.acknowledged, boolean_to_number(self.current_event.acknowledged), '>=') then + return false + end + + -- check downtime + if not compare_numbers(self.in_downtime, self.current_event.scheduled_downtime_depth, '>=') then + return false + end + + self.current_event.service_description = tostring(self.current_event.service_description) + + if not self:is_valid_hostgroup() then + return false + 
end + + return true +end + +-------------------------------------------------------------------------------- +-- is_valid_storage_event: check if the storage event is valid +-- @param {table} event, the event data +-- @return {table} validStorageEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_storage_event () + return true +end + +-------------------------------------------------------------------------------- +-- is_valid_bam_event: check if the bam event is valid +-- @param {table} event, the event data +-- @return {table} validBamEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_bam_event () + return true +end + +-------------------------------------------------------------------------------- +-- is_valid_event: check if the event is valid +-- @param {table} event, the event data +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_event () + local validEvent = false + + if self.current_event.category == 1 then + validEvent = self:is_valid_neb_event() + elseif self.current_event.category == 3 then + validEvent = self:is_valid_storage_event() + elseif self.current_event.category == 6 then + validEvent = self:is_valid_bam_event() + end + + return validEvent +end + + -------------------------------------------------------------------------------- + -- is_valid_hostgroup: check if the event is associated to an accepted hostgroup + -- @return {boolean} + -------------------------------------------------------------------------------- +function EventQueue:is_valid_hostgroup () + self.current_event.hostgroups = get_hostgroups(self.current_event.host_id) + + -- return true if option is not set + if self.accepted_hostgroups == '' then + return true + end + + -- drop event if we can't find any hostgroup on the host + if not self.current_event.hostgroups then + broker_log:info(2, 'EventQueue:is_valid_hostgroup: dropping event because no hostgroup has been found for host_id: ' .. self.current_event.host_id) + return false + end + + -- check if hostgroup is in the list of the accepted one + local retval, matchedHostgroup = find_hostgroup_in_list(split(self.accepted_hostgroups, ','), self.current_event.hostgroups) + + if matchedHostgroup == nil then + broker_log:info(2, 'EventQueue:is_valid_hostgroup: no hostgroup matched provided list: ' .. self.accepted_hostgroups .. ' for host_id: ' .. self.current_event.host_id .. '') + else + broker_log:info(2, 'EventQueue:is_valid_hostgroup: host_id: ' .. self.current_event.host_id .. ' matched is in the following hostgroup: ' .. matchedHostgroup) + end + + return retval +end + + +-------------------------------------------------------------------------------- +-- display_hostgroups: create the hostgroup label for the metric +-- @return {string} hostgroupLabel: the full label for the metric +-------------------------------------------------------------------------------- +function EventQueue:display_hostgroups () + if not self.current_event.hostgroups then + return false + end + + local hostgroupLabel = 'hostgroup="' + local counter = 0 + + for i, v in pairs(self.current_event.hostgroups) do + if counter == 0 then + hostgroupLabel = hostgroupLabel .. v.group_name + counter = 1 + else + hostgroupLabel = hostgroupLabel .. ',' .. 
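
-- The hostgroup label being assembled here can also be expressed with
-- table.concat; a behaviour-equivalent sketch:
--[[
local function display_hostgroups(hostgroups)
  if not hostgroups then
    return false
  end
  local names = {}
  for _, v in pairs(hostgroups) do
    names[#names + 1] = v.group_name
  end
  return 'hostgroup="' .. table.concat(names, ',') .. '"'
end
--]]
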
v.group_name + end + end + + hostgroupLabel = hostgroupLabel .. '"' + + return hostgroupLabel +end + + +-------------------------------------------------------------------------------- +-- format_data: prepare the event data so it can be sent +-- @return {table|string|number} data, the formated data +-------------------------------------------------------------------------------- +function EventQueue:format_data () + local perf, error = broker.parse_perfdata(self.current_event.perfdata, true) + local type = nil + local data = '' + local name = nil + local unit = nil + + -- handle hostgroups + if self.add_hostgroups == 1 then + self.current_event.hostgroupsLabel = self:display_hostgroups() + else + self.current_event.hostgroupsLabel = false + end + + for label, metric in pairs(perf) do + type = self:get_metric_type(metric) + unit= unit_mapping(metric.uom) + name = self:create_metric_name(label, unit) + + + data = data .. '# TYPE ' .. name .. ' ' .. type .. '\n' + data = data .. self:add_unit_info(label, unit, name) + + if not self.current_event.hostgroupsLabel then + data = data .. name .. '{label="' .. label .. '", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '"} ' .. metric.value .. '\n' + else + data = data .. name .. '{label="' .. label .. '", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '", ' .. self.current_event.hostgroupsLabel .. '} ' .. metric.value .. '\n' + end + + if (self.enable_threshold_metrics == 1) then + data = data .. self:threshold_metrics(metric, label, unit, type) + end + end + + if (self.enable_status_metrics == 1) then + name = convert_to_openmetric(self.current_event.hostname .. '_' .. self.current_event.service_description .. ':' .. label .. ':monitoring_status') + data = data .. '# TYPE ' .. name .. ' counter\n' + data = data .. '# HELP ' .. name .. ' 0 is OK, 1 is WARNING, 2 is CRITICAL, 3 is UNKNOWN\n' + if not self.current_event.hostgroupsLabel then + data = data .. name .. '{label="monitoring_status", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '"} ' .. self.current_event.state .. '\n' + else + data = data .. name .. '{label="monitoring_status", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '", ' .. self.current_event.hostgroupsLabel .. '} ' .. self.current_event.state .. '\n' + end + end + + return data +end + +-------------------------------------------------------------------------------- +-- create_metric_name: concatenates data to create the metric name +-- @param {string} label, the name of the perfdata +-- @param {string} unit, the unit name +-- @return {string} name, the prometheus metric name (open metric format) +-------------------------------------------------------------------------------- +function EventQueue:create_metric_name (label, unit) + local name = '' + + if (unit ~= '') then + if (self.enable_extended_metric_name == 0) then + name = label .. '_' .. unit + else + name = self.current_event.hostname .. '_' .. self.current_event.service_description .. ':' .. label .. '_' .. unit + end + else + if (self.enable_extended_metric_name == 0) then + name = label + else + name = self.current_event.hostname .. '_' .. self.current_event.service_description .. ':' .. 
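
-- To make the target format concrete, this is the kind of OpenMetrics text
-- that format_data assembles (metric, host, service and hostgroup values are
-- illustrative only):
--[[
# TYPE used_bytes gauge
# UNIT used_bytes bytes
used_bytes{label="used", host="srv-db-01", service="Disk-var", hostgroup="db-servers"} 1024
--]]
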
label + end + end + + return convert_to_openmetric(name) +end +-------------------------------------------------------------------------------- +-- get_metric_type: find out the metric type to match openmetrics standard +-- @param {table} perfdata, the perfdata informations +-- @return {string} metricType, the type of the metric +-------------------------------------------------------------------------------- +function EventQueue:get_metric_type (perfdata) + local metricType = nil; + if (ifnumber_not_nan(perfdata.max)) then + metricType = 'gauge' + else + metricType = 'counter' + end + + return metricType +end + +-------------------------------------------------------------------------------- +-- add_unit_info: add unit metadata to match openmetrics standard +-- @param {string} label, the name of the metric +-- @param {string} unit, the unit name +-- @param {string} name, the name of the metric +-- @return {string} data, the unit metadata information +-------------------------------------------------------------------------------- +function EventQueue:add_unit_info (label, unit, name) + local data = '' + + if (unit ~= '' and unit ~= nil) then + data = '# UNIT ' .. name .. '\n' + end + + return data +end + +-------------------------------------------------------------------------------- +-- add_type_info: add unit metadata to match openmetrics standard +-- @param {string} label, the name of the metric +-- @param {string} unit, the unit name +-- @param {string} suffix, a suffix that is part of the metric name +-- @return {string} name, the full metric name (open metric format) +-------------------------------------------------------------------------------- +function EventQueue:add_type_info (label, unit, suffix) + return self:create_metric_name(label, unit) .. '_' .. suffix +end + +-------------------------------------------------------------------------------- +-- threshold_metrics: create openmetrics metrics based on alert thresholds from centreon +-- @param {table} perfdata, perfdata informations +-- @param {string} label, the name of the metric +-- @param {string} unit, the unit name +-- @param {string} type, the type of unit (counter, gauge...) +-- @return {string} data, metrics based on alert thresholds +-------------------------------------------------------------------------------- +function EventQueue:threshold_metrics (perfdata, label, unit, type) + local data = '' + local metricName = nil + + if (ifnumber_not_nan(perfdata.warning_low)) then + metricName = self:add_type_info(label, unit, 'warning_low') + message = 'values below this will trigger a warning alert\n' + data = data .. self:threshold_metrics_format(metricName, label, unit, type, message, perfdata.warning_low) + end + + if (ifnumber_not_nan(perfdata.warning_high)) then + metricName = self:add_type_info(label, unit, 'warning_high') + message = 'alues above this will trigger a warning alert\n' + data = data .. self:threshold_metrics_format(metricName, label, unit, type, message, perfdata.warning_high) + end + + if (ifnumber_not_nan(perfdata.critical_low)) then + metricName = self:add_type_info(label, unit, 'critical_low') + message = 'values below this will trigger a critical alert\n' + data = data .. self:threshold_metrics_format(metricName, label, unit, type, message, perfdata.critical_low) + end + + if (ifnumber_not_nan(perfdata.critical_high)) then + metricName = self:add_type_info(label, unit, 'critical_high') + message = 'values above this will trigger a critical alert\n' + data = data .. 
--------------------------------------------------------------------------------
-- threshold_metrics_format: create data format for threshold metrics
-- @param {string} metricName, the formatted metric name
-- @param {string} label, the name of the metric
-- @param {string} unit, the unit name
-- @param {string} type, the type of metric (counter, gauge...)
-- @param {string} message, the help message describing the threshold
-- @param {number} perfdata, the threshold value
-- @return {string} data, metrics based on alert thresholds
--------------------------------------------------------------------------------
function EventQueue:threshold_metrics_format (metricName, label, unit, type, message, perfdata)
  local data = ''

  data = data .. '# TYPE ' .. metricName .. ' ' .. type .. '\n'
  data = data .. '# UNIT ' .. metricName .. ' ' .. unit .. '\n'
  data = data .. '# HELP ' .. metricName .. ' ' .. message

  if not self.current_event.hostgroupsLabel then
    data = data .. metricName .. '{label="' .. label .. '", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '"} ' .. perfdata .. '\n'
  else
    -- hostgroupsLabel already ends with a closing double quote, so none must be appended here
    data = data .. metricName .. '{label="' .. label .. '", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '", ' .. self.current_event.hostgroupsLabel .. '} ' .. perfdata .. '\n'
  end

  return data
end

local queue

--------------------------------------------------------------------------------
-- init, initiate stream connector with parameters from the configuration file
-- @param {table} parameters, the table with all the configuration parameters
--------------------------------------------------------------------------------
function init (parameters)
  logfile = parameters.logfile or "/var/log/centreon-broker/connector-prometheus-gateway.log"
  broker_log:set_parameters(1, logfile)
  broker_log:info(1, "Parameters")
  for i,v in pairs(parameters) do
    broker_log:info(1, "Init " .. i .. " : " .. v)
  end

  queue = EventQueue:new(parameters)
end

--------------------------------------------------------------------------------
-- EventQueue:add, add an event to the queue
-- @param {string} data, the formatted event that will be added to the queue
-- @return {boolean}
--------------------------------------------------------------------------------
function EventQueue:add (data)
  self.events[#self.events + 1] = data
  return true
end

--------------------------------------------------------------------------------
-- EventQueue:flush, flush stored events
-- Called when the max number of events or the max age are reached
-- @return {boolean}
--------------------------------------------------------------------------------
function EventQueue:flush ()
  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")

  if not self:send_data() then
    broker_log:error(1, "EventQueue:flush: couldn't send data, flushing data anyway")
  end

  self.events = {}

  -- and update the timestamp
  self.__internal_ts_last_flush = os.time()

  return true
end
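
--------------------------------------------------------------------------------
-- Note on the URL built in send_data below (example values, not defaults):
-- with prometheus_gateway_address = "http://pushgateway", port 9091,
-- job "centreon" and instance "poller1", the metrics are pushed to
-- http://pushgateway:9091/metrics/job/centreon/instance/poller1
--------------------------------------------------------------------------------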
--------------------------------------------------------------------------------
-- EventQueue:send_data, send data to external tool
-- @return {boolean}
--------------------------------------------------------------------------------
function EventQueue:send_data ()
  local httpPostData = ''

  for _, raw_event in ipairs(self.events) do
    httpPostData = httpPostData .. raw_event
  end

  local httpResponseBody = ""
  local httpRequest = curl.easy()
    :setopt_url(self.prometheus_gateway_address .. ':' .. self.prometheus_gateway_port .. '/metrics/job/' .. self.prometheus_gateway_job .. '/instance/' .. self.prometheus_gateway_instance)
    :setopt_writefunction(
      function (response)
        httpResponseBody = httpResponseBody .. tostring(response)
      end
    )
    :setopt(curl.OPT_TIMEOUT, self.http_timeout)
    :setopt(
      curl.OPT_HTTPHEADER,
      {
        "content-type: application/openmetrics-text"
      }
    )

  -- set proxy address configuration
  if (self.proxy_address ~= '') then
    if (self.proxy_port ~= '') then
      httpRequest:setopt(curl.OPT_PROXY, self.proxy_address .. ':' .. self.proxy_port)
    else
      broker_log:error(1, "EventQueue:send_data: proxy_port parameter is not set but proxy_address is used")
    end
  end

  -- set proxy user configuration
  if (self.proxy_username ~= '') then
    if (self.proxy_password ~= '') then
      httpRequest:setopt(curl.OPT_PROXYUSERPWD, self.proxy_username .. ':' .. self.proxy_password)
    else
      broker_log:error(1, "EventQueue:send_data: proxy_password parameter is not set but proxy_username is used")
    end
  end

  -- adding the HTTP POST data
  broker_log:info(3, "EventQueue:send_data: POST data: '" .. httpPostData .. "'")
  httpRequest:setopt_postfields(httpPostData)

  -- performing the HTTP request
  httpRequest:perform()

  -- collecting results
  local httpResponseCode = httpRequest:getinfo(curl.INFO_RESPONSE_CODE)

  -- Handling the return code
  local retval = false
  if httpResponseCode == 200 then
    broker_log:info(2, "EventQueue:send_data: HTTP POST request successful: return code is " .. httpResponseCode)
    -- now that the data has been sent, we empty the events array
    self.events = {}
    retval = true
  else
    broker_log:error(1, "EventQueue:send_data: sent request body was " .. httpPostData)
    broker_log:error(1, "EventQueue:send_data: HTTP POST request FAILED, return code is " .. httpResponseCode .. " message is:\n\"" .. httpResponseBody .. "\n\"\n")
  end

  -- and update the timestamp
  self.__internal_ts_last_flush = os.time()
  return retval
end

--------------------------------------------------------------------------------
-- write, process every event forwarded by broker
-- @param {array} event, the event from broker
-- @return {boolean}
--------------------------------------------------------------------------------
function write (event)
  queue.current_event = event

  -- First, are there some old events waiting in the flush queue ?
  if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then
    broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data")
    queue:flush()
  end

  -- Then we check that the event queue is not already full
  if (#queue.events >= queue.max_buffer_size) then
    broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.")
    os.execute("sleep " .. tonumber(1))
    queue:flush()
  end

  if not queue:is_valid_category(event.category) then
    broker_log:info(3, 'write: event category is ' .. event.category .. ' and is not valid')
    return true
  end

  if not queue:is_valid_element(event.category, event.element) then
    broker_log:info(3, 'write: event element is ' .. event.element ..
' and is not valid') + return true + end + + -- adding event to the queue + if queue:is_valid_event() then + queue:add(queue:format_data()) + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + + return true +end \ No newline at end of file From 377dee38fefadbde112567aded1380a0c8569330 Mon Sep 17 00:00:00 2001 From: UrBnW <40244829+UrBnW@users.noreply.github.com> Date: Thu, 25 Feb 2021 15:48:42 +0100 Subject: [PATCH 052/219] enh(influxdb) Add an option to store states (#43) --- stream-connectors/influxdb/influxdb-neb.lua | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/influxdb/influxdb-neb.lua index 274528ab938..78a68dea66a 100644 --- a/stream-connectors/influxdb/influxdb-neb.lua +++ b/stream-connectors/influxdb/influxdb-neb.lua @@ -116,6 +116,13 @@ function EventQueue:add(e) local perfdata, perfdata_err = broker.parse_perfdata(e.perfdata) if perfdata_err then broker_log:info(3, "EventQueue:add: No metric: " .. perfdata_err) + perfdata = {} + end + -- retrieve and store state for further processing + if self.skip_events_state == 0 then + perfdata["centreon.state"] = e.state + perfdata["centreon.state_type"] = e.state_type + elseif perfdata_err then return false end -- retrieve objects names instead of IDs @@ -215,6 +222,7 @@ function EventQueue.new(conf) max_buffer_size = 5000, max_buffer_age = 30, skip_anon_events = 1, + skip_events_state = 0, replacement_character = "_", log_level = 0, -- already proceeded in init function log_path = "" -- already proceeded in init function From 91615303bcc5e9d887564e8faaea2e38e0dfc54d Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 25 Feb 2021 15:49:47 +0100 Subject: [PATCH 053/219] Snow rework (#39) * start snow connector v2 * add useful functions * starting refacto * refacto mapping * refacto event functions * refacto methods comment * rework snow provider * bring back old expiration time * better default parameters * ev manage json v2 compatibility + bulk insert * hide passwords in log file * add missing token method * fix variable name * remove old file * fix typo * fix global var * fix oauth * add hostgroup filter * improve log readability * add missing parameter * fix hostgroups and wrong var name * remove debug comment * simplify introduction --- .../servicenow/connector-servicenow.lua | 892 +++++++++++++++--- 1 file changed, 758 insertions(+), 134 deletions(-) diff --git a/stream-connectors/servicenow/connector-servicenow.lua b/stream-connectors/servicenow/connector-servicenow.lua index 07f31943440..b0b06fa3082 100644 --- a/stream-connectors/servicenow/connector-servicenow.lua +++ b/stream-connectors/servicenow/connector-servicenow.lua @@ -1,124 +1,500 @@ #!/usr/bin/lua + -------------------------------------------------------------------------------- --- Centreon Broker Servicenow Connector --- +-- Centreon Broker Service Now connector +-- documentation: https://docs.centreon.com/current/en/integrations/stream-connectors/servicenow.html -------------------------------------------------------------------------------- + +-- libraries local curl = require "cURL" -local serviceNow - --- Class for Service now connection -local ServiceNow = {} -ServiceNow.__index = ServiceNow - -function 
ServiceNow:new(instance, username, password, clientId, clientPassword)
-  local serviceNow = {}
-  setmetatable(serviceNow, ServiceNow)
-  serviceNow.instance = instance
-  serviceNow.username = username
-  serviceNow.password = password
-  serviceNow.clientId = clientId
-  serviceNow.clientPassword = clientPassword
-  serviceNow.tokens = {}
-  serviceNow.tokens.authToken = nil
-  serviceNow.tokens.refreshToken = nil
-  return serviceNow
+-- Global variables
+
+-- Useful functions
+
+--------------------------------------------------------------------------------
+-- ifnil_or_empty: change a nil or empty variable for a specified value
+-- @param var, the variable that needs to be checked
+-- @param alt, the value of the variable if it is nil or empty
+-- @return alt|var, the alternate value or the variable value
+--------------------------------------------------------------------------------
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == '' then
+    return alt
+  else
+    return var
+  end
+end
+
+--------------------------------------------------------------------------------
+-- boolean_to_number: convert boolean variable to number
+-- @param {boolean} boolean, the boolean that will be converted
+-- @return {number}, a number according to the boolean value
+--------------------------------------------------------------------------------
+local function boolean_to_number (boolean)
+  return boolean and 1 or 0
+end
+
+--------------------------------------------------------------------------------
+-- check_boolean_number_option_syntax: make sure the number is either 1 or 0
+-- @param {number} number, the boolean number that must be validated
+-- @param {number} default, the default value that is going to be returned if the number is not valid
+-- @return {number} number, a boolean number
+--------------------------------------------------------------------------------
+local function check_boolean_number_option_syntax (number, default)
+  if number ~= 1 and number ~= 0 then
+    number = default
+  end
+
+  return number
+end
+
+--------------------------------------------------------------------------------
+-- get_hostname: retrieve hostname from host_id
+-- @param {number} host_id,
+-- @return {string} hostname,
+--------------------------------------------------------------------------------
+local function get_hostname (host_id)
+  if host_id == nil then
+    broker_log:warning(1, "get_hostname: host id is nil")
+    local hostname = 0
+    return hostname
+  end
+
+  local hostname = broker_cache:get_hostname(host_id)
+  if not hostname then
+    broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. Restarting centengine should fix this.")
+    hostname = host_id
+  end
+
+  return hostname
+end
+
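+--------------------------------------------------------------------------------
+-- usage sketch (hypothetical id): get_hostname(42) returns "srv1" when host 42
+-- is in the broker cache, and falls back to the raw id 42 otherwise
+--------------------------------------------------------------------------------
+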
host_id .. "." .. service_id .. " not found. Restarting centengine should fix this.") + service = service_id + end + + return service +end + +-------------------------------------------------------------------------------- +-- get_hostgroups: retrieve hostgroups from host_id +-- @param {number} host_id, +-- @return {array} hostgroups, +-------------------------------------------------------------------------------- +local function get_hostgroups (host_id) + if host_id == nil then + broker_log:warning(1, "get_hostgroup: host id is nil") + return false + end + + local hostgroups = broker_cache:get_hostgroups(host_id) + + if not hostgroups then + return false + end + + return hostgroups +end + +-------------------------------------------------------------------------------- +-- split: convert a string into a table +-- @param {string} string, the string that is going to be splitted into a table +-- @param {string} separatpr, the separator character that will be used to split the string +-- @return {table} table, +-------------------------------------------------------------------------------- +local function split (text, separator) + local hash = {} + -- https://stackoverflow.com/questions/1426954/split-string-in-lua + for value in string.gmatch(text, "([^" .. separator .. "]+)") do + table.insert(hash, value) + end + + return hash +end + +-------------------------------------------------------------------------------- +-- find_in_mapping: check if item type is in the mapping and is accepted +-- @param {table} mapping, the mapping table +-- @param {string} reference, the accepted values for the item +-- @param {string} item, the item we want to find in the mapping table and in the reference +-- @return {boolean} +-------------------------------------------------------------------------------- +local function find_in_mapping (mapping, reference, item) + for mappingIndex, mappingValue in pairs(mapping) do + for referenceIndex, referenceValue in pairs(split(reference, ',')) do + if item == mappingValue and mappingIndex == referenceValue then + return true + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- find_hostgroup_in_list: check if hostgroups from hosts are in an accepted list from the stream connector configuration +-- @param {table} acceptedHostgroups, the table with the name of accepted hostgroups +-- @param {table} hostHostgroups, the hostgroups associated to an host +-- @return {boolean} +-- @return {string} [optional] acceptedHostgroupsName, the hostgroup name that matched +-------------------------------------------------------------------------------- +local function find_hostgroup_in_list (acceptedHostgroups, hostHostgroups) + for _, acceptedHostgroupsName in ipairs(acceptedHostgroups) do + for _, hostHostgroupsInfo in pairs(hostHostgroups) do + if acceptedHostgroupsName == hostHostgroupsInfo.group_name then + return true, acceptedHostgroupsName + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- check_neb_event_status: check the status of a neb event (ok, critical...) 
+--------------------------------------------------------------------------------
+-- check_neb_event_status: check the status of a neb event (ok, critical...)
+-- @param {number} eventStatus, the status of the event
+-- @param {string} acceptedStatuses, the event statuses that are going to be accepted
+-- @return {boolean}
+--------------------------------------------------------------------------------
+local function check_neb_event_status (eventStatus, acceptedStatuses)
+  for i, v in ipairs(split(acceptedStatuses, ',')) do
+    if tostring(eventStatus) == v then
+      return true
+    end
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- compare_numbers: compare two numbers, return true if the comparison holds
+-- @param {number} firstNumber
+-- @param {number} secondNumber
+-- @param {string} operator, the mathematical operator that is used for the comparison
+-- @return {boolean}
+--------------------------------------------------------------------------------
+local function compare_numbers (firstNumber, secondNumber, operator)
+  if type(firstNumber) ~= 'number' or type(secondNumber) ~= 'number' then
+    return false
+  end
+
+  -- a concatenated string like "1>=0" is always truthy, so each supported
+  -- operator has to be evaluated explicitly
+  if operator == '>=' then
+    return firstNumber >= secondNumber
+  elseif operator == '<=' then
+    return firstNumber <= secondNumber
+  elseif operator == '>' then
+    return firstNumber > secondNumber
+  elseif operator == '<' then
+    return firstNumber < secondNumber
+  elseif operator == '==' then
+    return firstNumber == secondNumber
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+-- Constructor
+-- @param conf The table given by the init() function and returned from the GUI
+-- @return the new EventQueue
+--------------------------------------------------------------------------------
+
+function EventQueue:new (conf)
+  local retval = {
+    host_status = "0,1,2", -- = ok, down, unreachable
+    service_status = "0,1,2,3", -- = ok, warning, critical, unknown
+    hard_only = 1,
+    acknowledged = 0,
+    element_type = "host_status,service_status", -- could be: metric,host_status,service_status,ba_event,kpi_event (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#neb)
+    category_type = "neb", -- could be: neb,storage,bam (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#event-categories)
+    accepted_hostgroups = '',
+    in_downtime = 0,
+    max_buffer_size = 10,
+    max_buffer_age = 5,
+    max_stored_events = 10, -- do not use values above 100
+    skip_anon_events = 1,
+    skip_nil_id = 1,
+    element_mapping = {},
+    category_mapping = {},
+    instance = '',
+    username = '',
+    password = '',
+    client_id = '',
+    client_secret = '',
+    proxy_address = '',
+    proxy_port = '',
+    proxy_username = '',
+    proxy_password = '',
+    validatedEvents = {},
+    tokens = {}
+  }
+
+  retval.category_mapping = {
+    neb = 1,
+    bbdo = 2,
+    storage = 3,
+    correlation = 4,
+    dumper = 5,
+    bam = 6,
+    extcmd = 7
+  }
+
+  retval.element_mapping = {
+    [1] = {},
+    [3] = {},
+    [6] = {}
+  }
+
+  retval.element_mapping[1].acknowledgement = 1
+  retval.element_mapping[1].comment = 2
+  retval.element_mapping[1].custom_variable = 3
+  retval.element_mapping[1].custom_variable_status = 4
+  retval.element_mapping[1].downtime = 5
+  retval.element_mapping[1].event_handler = 6
+  retval.element_mapping[1].flapping_status = 7
+  retval.element_mapping[1].host_check = 8
+  retval.element_mapping[1].host_dependency = 9
+  retval.element_mapping[1].host_group = 10
+  retval.element_mapping[1].host_group_member = 11
+  retval.element_mapping[1].host = 12
+  retval.element_mapping[1].host_parent = 13
+  retval.element_mapping[1].host_status 
= 14 + retval.element_mapping[1].instance = 15 + retval.element_mapping[1].instance_status = 16 + retval.element_mapping[1].log_entry = 17 + retval.element_mapping[1].module = 18 + retval.element_mapping[1].service_check = 19 + retval.element_mapping[1].service_dependency = 20 + retval.element_mapping[1].service_group = 21 + retval.element_mapping[1].service_group_member = 22 + retval.element_mapping[1].service = 23 + retval.element_mapping[1].service_status = 24 + retval.element_mapping[1].instance_configuration = 25 + + retval.element_mapping[3].metric = 1 + retval.element_mapping[3].rebuild = 2 + retval.element_mapping[3].remove_graph = 3 + retval.element_mapping[3].status = 4 + retval.element_mapping[3].index_mapping = 5 + retval.element_mapping[3].metric_mapping = 6 + + retval.element_mapping[6].ba_status = 1 + retval.element_mapping[6].kpi_status = 2 + retval.element_mapping[6].meta_service_status = 3 + retval.element_mapping[6].ba_event = 4 + retval.element_mapping[6].kpi_event = 5 + retval.element_mapping[6].ba_duration_event = 6 + retval.element_mapping[6].dimension_ba_event = 7 + retval.element_mapping[6].dimension_kpi_event = 8 + retval.element_mapping[6].dimension_ba_bv_relation_event = 9 + retval.element_mapping[6].dimension_bv_event = 10 + retval.element_mapping[6].dimension_truncate_table_signal = 11 + retval.element_mapping[6].bam_rebuild = 12 + retval.element_mapping[6].dimension_timeperiod = 13 + retval.element_mapping[6].dimension_ba_timeperiod_relation = 14 + retval.element_mapping[6].dimension_timeperiod_exception = 15 + retval.element_mapping[6].dimension_timeperiod_exclusion = 16 + retval.element_mapping[6].inherited_downtime = 17 + + retval.tokens.authToken = nil + retval.tokens.refreshToken = nil + + + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + if i == 'client_secret' or i == 'password' then + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => *********") + else + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v) + end + else + broker_log:info(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v) + end + end + + retval.hard_only = check_boolean_number_option_syntax(retval.hard_only, 1) + retval.acknowledged = check_boolean_number_option_syntax(retval.acknowledged, 0) + retval.in_downtime = check_boolean_number_option_syntax(retval.in_downtime, 0) + retval.skip_anon_events = check_boolean_number_option_syntax(retval.skip_anon_events, 1) + retval.skip_nil_id = check_boolean_number_option_syntax(retval.skip_nil_id, 1) + + retval.__internal_ts_last_flush = os.time() + retval.events = {} + setmetatable(retval, EventQueue) + -- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + + return retval +end + +-------------------------------------------------------------------------------- +-- getAuthToken: obtain a auth token +-- @return {string} self.tokens.authToken.token, the auth token +-------------------------------------------------------------------------------- +function EventQueue:getAuthToken () if not self:refreshTokenIsValid() then self:authToken() end + if not self:accessTokenIsValid() then self:refreshToken(self.tokens.refreshToken.token) end + return self.tokens.authToken.token end -function ServiceNow:authToken () - local data = "grant_type=password&client_id=" .. self.clientId .. "&client_secret=" .. self.clientPassword .. "&username=" .. self.username .. "&password=" .. 
self.password
+
  local res = self:call(
    "oauth_token.do",
    "POST",
    data
  )
+
  if not res.access_token then
-    error("Authentication failed")
+    broker_log:error(1, "EventQueue:authToken: Authentication failed, couldn't get tokens")
+    return false
  end
+
  self.tokens.authToken = {
    token = res.access_token,
    expTime = os.time(os.date("!*t")) + 1700
  }
+
  self.tokens.refreshToken = {
-    token = res.resfresh_token,
+    token = res.refresh_token,
    expTime = os.time(os.date("!*t")) + 360000
  }
end

-function ServiceNow:refreshToken (token)
-  local data = "grant_type=refresh_token&client_id=" .. self.clientId .. "&client_secret=" .. self.clientPassword .. "&username=" .. self.username .. "&password=" .. self.password
-  res = self.call(
+--------------------------------------------------------------------------------
+-- refreshToken: refresh the auth token when it has expired
+--------------------------------------------------------------------------------
+function EventQueue:refreshToken (token)
+  local data = "grant_type=refresh_token&client_id=" .. self.client_id .. "&client_secret=" .. self.client_secret .. "&username=" .. self.username .. "&password=" .. self.password .. "&refresh_token=" .. token
+
+  local res = self:call(
    "oauth_token.do",
    "POST",
    data
  )
+
  if not res.access_token then
-    error("Bad access token")
+    broker_log:error(1, 'EventQueue:refreshToken: Bad access token')
+    return false
  end
+
  self.tokens.authToken = {
    token = res.access_token,
    expTime = os.time(os.date("!*t")) + 1700
  }
end

-function ServiceNow:refreshTokenIsValid ()
+--------------------------------------------------------------------------------
+-- refreshTokenIsValid: check whether the refresh token is still valid
+--------------------------------------------------------------------------------
+function EventQueue:refreshTokenIsValid ()
  if not self.tokens.refreshToken then
    return false
  end
+
  if os.time(os.date("!*t")) > self.tokens.refreshToken.expTime then
-    self.refreshToken = nil
+    self.tokens.refreshToken = nil
    return false
  end
+
  return true
end

-function ServiceNow:accessTokenIsValid ()
+--------------------------------------------------------------------------------
+-- accessTokenIsValid: check whether the access token is still valid
+--------------------------------------------------------------------------------
+function EventQueue:accessTokenIsValid ()
  if not self.tokens.authToken then
    return false
  end
+
  if os.time(os.date("!*t")) > self.tokens.authToken.expTime then
-    self.authToken = nil
+    self.tokens.authToken = nil
    return false
  end
+
  return true
end

-function ServiceNow:call (url, method, data, authToken)
+--------------------------------------------------------------------------------
+-- EventQueue:call: run an api call
+-- @param {string} url, the service now instance url
+-- @param {string} method, the HTTP method that is used
+-- @param {string} data, the data we want to send to service now
+-- @param {string} authToken, the api auth token
+-- @return {array|boolean} the decoded response body, or false when the call fails or the response is empty
+--------------------------------------------------------------------------------
+function EventQueue:call (url, method, data, authToken)
  method = method or "GET"
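+  -- defaulting sketch: "x or y" evaluates to y when x is nil, so calling
+  -- EventQueue:call(url) alone behaves like EventQueue:call(url, "GET", nil, nil);
+  -- the assignments here and just below simply make those defaults explicit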
data = data or nil authToken = authToken or nil local endpoint = "https://" .. tostring(self.instance) .. ".service-now.com/" .. tostring(url) - broker_log:info(1, "Prepare url " .. endpoint) + broker_log:info(3, "EventQueue:call: Prepare url " .. endpoint) local res = "" local request = curl.easy() :setopt_url(endpoint) - :setopt_writefunction(function (responce) - res = res .. tostring(responce) + :setopt_writefunction(function (response) + res = res .. tostring(response) end) - broker_log:info(1, "Request initialize") + + broker_log:info(3, "EventQueue:call: Request initialize") + + -- set proxy address configuration + if (self.proxy_address ~= '') then + if (self.proxy_port ~= '') then + request:setopt(curl.OPT_PROXY, self.proxy_address .. ':' .. self.proxy_port) + else + broker_log:error(1, "EventQueue:call: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.proxy_username ~= '') then + if (self.proxy_password ~= '') then + request:setopt(curl.OPT_PROXYUSERPWD, self.proxy_username .. ':' .. self.proxy_password) + else + broker_log:error(1, "EventQueue:call: proxy_password parameter is not set but proxy_username is used") + end + end if not authToken then if method ~= "GET" then - broker_log:info(1, "Add form header") + broker_log:info(3, "EventQueue:call: Add form header") request:setopt(curl.OPT_HTTPHEADER, { "Content-Type: application/x-www-form-urlencoded" }) - broker_log:info(1, "After add form header") end else - broker_log:info(1, "Add JSON header") + broker_log:info(3, "Add JSON header") request:setopt( curl.OPT_HTTPHEADER, { @@ -130,146 +506,394 @@ function ServiceNow:call (url, method, data, authToken) end if method ~= "GET" then - broker_log:info(1, "Add post data") + broker_log:info(3, "EventQueue:call: Add post data") request:setopt_postfields(data) end - broker_log:info(1, "Call url " .. endpoint) + broker_log:info(3, "EventQueue:call: request body " .. tostring(data)) + broker_log:info(3, "EventQueue:call: request header " .. tostring(authToken)) + broker_log:info(3, "EventQueue:call: Call url " .. endpoint) request:perform() respCode = request:getinfo(curl.INFO_RESPONSE_CODE) - broker_log:info(1, "HTTP Code : " .. respCode) - broker_log:info(1, "Response body : " .. tostring(res)) + broker_log:info(3, "EventQueue:call: HTTP Code : " .. respCode) + broker_log:info(3, "EventQueue:call: Response body : " .. tostring(res)) request:close() if respCode >= 300 then - broker_log:info(1, "HTTP Code : " .. respCode) - broker_log:info(1, "HTTP Error : " .. res) - error("Bad request code") + broker_log:info(1, "EventQueue:call: HTTP Code : " .. respCode) + broker_log:info(1, "EventQueue:call: HTTP Error : " .. res) + return false end if res == "" then - broker_log:info(1, "HTTP Error : " .. res) - error("Bad content") + broker_log:info(1, "EventQueue:call: HTTP Error : " .. 
res) + return false end - broker_log:info(1, "Parsing JSON") return broker.json_decode(res) end -function ServiceNow:sendEvent (event) - local authToken = self:getAuthToken() +-------------------------------------------------------------------------------- +-- is_valid_category: check if the event category is valid +-- @param {number} category, the category id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_category (category) + return find_in_mapping(self.category_mapping, self.category_type, category) +end - broker_log:info(1, "Event information :") - for k, v in pairs(event) do - broker_log:info(1, tostring(k) .. " : " .. tostring(v)) +-------------------------------------------------------------------------------- +-- is_valid_element: check if the event element is valid +-- @param {number} category, the category id of the event +-- @param {number} element, the element id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_element (category, element) + return find_in_mapping(self.element_mapping[category], self.element_type, element) +end + +-------------------------------------------------------------------------------- +-- is_valid_neb_event: check if the neb event is valid +-- @return {table} validNebEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_neb_event () + if self.currentEvent.element == 14 or self.currentEvent.element == 24 then + self.currentEvent.hostname = get_hostname(self.currentEvent.host_id) + + -- can't find hostname in cache + if self.currentEvent.hostname == self.currentEvent.host_id and self.skip_anon_events == 1 then + return false + end + + -- can't find host_id in the event + if self.currentEvent.hostname == 0 and self.skip_nil_id == 1 then + return false + end + + if (string.find(self.currentEvent.hostname, '^_Module_BAM_*')) then + return false + end + + self.currentEvent.output = ifnil_or_empty(string.match(self.currentEvent.output, "^(.*)\n"), 'no output') + self.sendData.source = 'centreon' + self.sendData.event_class = 'centreon' + self.sendData.severity = 5 + self.sendData.node = self.currentEvent.hostname + self.sendData.time_of_event = os.date("!%Y-%m-%d %H:%M:%S", self.currentEvent.last_check) + self.sendData.description = self.currentEvent.output end - broker_log:info(1, "------") - broker_log:info(1, "Auth token " .. 
authToken)
-  if pcall(self:call(
-    "api/now/table/em_event",
-    "POST",
-    broker.json_encode(event),
-    authToken
-  )) then
+  if self.currentEvent.element == 14 then
+    if not check_neb_event_status(self.currentEvent.state, self.host_status) then
+      return false
+    end
+
+    self.sendData.resource = self.currentEvent.hostname
+    if self.currentEvent.state == 0 then
+      self.sendData.severity = 0
+    elseif self.currentEvent.state == 1 then
+      self.sendData.severity = 1
+    end
+
+  elseif self.currentEvent.element == 24 then
+    self.currentEvent.serviceDescription = get_service_description(self.currentEvent.host_id, self.currentEvent.service_id)
+
+    -- can't find service description in cache
+    if self.currentEvent.serviceDescription == self.currentEvent.service_id and self.skip_anon_events == 1 then
+      return false
+    end
+
+    if not check_neb_event_status(self.currentEvent.state, self.service_status) then
+      return false
+    end
+
+    -- can't find service_id in the event
+    if self.currentEvent.serviceDescription == 0 and self.skip_nil_id == 1 then
+      return false
+    end
+
+    self.currentEvent.svc_severity = broker_cache:get_severity(self.currentEvent.host_id, self.currentEvent.service_id)
+  end
+
+  -- check hard state
+  if not compare_numbers(self.currentEvent.state_type, self.hard_only, '>=') then
+    return false
+  end
+
+  -- check ack
+  if not compare_numbers(self.acknowledged, boolean_to_number(self.currentEvent.acknowledged), '>=') then
+    return false
+  end
+
+  -- check downtime
+  if not compare_numbers(self.in_downtime, self.currentEvent.scheduled_downtime_depth, '>=') then
+    return false
+  end
+
+  if not self:is_valid_hostgroup() then
+    return false
+  end
+
+  -- only service events may override the resource and severity computed above
+  if self.currentEvent.element == 24 then
+    self.sendData.resource = self.currentEvent.serviceDescription
+    if self.currentEvent.state == 0 then
+      self.sendData.severity = 0
+    elseif self.currentEvent.state == 1 then
+      self.sendData.severity = 3
+    elseif self.currentEvent.state == 2 then
+      self.sendData.severity = 1
+    elseif self.currentEvent.state == 3 then
+      self.sendData.severity = 4
+    end
+  end
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_storage_event: check if the storage event is valid
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_storage_event ()
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_bam_event: check if the bam event is valid
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_bam_event ()
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_event: check if the event is valid
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_event ()
+  local validEvent = false
+  self.sendData = {}
+  if self.currentEvent.category == 1 then
+    validEvent = self:is_valid_neb_event()
+  elseif self.currentEvent.category == 3 then
+    validEvent = self:is_valid_storage_event()
+  elseif self.currentEvent.category == 6 then
+    validEvent = self:is_valid_bam_event()
+  end
+
+  return validEvent
+end
+
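+--------------------------------------------------------------------------------
+-- dispatch sketch: a neb host_status event reaches is_valid_neb_event with
+-- category = 1 and element = 14, a service_status event with category = 1 and
+-- element = 24 (see element_mapping in EventQueue:new)
+--------------------------------------------------------------------------------
+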
+--------------------------------------------------------------------------------
+-- is_valid_hostgroup: check if the event is associated to an accepted hostgroup
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_hostgroup ()
+  self.currentEvent.hostgroups = get_hostgroups(self.currentEvent.host_id)
+
+  -- return true if option is not set
+  if self.accepted_hostgroups == '' then
    return true
  end
-  return false
+
+  -- drop event if we can't find any hostgroup on the host
+  if not self.currentEvent.hostgroups then
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: dropping event because no hostgroup has been found for host_id: ' .. self.currentEvent.host_id)
+    return false
+  end
+
+  -- check if one of the host hostgroups is in the accepted list
+  local retval, matchedHostgroup = find_hostgroup_in_list(split(self.accepted_hostgroups, ','), self.currentEvent.hostgroups)
+  if matchedHostgroup == nil then
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: no hostgroup matched provided list: ' .. self.accepted_hostgroups .. ' for host_id: ' .. self.currentEvent.host_id)
+  else
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: host_id: ' .. self.currentEvent.host_id .. ' matched the following hostgroup: ' .. matchedHostgroup)
+  end
+
+  return retval
end

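+--------------------------------------------------------------------------------
+-- illustration (hypothetical configuration): with accepted_hostgroups = "linux,db",
+-- a host that only belongs to the "windows" hostgroup is dropped by
+-- is_valid_hostgroup above, while a host that belongs to "db" passes
+--------------------------------------------------------------------------------
+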
-function init(parameters)
+
+local queue
+
+--------------------------------------------------------------------------------
+-- init, initiate stream connector with parameters from the configuration file
+-- @param {table} parameters, the table with all the configuration parameters
+--------------------------------------------------------------------------------
+function init (parameters)
  logfile = parameters.logfile or "/var/log/centreon-broker/connector-servicenow.log"
+
  if not parameters.instance or not parameters.username or not parameters.password
-    or not parameters.client_id or not parameters.client_secret then
-    error("The needed parameters are 'instance', 'username', 'password', 'client_id' and 'client_secret'")
+    or not parameters.client_id or not parameters.client_secret then
+    broker_log:error(1, 'Required parameters are: instance, username, password, client_id and client_secret. Their type must be string')
  end
+
  broker_log:set_parameters(1, logfile)
  broker_log:info(1, "Parameters")
  for i,v in pairs(parameters) do
-    broker_log:info(1, "Init " .. i .. " : " .. v)
-  end
-  serviceNow = ServiceNow:new(
-    parameters.instance,
-    parameters.username,
-    parameters.password,
-    parameters.client_id,
-    parameters.client_secret
-  )
+    if i == 'client_secret' or i == 'password' then
+      broker_log:info(1, "Init " .. i .. " : *********")
+    else
+      broker_log:info(1, "Init " .. i .. " : " .. v)
+    end
+  end
+
+  queue = EventQueue:new(parameters)
end

-function write(data)
-  local sendData = {
-    source = "centreon",
-    event_class = "centreon",
-    severity = 5
-  }
+--------------------------------------------------------------------------------
+-- EventQueue:add, add the currently formatted event (self.sendData) to the queue
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:add ()
+  self.events[#self.events + 1] = self.sendData
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush, flush stored events
+-- Called when the max number of events or the max age are reached
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:flush ()
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")

-  broker_log:info(1, "Prepare Go category " .. tostring(data.category) .. " element " .. tostring(data.element))
+  local retval = self:send_data()

-  if data.category == 1 then
-    broker_log:info(1, "Broker event data")
-    for k, v in pairs(data) do
-      broker_log:info(1, tostring(k) .. " : " .. tostring(v))
-    end
-    broker_log:info(1, "------")
+  self.events = {}
+
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end

-    -- Doesn't process if the host is acknowledged or disabled
-    if data.acknowledged or not data.enabled then
-      broker_log:info(1, "Dropped because acknowledged or not enabled")
-      return true
-    end
-    -- Doesn't process if the host state is not hard
-    if data.state_type ~= 1 then
-      broker_log:info(1, "Dropped because state is not hard")
-      return true
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to external tool
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:send_data ()
+  local data = ''
+  local authToken = self:getAuthToken()
+  local counter = 0
+
+  for _, raw_event in ipairs(self.events) do
+    if counter == 0 then
+      data = broker.json_encode(raw_event)
+      counter = counter + 1
+    else
+      data = data .. ',' .. broker.json_encode(raw_event)
    end
-    hostname = broker_cache:get_hostname(data.host_id)
-    if not hostname then
-      broker_log:info(1, "Dropped missing hostname")
-      return true
+  end
+
+  data = '{"records":[' .. data .. ']}'
+  broker_log:info(2, 'EventQueue:send_data: creating json: ' .. data)
+
+  if self:call(
+    "api/global/em/jsonv2",
+    "POST",
+    data,
+    authToken
+  ) then
+    return true
+  end
+
+  return false
+end
+
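+--------------------------------------------------------------------------------
+-- payload sketch (hypothetical event): send_data above would POST a body like
+-- {"records":[{"source":"centreon","event_class":"centreon","severity":5,
+-- "node":"srv1","resource":"Ping","time_of_event":"2021-02-25 15:00:00",
+-- "description":"OK - 127.0.0.1 responds"}]} to api/global/em/jsonv2
+--------------------------------------------------------------------------------
+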
+--------------------------------------------------------------------------------
+-- write, process every event forwarded by broker
+-- @param {array} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+
+  -- drop event if wrong category
+  if not queue:is_valid_category(event.category) then
+    return true
+  end
+
+  -- drop event if wrong element
+  if not queue:is_valid_element(event.category, event.element) then
+    return true
+  end
+
+  queue.currentEvent = event
+
+  -- START FIX FOR BROKER SENDING DUPLICATED EVENTS
+  -- do not compute event if it is duplicated
+  if queue:is_event_duplicated() then
+    return true
+  end
+  -- END OF FIX
+
+  -- First, are there some old events waiting in the flush queue ?
+  if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then
+    broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  -- Then we check that the event queue is not already full
+  if (#queue.events >= queue.max_buffer_size) then
+    broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.")
+    os.execute("sleep " .. tonumber(1))
+    queue:flush()
+  end
+
+  -- adding event to the queue
+  if queue:is_valid_event() then
+
+    -- START FIX FOR BROKER SENDING DUPLICATED EVENTS
+    -- create id from event data
+    local eventId
+    if queue.currentEvent.element == 14 then
+      eventId = tostring(queue.currentEvent.host_id) .. '_' .. tostring(queue.currentEvent.last_check)
+    else
+      eventId = tostring(queue.currentEvent.host_id) .. '_' .. tostring(queue.currentEvent.service_id) .. '_' .. tostring(queue.currentEvent.last_check)
    end
-    sendData.node = hostname
-    sendData.description = data.output
-    sendData.time_of_event = os.date("%Y-%m-%d %H:%M:%S", data.last_check)
-    if data.element == 14 then
-      sendData.resource = hostname
-      if data.current_state == 0 then
-        sendData.severity = 0
-      elseif data.current_state then
-        sendData.severity = 1
-      end
-    else
-      service_description = broker_cache:get_service_description(data.host_id, data.service_id)
-      if not service_description then
-        broker_log:info(1, "Droped missing service description")
-        return true
-      end
-      if data.current_state == 0 then
-        sendData.severity = 0
-      elseif data.current_state == 1 then
-        sendData.severity = 3
-      elseif data.current_state == 2 then
-        sendData.severity = 1
-      elseif data.current_state == 3 then
-        sendData.severity = 4
-      end
-      sendData.resource = service_description
    end
+
+    -- remove oldest event from sent events list
+    if #queue.validatedEvents >= queue.max_stored_events then
+      table.remove(queue.validatedEvents, 1)
+    end
+
+    -- add event in the sent events list and add list to queue
+    table.insert(queue.validatedEvents, eventId)
+    -- END OF FIX
+
+    queue:add()
  else
    return true
  end

-  return serviceNow:sendEvent(sendData)
+  -- Then we check whether it is time to send the events to the receiver and flush
+  if (#queue.events >= queue.max_buffer_size) then
+    broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data")
+    return queue:flush()
+  end
+
+  return true
end

-function filter(category, element)
-  if category == 1 then
-    if element == 14 or element == 24 then
-      broker_log:info(1, "Go category " .. tostring(category) .. " element " .. tostring(element))
+--------------------------------------------------------------------------------
+-- EventQueue:is_event_duplicated, create an id from the neb event and check if the id is in the already sent events list
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_event_duplicated()
+  local eventId = ''
+  if self.currentEvent.element == 14 then
+    eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.last_check)
+  else
+    eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.service_id) .. '_' ..
tostring(self.currentEvent.last_check) + end + + for i, v in ipairs(self.validatedEvents) do + if eventId == v then return true end end + return false end From ca951308a112fbef33e24e3e5410137d9c0b5560 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 25 Feb 2021 16:16:33 +0100 Subject: [PATCH 054/219] add opsgenie stream connector (#42) * create opsgenie stream connector * add severity to priority conversion * fix priority + add alerts dedup + fix endpoint * add bam incidents * nil managment * reset log level * simplify documentation --- stream-connectors/opsgenie/opsgenie.lua | 1038 +++++++++++++++++++++++ 1 file changed, 1038 insertions(+) create mode 100644 stream-connectors/opsgenie/opsgenie.lua diff --git a/stream-connectors/opsgenie/opsgenie.lua b/stream-connectors/opsgenie/opsgenie.lua new file mode 100644 index 00000000000..cb6655b58ad --- /dev/null +++ b/stream-connectors/opsgenie/opsgenie.lua @@ -0,0 +1,1038 @@ +#!/usr/bin/lua + +-------------------------------------------------------------------------------- +-- Centreon Broker Opsgenie connector +-- documentation available at https://docs.centreon.com/current/en/integrations/stream-connectors/opsgenie.html +-------------------------------------------------------------------------------- + +-- libraries +local curl = require "cURL" + +-- Global variables + +-- Useful functions + +-------------------------------------------------------------------------------- +-- ifnil_or_empty: change a nil or empty variable for a specified value +-- @param var, the variable that needs to be checked +-- @param alt, the value of the variable if it is nil or empty +-- @return alt|var, the alternate value or the variable value +-------------------------------------------------------------------------------- +local function ifnil_or_empty(var, alt) + if var == nil or var == '' then + return alt + else + return var + end +end + +-------------------------------------------------------------------------------- +-- boolean_to_number: convert boolean variable to number +-- @param {boolean} boolean, the boolean that will be converted +-- @return {number}, a number according to the boolean value +-------------------------------------------------------------------------------- +local function boolean_to_number (boolean) + return boolean and 1 or 0 +end + +-------------------------------------------------------------------------------- +-- check_boolean_number_option_syntax: make sure the number is either 1 or 0 +-- @param {number} number, the boolean number that must be validated +-- @param {number} default, the default value that is going to be return if the default number is not validated +-- @return {number} number, a boolean number +-------------------------------------------------------------------------------- +local function check_boolean_number_option_syntax (number, default) + if number ~= 1 and number ~= 0 then + number = default + end + + return number +end + +-------------------------------------------------------------------------------- +-- get_hostname: retrieve hostname from host_id +-- @param {number} host_id, +-- @return {string} hostname, +-------------------------------------------------------------------------------- +local function get_hostname (host_id) + if host_id == nil then + broker_log:warning(1, "get_hostname: host id is nil") + hostname = 0 + return hostname + end + + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. 
Restarting centengine should fix this.") + hostname = host_id + end + + return hostname +end + +-------------------------------------------------------------------------------- +-- get_service_description: retrieve the service name from its host_id and service_id +-- @param {number} host_id, +-- @param {number} service_id, +-- @return {string} service, the name of the service +-------------------------------------------------------------------------------- +local function get_service_description (host_id, service_id) + if host_id == nil or service_id == nil then + service = 0 + broker_log:warning(1, "get_service_description: host id or service id has a nil value") + + return service + end + + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. " not found. Restarting centengine should fix this.") + service = service_id + end + + return service +end + +-------------------------------------------------------------------------------- +-- get_hostgroups: retrieve hostgroups from host_id +-- @param {number} host_id, +-- @return {array} hostgroups, +-------------------------------------------------------------------------------- +local function get_hostgroups (host_id) + if host_id == nil then + broker_log:warning(1, "get_hostgroup: host id is nil") + return false + end + + local hostgroups = broker_cache:get_hostgroups(host_id) + + if not hostgroups then + return false + end + + return hostgroups +end + +-------------------------------------------------------------------------------- +-- get_severity: retrieve severity from host or service +-- @param {number} host_id, +-- @param {number} [optional] service_id +-- @return {array} severity, +-------------------------------------------------------------------------------- +local function get_severity (host_id, service_id) + local service_id = service_id or nil + local severity = nil + + if host_id == nil then + broker_log:warning(1, "get_severity: host id is nil") + return false + end + + if service_id == nil then + severity = broker_cache:get_severity(host_id) + else + severity = broker_cache:get_severity(host_id, service_id) + end + + return severity +end + +-------------------------------------------------------------------------------- +-- get_ba_name: retrieve ba name from ba id +-- @param {number} ba_id, +-- @return {string} ba_name, the name of the ba +-- @return {string} ba_description, the description of the ba +-------------------------------------------------------------------------------- +local function get_ba_name (ba_id) + if ba_id == nil then + broker_log:warning(1, "get_ba_name: ba id is nil") + return false + end + + local ba_info = broker_cache:get_ba(ba_id) + if ba_info == nil then + broker_log:warning(1, "get_ba_name: couldn't get ba informations in cache") + return false + end + + return ba_info.ba_name, ba_info.ba_description +end + +-------------------------------------------------------------------------------- +-- get_bvs: retrieve bv name from ba id +-- @param {number} ba_id, +-- @return {array} bv_names, the bvs' name +-- @return {array} bv_names, the bvs' description +-------------------------------------------------------------------------------- +local function get_bvs (ba_id) + if ba_id == nil then + broker_log:warning(1, "get_bvs: ba id is nil") + return false + end + + local bv_id = broker_cache:get_bvs(ba_id) + + if bv_id == nil then + broker_log:warning(1, 
"get_bvs: couldn't get bvs for ba id: " .. tostring(ba_id)) + return false + end + + local bv_names = {} + local bv_descriptions = {} + local bv_infos = {} + + for i, v in ipairs(bv_id) do + bv_infos = broker_cache:get_bv(v) + if (bv_infos.bv_name ~= nil and bv_infos.bv_name ~= '') then + table.insert(bv_names,bv_infos.bv_name) + -- handle nil descriptions on BV + if bv_infos.bv_description ~= nil then + table.insert(bv_descriptions,bv_infos.bv_description) + else + broker_log:info(3, 'get_bvs: BV: ' .. bv_infos.bv_name .. ' has no description') + end + end + end + + return bv_names, bv_descriptions +end + +-------------------------------------------------------------------------------- +-- split: convert a string into a table +-- @param {string} string, the string that is going to be splitted into a table +-- @param {string} separatpr, the separator character that will be used to split the string +-- @return {table} table, +-------------------------------------------------------------------------------- +local function split (text, separator) + local hash = {} + + -- return empty string if text is nil + if text == nil then + broker_log:error(1, 'split: could not split text because it is nil') + return '' + end + + -- set default separator + seperator = ifnil_or_empty(separator, ',') + + for value in string.gmatch(text, "([^" .. separator .. "]+)") do + table.insert(hash, value) + end + + return hash +end + +-------------------------------------------------------------------------------- +-- find_in_mapping: check if item type is in the mapping and is accepted +-- @param {table} mapping, the mapping table +-- @param {string} reference, the accepted values for the item +-- @param {string} item, the item we want to find in the mapping table and in the reference +-- @return {boolean} +-------------------------------------------------------------------------------- +local function find_in_mapping (mapping, reference, item) + for mappingIndex, mappingValue in pairs(mapping) do + for referenceIndex, referenceValue in pairs(split(reference, ',')) do + if item == mappingValue and mappingIndex == referenceValue then + return true + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- find_hostgroup_in_list: check if hostgroups from hosts are in an accepted list from the stream connector configuration +-- @param {table} acceptedHostgroups, the table with the name of accepted hostgroups +-- @param {table} hostHostgroups, the hostgroups associated to an host +-- @return {boolean} +-- @return {string} [optional] acceptedHostgroupsName, the hostgroup name that matched +-------------------------------------------------------------------------------- +local function find_hostgroup_in_list (acceptedHostgroups, hostHostgroups) + for _, acceptedHostgroupsName in ipairs(acceptedHostgroups) do + for _, hostHostgroupsInfo in pairs(hostHostgroups) do + if acceptedHostgroupsName == hostHostgroupsInfo.group_name then + return true, acceptedHostgroupsName + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- check_event_status: check the status of an event (ok, critical...) 
+-- @param {number} eventStatus, the status of the event +-- @param {string} acceptedStatus, the event statuses that are going to be accepted +-- @return {boolean} +-------------------------------------------------------------------------------- +local function check_event_status (eventStatus, acceptedStatuses) + for i, v in ipairs(split(acceptedStatuses, ',')) do + if tostring(eventStatus) == v then + return true + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- compare_numbers: compare two numbers, if comparison is valid, then return true +-- @param {number} firstNumber +-- @param {number} secondNumber +-- @param {string} operator, the mathematical operator that is used for the comparison +-- @return {boolean} +-------------------------------------------------------------------------------- +local function compare_numbers (firstNumber, secondNumber, operator) + if type(firstNumber) ~= 'number' or type(secondNumber) ~= 'number' then + return false + end + + if firstNumber .. operator .. secondNumber then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue:new (conf) + local retval = { + host_status = "0,1,2", -- = ok, down, unreachable + service_status = "0,1,2,3", -- = ok, warning, critical, unknown, + ba_status = "0,1,2", -- = ok, warning, critical + hard_only = 1, + acknowledged = 0, + element_type = "host_status,service_status,ba_status", -- could be: metric,host_status,service_status,ba_event,kpi_event" (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#neb) + category_type = "neb,bam", -- could be: neb,storage,bam (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#event-categories) + accepted_hostgroups = '', + in_downtime = 0, + max_buffer_size = 1, + max_buffer_age = 5, + max_stored_events = 10, -- do not use values above 100 + skip_anon_events = 1, + skip_nil_id = 1, + element_mapping = {}, + category_mapping = {}, + status_mapping = {}, + proxy_address = '', + proxy_port = '', + proxy_username = '', + proxy_password = '', + validatedEvents = {}, + app_api_token = '', + integration_api_token = '', + api_url = 'https://api.opsgenie.com', + date_format = '%Y-%m-%d %H:%M:%S', + host_alert_message = '{last_update_date} {hostname} is {state}', + host_alert_description = '', + host_alert_alias = '{hostname}_{state}', + service_alert_message = '{last_update_date} {hostname} // {serviceDescription} is {state}', + service_alert_description = '', + service_alert_alias = '{hostname}_{serviceDescription}_{state}', + ba_incident_message = '{baName} is {state}, health level reached {level_nominal}', + ba_incident_description = '', + ba_incident_tags = 'centreon,applications', + enable_incident_tags = 1, + get_bv = 1, + enable_severity = 0, + priority_must_be_set = 0, + priority_matching = 'P1=1,P2=2,P3=3,P4=4,P5=5', + opsgenie_priorities = 'P1,P2,P3,P4,P5', + priority_mapping = {} + } + + retval.category_mapping = { + neb = 1, + bbdo = 2, + 
storage = 3, + correlation = 4, + dumper = 5, + bam = 6, + extcmd = 7 + } + + retval.element_mapping = { + [1] = {}, + [3] = {}, + [6] = {} + } + + retval.element_mapping[1].acknowledgement = 1 + retval.element_mapping[1].comment = 2 + retval.element_mapping[1].custom_variable = 3 + retval.element_mapping[1].custom_variable_status = 4 + retval.element_mapping[1].downtime = 5 + retval.element_mapping[1].event_handler = 6 + retval.element_mapping[1].flapping_status = 7 + retval.element_mapping[1].host_check = 8 + retval.element_mapping[1].host_dependency = 9 + retval.element_mapping[1].host_group = 10 + retval.element_mapping[1].host_group_member = 11 + retval.element_mapping[1].host = 12 + retval.element_mapping[1].host_parent = 13 + retval.element_mapping[1].host_status = 14 + retval.element_mapping[1].instance = 15 + retval.element_mapping[1].instance_status = 16 + retval.element_mapping[1].log_entry = 17 + retval.element_mapping[1].module = 18 + retval.element_mapping[1].service_check = 19 + retval.element_mapping[1].service_dependency = 20 + retval.element_mapping[1].service_group = 21 + retval.element_mapping[1].service_group_member = 22 + retval.element_mapping[1].service = 23 + retval.element_mapping[1].service_status = 24 + retval.element_mapping[1].instance_configuration = 25 + + retval.element_mapping[3].metric = 1 + retval.element_mapping[3].rebuild = 2 + retval.element_mapping[3].remove_graph = 3 + retval.element_mapping[3].status = 4 + retval.element_mapping[3].index_mapping = 5 + retval.element_mapping[3].metric_mapping = 6 + + retval.element_mapping[6].ba_status = 1 + retval.element_mapping[6].kpi_status = 2 + retval.element_mapping[6].meta_service_status = 3 + retval.element_mapping[6].ba_event = 4 + retval.element_mapping[6].kpi_event = 5 + retval.element_mapping[6].ba_duration_event = 6 + retval.element_mapping[6].dimension_ba_event = 7 + retval.element_mapping[6].dimension_kpi_event = 8 + retval.element_mapping[6].dimension_ba_bv_relation_event = 9 + retval.element_mapping[6].dimension_bv_event = 10 + retval.element_mapping[6].dimension_truncate_table_signal = 11 + retval.element_mapping[6].bam_rebuild = 12 + retval.element_mapping[6].dimension_timeperiod = 13 + retval.element_mapping[6].dimension_ba_timeperiod_relation = 14 + retval.element_mapping[6].dimension_timeperiod_exception = 15 + retval.element_mapping[6].dimension_timeperiod_exclusion = 16 + retval.element_mapping[6].inherited_downtime = 17 + + retval.status_mapping = { + [1] = {}, + [3] = {}, + [6] = {} + } + + retval.status_mapping[1][14] = { + [0] = 'UP', + [1] = 'DOWN', + [2] = 'UNREACHABLE' + } + + retval.status_mapping[1][24] = { + [0] = 'OK', + [1] = 'WARNING', + [2] = 'CRITICAL', + [3] = 'UNKNOWN' + } + + retval.status_mapping[6][1] = { + [0] = 'OK', + [1] = 'WARNING', + [2] = 'CRITICAL' + } + + -- retval.status_mapping[14] = { + -- [0] = 'UP', + -- [1] = 'DOWN', + -- [2] = 'UNREACHABLE' + -- } + + -- retval.status_mapping[24] = { + -- [0] = 'OK', + -- [1] = 'WARNING', + -- [2] = 'CRITICAL', + -- [3] = 'UNKNOWN' + -- } + + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + if i == 'app_api_token' or i == 'integration_api_token' then + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => *********") + else + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v) + end + else + broker_log:info(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. 
v) + end + end + + retval.hard_only = check_boolean_number_option_syntax(retval.hard_only, 1) + retval.acknowledged = check_boolean_number_option_syntax(retval.acknowledged, 0) + retval.in_downtime = check_boolean_number_option_syntax(retval.in_downtime, 0) + retval.skip_anon_events = check_boolean_number_option_syntax(retval.skip_anon_events, 1) + retval.skip_nil_id = check_boolean_number_option_syntax(retval.skip_nil_id, 1) + retval.host_alert_message = ifnil_or_empty(retval.host_alert_message, '{last_update_date} {hostname} is {state}') + retval.service_alert_message = ifnil_or_empty(retval.service_alert_message, '{last_update_date} {hostname} // {serviceDescription} is {state}') + retval.enable_severity = check_boolean_number_option_syntax(retval.enable_severity, 1) + retval.priority_must_be_set = check_boolean_number_option_syntax(retval.priority_must_be_set, 0) + retval.priority_matching = ifnil_or_empty(retval.priority_matching, 'P1=1,P2=2,P3=3,P4=4,P5=5') + retval.opsgenie_priorities = ifnil_or_empty(retval.opsgenie_priorities, 'P1,P2,P3,P4,P5') + retval.host_alert_alias = ifnil_or_empty(retval.host_alert_alias, '{hostname}_{state}') + retval.service_alert_alias = ifnil_or_empty(retval.service_alert_alias, '{hostname}_{serviceDescription}_{state}') + retval.ba_incident_message = ifnil_or_empty(retval.ba_incident_message, '{baName} is {state}, health level reached {level_nominal}') + retval.enable_incident_tags = check_boolean_number_option_syntax(retval.enable_incident_tags, 1) + retval.get_bv = check_boolean_number_option_syntax(retval.get_bv, 1) + + local severity_to_priority = {} + + if retval.enable_severity == 1 then + retval.priority_matching_list = split(retval.priority_matching, ',') + + for i, v in ipairs(retval.priority_matching_list) do + severity_to_priority = split(v, '=') + + if string.match(retval.opsgenie_priorities, severity_to_priority[1]) == nil then + broker_log:warning(1, "EventQueue.new: severity is enabled but the priority configuration is wrong. configured matching: " .. retval.priority_matching_list .. + ", invalid parsed priority: " .. severity_to_priority[1] .. ", known Opsgenie priorities: " .. opsgenie_priorities .. + ". Considere adding your priority to the opsgenie_priorities list if the parsed priority is valid") + break + end + + retval.priority_mapping[severity_to_priority[2]] = severity_to_priority[1] + end + end + + retval.__internal_ts_last_flush = os.time() + retval.events = {} + setmetatable(retval, EventQueue) + -- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:call run api call +-- @param {string} data, the data we want to send to opsgenie +-- @return {array} decoded output +-- @throw exception if http call fails or response is empty +-------------------------------------------------------------------------------- +function EventQueue:call (data, url_path, token) + method = method or "GET" + data = data or nil + + local endpoint = self.api_url .. url_path + broker_log:info(3, "EventQueue:call: Prepare url " .. endpoint) + + local res = "" + local request = curl.easy() + :setopt_url(endpoint) + :setopt_writefunction(function (response) + res = res .. 
tostring(response) + end) + + broker_log:info(3, "EventQueue:call: Request initialize") + + -- set proxy address configuration + if (self.proxy_address ~= '') then + if (self.proxy_port ~= '') then + request:setopt(curl.OPT_PROXY, self.proxy_address .. ':' .. self.proxy_port) + else + broker_log:error(1, "EventQueue:call: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.proxy_username ~= '') then + if (self.proxy_password ~= '') then + request:setopt(curl.OPT_PROXYUSERPWD, self.proxy_username .. ':' .. self.proxy_password) + else + broker_log:error(1, "EventQueue:call: proxy_password parameter is not set but proxy_username is used") + end + end + + broker_log:info(3, "Add JSON header") + request:setopt( + curl.OPT_HTTPHEADER, + { + "Accept: application/json", + "Content-Type: application/json", + "Authorization: GenieKey " .. token + } + ) + + broker_log:info(3, "EventQueue:call: Add post data") + request:setopt_postfields(data) + + broker_log:info(3, "EventQueue:call: request body " .. tostring(data)) + broker_log:info(3, "EventQueue:call: request header " .. tostring(token)) + broker_log:info(3, "EventQueue:call: Call url " .. tostring(endpoint)) + request:perform() + + respCode = request:getinfo(curl.INFO_RESPONSE_CODE) + broker_log:info(3, "EventQueue:call: HTTP Code : " .. respCode) + broker_log:info(3, "EventQueue:call: Response body : " .. tostring(res)) + + request:close() + + if respCode >= 300 then + broker_log:info(1, "EventQueue:call: HTTP Code : " .. respCode) + broker_log:info(1, "EventQueue:call: HTTP Error : " .. res) + return false + end + + if res == "" then + broker_log:info(1, "EventQueue:call: HTTP Error : " .. res) + return false + end + + return broker.json_decode(res) +end + +-------------------------------------------------------------------------------- +-- is_valid_category: check if the event category is valid +-- @param {number} category, the category id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_category (category) + return find_in_mapping(self.category_mapping, self.category_type, category) +end + +-------------------------------------------------------------------------------- +-- is_valid_element: check if the event element is valid +-- @param {number} category, the category id of the event +-- @param {number} element, the element id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_element (category, element) + return find_in_mapping(self.element_mapping[category], self.element_type, element) +end + +-------------------------------------------------------------------------------- +-- is_valid_neb_event: check if the neb event is valid +-- @return {table} validNebEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_neb_event () + if self.currentEvent.element == 14 or self.currentEvent.element == 24 then + -- prepare api info + self.currentEvent.endpoint = '/v2/alerts' + self.currentEvent.token = self.integration_api_token + + self.currentEvent.hostname = get_hostname(self.currentEvent.host_id) + -- can't find hostname in cache + if self.currentEvent.hostname == self.currentEvent.host_id and self.skip_anon_events == 1 then + return false + end + + -- can't find host_id in the 
event + if self.currentEvent.hostname == 0 and self.skip_nil_id == 1 then + return false + end + + if (string.find(self.currentEvent.hostname, '^_Module_BAM_*')) then + return false + end + + -- check hard state + if not compare_numbers(self.currentEvent.state_type, self.hard_only, '>=') then + return false + end + + -- check ack + if not compare_numbers(self.acknowledged, boolean_to_number(self.currentEvent.acknowledged), '>=') then + return false + end + + -- check downtime + if not compare_numbers(self.in_downtime, self.currentEvent.scheduled_downtime_depth, '>=') then + return false + end + + if not self:is_valid_hostgroup() then + return false + end + + if self.enable_severity == 1 then + if not self:set_priority() then + return false + end + end + + self.currentEvent.output = ifnil_or_empty(string.match(self.currentEvent.output, "^(.*)\n"), 'no output') + end + + if self.currentEvent.element == 14 then + + if not check_event_status(self.currentEvent.state, self.host_status) then + return false + end + + self.sendData.message = self:buildMessage(self.host_alert_message, nil) + self.sendData.description = self:buildMessage(self.host_alert_description, self.currentEvent.output) + self.sendData.alias = self:buildMessage(self.host_alert_alias, nil) + + elseif self.currentEvent.element == 24 then + + self.currentEvent.serviceDescription = get_service_description(self.currentEvent.host_id, self.currentEvent.service_id) + + -- can't find service description in cache + if self.currentEvent.serviceDescription == self.currentEvent.service_id and self.skip_anon_events == 1 then + return false + end + + if not check_event_status(self.currentEvent.state, self.service_status) then + return false + end + + -- can't find service_id in the event + if self.currentEvent.serviceDescription == 0 and self.skip_nil_id == 1 then + return false + end + + self.sendData.message = self:buildMessage(self.service_alert_message, nil) + self.sendData.description = self:buildMessage(self.service_alert_description, self.currentEvent.output) + self.sendData.alias = self:buildMessage(self.service_alert_alias, nil) + end + + return true +end + +-------------------------------------------------------------------------------- +-- is_valid_storage_event: check if the storage event is valid +-- @return {table} validStorageEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_storage_event () + return true +end + +-------------------------------------------------------------------------------- +-- is_valid_bam_event: check if the bam event is valid +-- @return {boolean} validBamEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_bam_event () + if self.currentEvent.element == 1 then + broker_log:info(3, 'EventQueue:is_valid_bam_event: starting BA treatment 1') + -- prepare api info + self.currentEvent.endpoint = '/v1/incidents/create' + self.currentEvent.token = self.app_api_token + + -- check if ba event status is valid + broker_log:info(3, 'EventQueue:is_valid_bam_event: starting BA treatment 2') + if not check_event_status(self.currentEvent.state, self.ba_status) then + return false + end + + self.currentEvent.baName, self.currentEvent.baDescription = get_ba_name(self.currentEvent.ba_id) + + if self.currentEvent.baName and self.currentEvent.baName ~= nil then + if self.enable_incident_tags == 1 then 
+ self.currentEvent.bv_names, self.currentEvent.bv_descriptions = get_bvs(self.currentEvent.ba_id) + self.sendData.tags = self.currentEvent.bv_names + + if ba_incident_tags ~= '' then + local custom_tags = split(self.ba_incident_tags, ',') + for i, v in ipairs(custom_tags) do + broker_log:info(3, 'EventQueue:is_valid_bam_event: adding ' .. tostring(v) .. ' to the list of tags') + table.insert(self.sendData.tags, v) + end + end + end + + self.sendData.message = self:buildMessage(self.ba_incident_message, nil) + return true + end + end + return false +end + +-------------------------------------------------------------------------------- +-- is_valid_event: check if the event is valid +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_event () + local validEvent = false + self.sendData = {} + if self.currentEvent.category == 1 then + validEvent = self:is_valid_neb_event() + elseif self.currentEvent.category == 3 then + validEvent = self:is_valid_storage_event() + elseif self.currentEvent.category == 6 then + validEvent = self:is_valid_bam_event() + end + + return validEvent +end + +-------------------------------------------------------------------------------- +-- : check if the event is associated to an accepted hostgroup +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_hostgroup () + self.currentEvent.hostgroups = get_hostgroups(self.currentEvent.host_id) + + -- return true if option is not set + if self.accepted_hostgroups == '' then + return true + end + + -- drop event if we can't find any hostgroup on the host + if not self.currentEvent.hostgroups then + broker_log:info(2, 'EventQueue:is_valid_hostgroup: dropping event because no hostgroup has been found for host_id: ' .. self.currentEvent.host_id) + return false + end + + -- check if hostgroup is in the list of the accepted one + local retval, matchedHostgroup = find_hostgroup_in_list(split(self.accepted_hostgroups, ','), self.currentEvent.hostgroups) + if matchedHostgroup == nil then + broker_log:info(2, 'EventQueue:is_valid_hostgroup: no hostgroup matched provided list: ' .. self.accepted_hostgroups .. ' for host_id: ' .. self.currentEvent.host_id .. '') + else + broker_log:info(2, 'EventQueue:is_valid_hostgroup: host_id: ' .. self.currentEvent.host_id .. ' matched is in the following hostgroup: ' .. matchedHostgroup) + end + + return retval +end + + +local queue + +-------------------------------------------------------------------------------- +-- init, initiate stream connector with parameters from the configuration file +-- @param {table} parameters, the table with all the configuration parameters +-------------------------------------------------------------------------------- +function init (parameters) + logfile = parameters.logfile or "/var/log/centreon-broker/connector-opsgenie.log" + + if not parameters.app_api_token or not parameters.integration_api_token then + broker_log:error(1,'Required parameters are: api_token. There type must be string') + end + + broker_log:set_parameters(1, logfile) + broker_log:info(1, "Parameters") + for i,v in pairs(parameters) do + if i == 'app_api_token' or i == 'integration_api_token' then + broker_log:info(1, "Init " .. i .. " : *********") + else + broker_log:info(1, "Init " .. i .. " : " .. 
v) + end + end + + queue = EventQueue:new(parameters) +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the queue +-- @param {table} eventData, the data related to the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:add () + self.events[#self.events + 1] = self.sendData + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored events +-- Called when the max number of events or the max age are reached +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:flush () + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + + retval = self:send_data() + + self.events = {} + + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external tool +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:send_data () + local data = '' + local counter = 0 + + for _, raw_event in ipairs(self.events) do + if counter == 0 then + data = broker.json_encode(raw_event) + counter = counter + 1 + else + data = data .. ',' .. broker.json_encode(raw_event) + end + end + + broker_log:info(2, 'EventQueue:send_data: creating json: ' .. data) + + if self:call(data, self.currentEvent.endpoint, self.currentEvent.token) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue:buildMessage, creates a message from a template +-- @param {string} template, the message template that needs to be converted +-- @param {string} default_template, a default message that will be parsed if template is empty +-- @return {string} template, the template vith converted values +-------------------------------------------------------------------------------- +function EventQueue:buildMessage (template, default_template) + if template == '' then + template = default_template + end + + for variable in string.gmatch(template, "{(.-)}") do + -- converts from timestamp to human readable date + if string.match(variable, '.-_date') then + template = template:gsub("{" .. variable .. "}", os.date(self.date_format, self.currentEvent[variable:sub(1, -6)])) + -- replaces numeric state value for human readable state (warning, critical...) + elseif variable == 'state' then + template = template:gsub("{" .. variable .. "}", self.status_mapping[self.currentEvent.category][self.currentEvent.element][self.currentEvent.state]) + else + if self.currentEvent[variable] ~= nil then + template = template:gsub("{" .. variable .. "}", self.currentEvent[variable]) + else + broker_log:warning(1, "EventQueue:buildMessage: {" .. variable .. 
"} is not a valid template variable") + end + end + end + + return template +end + +-------------------------------------------------------------------------------- +-- EventQueue:set_priority, set opsgenie priority using centreon severity +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:set_priority () + local severity = nil + + -- get host severity + if self.currentEvent.service_id == nil then + broker_log:info(3, "EventQueue:set_priority: getting severity for host: " .. self.currentEvent.host_id) + severity = get_severity(self.currentEvent.host_id) + -- get service severity + else + broker_log:info(3, "EventQueue:set_priority: getting severity for service: " .. self.currentEvent.service_id) + severity = get_severity(self.currentEvent.host_id, self.currentEvent.service_id) + end + + -- find the opsgenie priority depending on the found severity + local matching_priority = self.priority_mapping[tostring(severity)] + + -- drop event if no severity is found and opsgenie priority must be set + if matching_priority == nil and self.priority_must_be_set == 1 then + broker_log:info(3, "EventQueue:set_priority: couldn't find a matching priority for severity: " .. tostring(severity) .. " and priority is mandatory. Dropping event") + return false + -- ignore priority if it is not found, opsgenie will affect a default one (P3) + elseif matching_priority == nil then + broker_log:info(3, 'EventQueue:set_priority: could not find matching priority for severity: ' .. tostring(severity) .. '. Skipping priority...') + return true + else + self.sendData.priority = matching_priority + end + + return true +end + +-------------------------------------------------------------------------------- +-- write, +-- @param {array} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + + -- drop event if wrong category + if not queue:is_valid_category(event.category) then + return true + end + + -- drop event if wrong element + if not queue:is_valid_element(event.category, event.element) then + return false + end + + queue.currentEvent = event + + -- START FIX FOR BROKER SENDING DUPLICATED EVENTS + -- do not compute event if it is duplicated + if queue:is_event_duplicated() then + return true + end + -- END OF FIX + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.max_buffer_size) then + broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") + os.execute("sleep " .. tonumber(1)) + queue:flush() + end + + -- adding event to the queue + if queue:is_valid_event() then + + -- START FIX FOR BROKER SENDING DUPLICATED EVENTS + -- create id from event data + if queue.currentEvent.element == 14 and queue.currentEvent.category == 1 then + eventId = tostring(queue.currentEvent.host_id) .. '_' .. 
tostring(queue.currentEvent.last_check) + elseif queue.currentEvent.element == 24 and queue.currentEvent.category == 1 then + eventId = tostring(queue.currentEvent.host_id) .. '_' .. tostring(queue.currentEvent.service_id) .. '_' .. tostring(queue.currentEvent.last_check) + end + + -- remove oldest event from sent events list + if #queue.validatedEvents >= queue.max_stored_events then + table.remove(queue.validatedEvents, 1) + end + + -- add event in the sent events list and add list to queue + table.insert(queue.validatedEvents, eventId) + -- END OF FIX + + queue:add() + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:is_event_duplicated, create an id from the neb event and check if id is in an already sent events list +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_event_duplicated() + local eventId = '' + if self.currentEvent.element == 14 then + eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.last_check) + else + eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.service_id) .. '_' .. tostring(self.currentEvent.last_check) + end + + for i, v in ipairs(self.validatedEvents) do + if eventId == v then + return true + end + end + + return false +end \ No newline at end of file From 75a7cabee32c1fa7f4b91bcd08ae7ace1659e2a4 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 5 Apr 2021 16:20:36 +0200 Subject: [PATCH 055/219] Stream connectors v2 (#45) --- stream-connectors/CONTRIBUTE.md | 56 ++ stream-connectors/README.md | 713 +----------------- .../bsm/bsm_connector-apiv1.lua} | 0 .../elasticsearch/elastic-metrics-apiv1.lua} | 0 .../elasticsearch/elastic-neb-apiv1.lua} | 0 .../influxdb/influxdb-metrics-apiv1.lua} | 0 .../influxdb/influxdb-neb-apiv1.lua} | 0 .../ndo/ndo-module-apiv1.lua} | 0 .../ndo/ndo-output-apiv1.lua} | 0 .../omi/omi_connector-apiv1.lua} | 0 .../opsgenie/opsgenie-apiv1.lua} | 0 .../pagerduty/pagerduty-apiv1.lua} | 0 .../prometheus}/prometheus-gateway.lua | 0 .../servicenow/connector-servicenow.lua | 0 .../splunk}/splunk-conf1.png | Bin .../splunk}/splunk-conf2.png | Bin .../splunk/splunk-events-http.lua | 0 .../splunk/splunk-events-luacurl.lua | 0 .../splunk/splunk-metrics-http.lua | 0 .../splunk/splunk-metrics-luacurl.lua | 0 .../splunk/splunk-states-http.lua | 0 .../splunk}/splunk.png | Bin .../warp10/export-warp10.lua | 0 .../community-powered/canopsis/README.md | 183 +++++ .../canopsis/bbdo2canopsis.lua | 0 .../centreon-configuration-screenshot.png | Bin .../sc_broker.lua | 355 +++++++++ .../sc_common.lua | 146 ++++ .../sc_event.lua | 528 +++++++++++++ .../sc_logger.lua | 85 +++ .../sc_params.lua | 208 +++++ .../sc_test.lua | 31 + ...treon-stream-connectors-lib-0.1.0.rockspec | 26 + .../modules/tests/bam_stream_connector.lua | 203 +++++ .../modules/tests/neb_stream_connector.lua | 213 ++++++ .../modules/tests/sc_common-test.lua | 115 +++ 36 files changed, 2187 insertions(+), 675 deletions(-) create mode 100644 stream-connectors/CONTRIBUTE.md rename stream-connectors/{bsm/bsm_connector.lua => centreon-certified/bsm/bsm_connector-apiv1.lua} 
(100%) rename stream-connectors/{elasticsearch/elastic-metrics.lua => centreon-certified/elasticsearch/elastic-metrics-apiv1.lua} (100%) rename stream-connectors/{elasticsearch/elastic-neb.lua => centreon-certified/elasticsearch/elastic-neb-apiv1.lua} (100%) rename stream-connectors/{influxdb/influxdb-metrics.lua => centreon-certified/influxdb/influxdb-metrics-apiv1.lua} (100%) rename stream-connectors/{influxdb/influxdb-neb.lua => centreon-certified/influxdb/influxdb-neb-apiv1.lua} (100%) rename stream-connectors/{ndo/ndo-module.lua => centreon-certified/ndo/ndo-module-apiv1.lua} (100%) rename stream-connectors/{ndo/ndo-output.lua => centreon-certified/ndo/ndo-output-apiv1.lua} (100%) rename stream-connectors/{omi/omi_connector.lua => centreon-certified/omi/omi_connector-apiv1.lua} (100%) rename stream-connectors/{opsgenie/opsgenie.lua => centreon-certified/opsgenie/opsgenie-apiv1.lua} (100%) rename stream-connectors/{pagerduty/pagerduty.lua => centreon-certified/pagerduty/pagerduty-apiv1.lua} (100%) rename stream-connectors/{prometheus-gateway => centreon-certified/prometheus}/prometheus-gateway.lua (100%) rename stream-connectors/{ => centreon-certified}/servicenow/connector-servicenow.lua (100%) rename stream-connectors/{pictures => centreon-certified/splunk}/splunk-conf1.png (100%) rename stream-connectors/{pictures => centreon-certified/splunk}/splunk-conf2.png (100%) rename stream-connectors/{ => centreon-certified}/splunk/splunk-events-http.lua (100%) rename stream-connectors/{ => centreon-certified}/splunk/splunk-events-luacurl.lua (100%) rename stream-connectors/{ => centreon-certified}/splunk/splunk-metrics-http.lua (100%) rename stream-connectors/{ => centreon-certified}/splunk/splunk-metrics-luacurl.lua (100%) rename stream-connectors/{ => centreon-certified}/splunk/splunk-states-http.lua (100%) rename stream-connectors/{pictures => centreon-certified/splunk}/splunk.png (100%) rename stream-connectors/{ => centreon-certified}/warp10/export-warp10.lua (100%) create mode 100644 stream-connectors/community-powered/canopsis/README.md rename stream-connectors/{ => community-powered}/canopsis/bbdo2canopsis.lua (100%) rename stream-connectors/{ => community-powered/canopsis}/pictures/centreon-configuration-screenshot.png (100%) create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_test.lua create mode 100644 stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec create mode 100644 stream-connectors/modules/tests/bam_stream_connector.lua create mode 100644 stream-connectors/modules/tests/neb_stream_connector.lua create mode 100644 stream-connectors/modules/tests/sc_common-test.lua diff --git a/stream-connectors/CONTRIBUTE.md b/stream-connectors/CONTRIBUTE.md new file mode 100644 index 00000000000..523558243b9 --- /dev/null +++ b/stream-connectors/CONTRIBUTE.md @@ -0,0 +1,56 @@ +# Contribute to the Centreon Stream Connectors project + +## How to contribute + +There are many ways you can contribute to this project and everyone should be able to help in its own way. 
+ +### For code lovers + +You can work on Stream Connectors + +- Create a new stream connector +- Update an existing stream connector +- [Fix issues](https://github.com/centreon/centreon-stream-connector-scripts/issues) + +You can improve our Lua modules + +- Add a new module + - Comment it + - Document it + - *optional* Provide usage examples +- Update an existing module + - Update the documentation (if it changes the input and/or output of a method) + - Update usage examples if there are any and if they are impacted by the change + +### For everybody + +Since we are not all found of code, there are still ways to be part of this project + +- Open issues for bugs or feedbacks (or help people) +- Update an already existing example or provide new ones + +## Code guidelines + +If you want to work on our LUA modules, you must follow the coding style provided by luarocks +[Coding style guidelines](https://github.com/luarocks/lua-style-guide) + +While it is mandatory to follow those guidelines for modules, they will not be enforced on community powered Stream Connectors scripts. +It is however recommened to follow them as much as possible. + +## Documentations + +When creating a module you must comment your methods as follow + +```lua +--- This is a local function that does things +-- @param first_name (string) the first name +-- @param last_name (string) the last name +-- @return age (number) the age of the person +local function get_age(first_name, last_name) + -- some code -- +end +``` + +You should comment complicated or long code blocks to help people review your code. + +It is also required to create or update the module documentation for a more casual reading to help people use your module in their Stream Connector diff --git a/stream-connectors/README.md b/stream-connectors/README.md index be223830be3..50912d7f38b 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -1,689 +1,52 @@ -# Centreon Stream Connectors # - -Here are several stream connectors for the -[Centreon Broker](https://github.com/centreon/centreon-broker). - -# Stream connectors - -The goal is to provide useful scripts to the community to extend the open source solution Centreon. - -You can find Lua scripts written to export Centreon data to several outputs. - -If one script is the good one for you, it is recommended to copy it on the Centreon central server -into the **/usr/share/centreon-broker/lua** directory. If it does not exist, you can create it. This -directory must be readable by the *centreon-broker* user. - -When the script is copied, you have to configure it through the centreon web interface. - -Stream connector documentation are provided here: -* https://documentation.centreon.com/docs/centreon/en/latest/developer/writestreamconnector.html -* https://documentation.centreon.com/docs/centreon-broker/en/latest/exploit/stream_connectors.html - -Don't hesitate to propose improvements and/or contact the community through our Slack workspace. - -Here is a list of the available scripts: - -* [Elasticsearch](#elasticsearch) -* [InfluxDB](#InfluxDB) -* [Warp 10](#Warp10) -* [Splunk](#Splunk) -* [ServiceNow](#service-now) -* [NDO](#NDO) -* [HP OMI](#OMI) -* [PagerDuty](#PagerDuty) -* [Canopsis](#Canopsis) - -# Elasticsearch - -## Elasticsearch from metrics events: *elasticsearch/elastic-metrics.lua* - -This stream connector works with **metric events**. So you need them to be configured in Centreon broker. 
- -Parameters to specify in the stream connector configuration are: - -* log-file as **string**: it is the *complete file name* of this script logs. -* elastic-address as **string**: it is the *ip address* of the Elasticsearch server -* elastic-port as **number**: it is the port, if not provided, this value is *9200*. -* max-row as **number**: it is the max number of events before sending them to the elastic server. If not specified, its value is 100 - -## Elasticsearch from NEB events: *elasticsearch/elastic-neb.lua* - -This stream connector is an alternative to the previous one, but works with **neb service\_status events**. -As those events are always available on a Centreon platform, this script should work more often. - -To use this script, one need to install the lua-socket and lua-sec libraries. - -Parameters to specify in the stream connector configuration are: - -* http\_server\_address as **string**: the *(ip) address* of the Elasticsearch server -* http\_server\_port as **number**: the port of the Elasticsearch server, by default *9200* -* http\_server\_protocol as **string**: the connection scheme, by default *http* -* http\_timeout as **number**: the connection timeout, by default *5* seconds -* filter\_type as **string**: filter events to compute, by default *metric,status* -* elastic\_index\_metric as **string**: the index name for metrics, by default *centreon_metric* -* elastic\_index\_status as **string**: the index name for status, by default *centreon_status* -* elastic\_username as **string**: the API username if set -* elastic\_password as **password**: the API password if set -* max\_buffer\_size as **number**: the number of events to stock before the next flush, by default *5000* -* max\_buffer\_age as **number**: the delay to wait before the next flush, by default *30* seconds -* skip\_anon\_events as **number**: skip events without name in broker cache, by default *1* -* log\_level as **number**: log level from 1 to 3, by default *3* -* log\_path as **string**: path to log file, by default */var/log/centreon-broker/stream-connector-elastic-neb.log* - -If one of max\_buffer\_size or max\_buffer\_age is reached, events are sent. - -Two indices need to be created on the Elasticsearch server: -``` -curl -X PUT "http://elasticsearch/centreon_metric" -H 'Content-Type: application/json' --d '{"mappings":{"properties":{"host":{"type":"keyword"},"service":{"type":"keyword"}, -"instance":{"type":"keyword"},"metric":{"type":"keyword"},"value":{"type":"double"}, -"min":{"type":"double"},"max":{"type":"double"},"uom":{"type":"text"}, -"type":{"type":"keyword"},"timestamp":{"type":"date","format":"epoch_second"}}}}' - -curl -X PUT "http://elasticsearch/centreon_status" -H 'Content-Type: application/json' --d '{"mappings":{"properties":{"host":{"type":"keyword"},"service":{"type":"keyword"}, -"output":{"type":"text"},"status":{"type":"keyword"},"state":{"type":"keyword"}, -"type":{"type":"keyword"},"timestamp":{"type":"date","format":"epoch_second"}}}}'' -``` - -# InfluxDB - -## InfluxDB from metrics events: *influxdb/influxdb-metrics.lua* - -This stream connector works with **metric events**. So you need them to be configured in Centreon broker. - -To use this script, one need to install the lua-socket library. 
- -Parameters to specify in the stream connector configuration are: - -* http\_server\_address as **string**: it is the *ip address* of the InfluxDB server -* http\_server\_port as **number**: it is the port, if not provided, this value is *8086* -* http\_server\_protocol as **string**: by default, this value is *http* -* influx\_database as **string**: The database name, *mydb* is the default value -* max\_buffer\_size as **number**: The number of events to stock before them to be sent to InfluxDB -* max\_buffer\_age as **number**: The delay in seconds to wait before the next flush. - -if one of max\_buffer\_size or max\_buffer\_age is reached, events are sent. - -## InfluxDB from neb events: *influxdb/influxdb-neb.lua* - -This stream connector is an alternative to the previous one, but works with **neb service\_status events**. -As those events are always available on a Centreon platform, this script should work more often. - -To use this script, one need to install the lua-socket and lua-sec libraries. - -Parameters to specify in the stream connector configuration are: - -* measurement as **string**: the InfluxDB *measurement*, overwrites the service description if set -* http\_server\_address as **string**: the *(ip) address* of the InfluxDB server -* http\_server\_port as **number**: the port of the InfluxDB server, by default *8086* -* http\_server\_protocol as **string**: the connection scheme, by default *https* -* http\_timeout as **number**: the connection timeout, by default *5* seconds -* influx\_database as **string**: the database name, by default *mydb* -* influx\_retention\_policy as **string**: the database retention policy, default is database's default -* influx\_username as **string**: the database username, no authentication performed if not set -* influx\_password as **string**: the database password, no authentication performed if not set -* max\_buffer\_size as **number**: the number of events to stock before the next flush, by default *5000* -* max\_buffer\_age as **number**: the delay to wait before the next flush, by default *30* seconds -* skip\_anon\_events as **number**: skip events without name in broker cache, by default *1* -* log\_level as **number**: log level from 1 to 3, by default *3* -* log\_path as **string**: path to log file, by default */var/log/centreon-broker/stream-connector-influxdb-neb.log* - -if one of max\_buffer\_size or max\_buffer\_age is reached, events are sent. - -# Warp10 - -## Warp10 from neb events: *warp10/export-warp10.lua* - -This stream connector works with **neb service\_status events**. - -This stream connector need at least centreon-broker-18.10.1. - -To use this script, one need to install the lua-curl library. - -Parameters to specify in the stream connector configuration are: - -* ipaddr as **string**: the ip address of the Warp10 server -* logfile as **string**: the log file -* port as **number**: the Warp10 server port -* token as **string**: the Warp10 write token -* max\_size as **number**: how many queries to store before sending them to the Warp10 server. - -# Splunk - -There are two ways to use our stream connector with Splunk. The first and probably most common way uses Splunk Universal Forwarder. The second -method uses Splunk HEC (HTTP Event Collector). - -## The Splunk Universal Forwarder method - -In that case, you're going to use "Centreon4Splunk", it comes with: -* A Splunk App. 
you may find on Splunkbase [here](https://splunkbase.splunk.com/app/4304/) -* The LUA script and documentation [here](https://github.com/lkco/centreon4Splunk) - -Thanks to lkco! - -## The Splunk HEC method - -There are two Lua scripts proposed here: - -* splunk-events-luacurl.lua that sends states to Splunk. -* splunk-metrics-luacurl.lua that sends metrics to Splunk. - -### Splunk Configuration - -An HTTP events collector has be configured in data entries. - -![alt text](pictures/splunk.png "Splunk configuration") - -### Installation - -Login as `root` on the Centreon central server using your favorite SSH client. - -In case your Centreon central server must use a proxy server to reach the Internet, you will have to export the `https_proxy` environment variable and configure `yum` to be able to install everything. - -```bash -export https_proxy=http://my.proxy.server:3128 -echo "proxy=http://my.proxy.server:3128" >> /etc/yum.conf -``` - -Now that your Centreon central server is able to reach the Internet, you can run: - -```bash -yum install -y lua-curl -``` - -These packages are necessary for the script to run. - -Then copy the `splunk-events-luacurl.lua` and `splunk-metrics-luacurl.lua` scripts to `/usr/share/centreon-broker/lua`. - -### Minimal configuration - -Here are the steps to configure your stream connector for the Events: - -* Add a new "Generic - Stream connector" output to the central broker in the "Configuration / Poller / Broker configuration" menu. -* Name it as wanted and set the right path: - -| Name | Splunk Events | -| ---- | -------------------------------------------------------- | -| Path | /usr/share/centreon-broker/lua/splunk-events-luacurl.lua | - -* Add at least 3 string parameters containing your Splunk configuration: - -| Type | String | -| ----------------- | --------------------------------------- | -| `http_server_url` | `http://x.x.x.:8088/services/collector` | -| `splunk_token` | `your hec token` | -| `splunk_index` | `your event index` | - -Here are the steps to configure your stream connector for the Metrics: - -* Add a new "Generic - Stream connector" output to the central broker in the "Configuration / Poller / Broker configuration" menu. -* Name it as wanted and set the right path - -| Name | Splunk Metrics | -| ---- | --------------------------------------------------------- | -| Path | /usr/share/centreon-broker/lua/splunk-metrics-luacurl.lua | - -* Add at least 3 string parameters containing your Splunk configuration: - -| Type | String | -| ----------------- | --------------------------------------- | -| `http_server_url` | `http://x.x.x.:8088/services/collector` | -| `splunk_token` | `your hec token` | -| `splunk_index` | `your metric index` | - -Thats all for now! 
- -Then save your configuration, export it and restart the broker daemon: - -```bash -systemctl restart cbd -``` - -### Advanced configuration - -#### Splunk Host - -If you want to change the `host` value in the HTTP POST data to identify from which Centreon Plateform the data is sent: - -| Type | String | -| ------------- | ------------ | -| `splunk_host` | `Poller-ABC` | - -#### Proxy - -If your Centreon central server has no direct access to Splunk but needs a proxy server, you will have to add a new string parameter: - -| Type | String | -| ------------------- | ------------------------------- | -| `http_proxy_string` | `http://your.proxy.server:3128` | - -#### Log level / file - -The default value of 2 is fine for initial troubleshooting, but generates a huge amount of logs if you have a lot of hosts. In order to get less log messages, you are should add this parameter: - -| Type | Number | -| ----------- | ------ | -| `log_level` | 1 | - -The default log file is `/var/log/centreon-broker/stream-connector-splunk-*.log`. If it does not suit you, you can set it with the `log_path` parameter: - -| Type | String | -| ---------- | ---------------------------------------------- | -| `log_path` | `/var/log/centreon-broker/my-custom-logfile.log` | - -# Service Now - -The stream connector sends the check results received from Centreon Engine to ServiceNow. Only the host and service check results are sent. - -This stream connector is in **BETA** version because it has not been used enough time in production environments. - -## Installation - -This stream connector needs the lua-curl library available for example with *luarocks*: - -`luarocks install lua-curl` - -## Configuration - -In *Configuration > Pollers > Broker configuration*, you need to modify the Central Broker Master configuration. - -Add an output whose type is Stream Connector. -Choose a name for your configuration. -Enter the path to the **connector-servicenow.lua** file. - -Configure the *lua parameters* with the following informations: - -Name | Type | Description ---- | --- | --- -client\_id | String | The client id for OAuth authentication -client\_secret | String | The client secret for OAuth authentication -username | String | Username for OAuth authentication -password | Password | Password for OAuth authentication -instance | String | The ServiceNow instance -logfile | String | The log file with its full path (optional) - -## Protocol description - -The following table describes the matching information between Centreon and the -ServiceNow Event Manager. - - -**Host event** - -Centreon | ServiceNow Event Manager field | Description ---- | --- | --- -hostname | node | The hostname -output | description | The Centreon Plugin output -last\_check | time\_of\_event | The time of the event -hostname | resource | The hostname -severity | The level of severity depends on the host status - -**Service event** - -Centreon | ServiceNow Event Manager field | Description ---- | --- | --- -hostname | node | The hostname -output | description | The Centreon Plugin output -last\_check | time\_of\_event | The time of the event -service\_description | resource | The service name -severity | The level of severity depends on the host status - -# NDO - -## Send service status events in the historical NDO protocol format : *ndo/ndo-output.lua* -NDO protocol is no longer supported by Centreon Broker. It is now replaced by BBDO (lower network footprint, automatic compression and encryption). 
-However it is possible to emulate the historical NDO protocol output with this stream connector. - -Parameters to specify in the broker output web ui are: - -* ipaddr as **string**: the ip address of the listening server -* port as **number**: the listening server port -* max-row as **number**: the number of event to store before sending the data + +[![Contributors][contributors-shield]][contributors-url] +[![Stars][stars-shield]][stars-url] +[![Forks][forks-shield]][forks-url] +[![Issues][issues-shield]][issues-url] -By default logs are in /var/log/centreon-broker/ndo-output.log -# OMI - -## stream connector for HP OMI : *omi/omi_connector.lua* - -### Prerequisites - -* lua version >= 5.1.4 -* install lua-socket library (http://w3.impa.br/~diego/software/luasocket/) - * from sources, you have to install also gcc + lua-devel packages - -### Configuration - -Create a broker output for HP OMI Connector - -Parameters to specify in the broker output web ui are: - -* `ipaddr` as **string**: the ip address of the listening server -* `port` as **number**: the listening server port -* `logfile` as **string**: where to send logs -* `loglevel` as **number**: the log level (0, 1, 2, 3) where 3 is the maximum level -* `max_size` as **number**: how many events to store before sending them to the server -* `max_age` as **number**: flush the events when the specified time (in second) is reach (even if `max_size` is not reach) - -# BSM - -## Installation - -Login as `root` on the Centreon central server using your favorite SSH client. - -In case your Centreon central server must use a proxy server to reach the Internet, you will have to export the `https_proxy` environment variable and configure `yum` to be able to install everything. - -```bash -export https_proxy=http://my.proxy.server:3128 -echo "proxy=http://my.proxy.server:3128" >> /etc/yum.conf -``` - -Now that your Centreon central server is able to reach the Internet, you can run: - -```bash -yum install -y lua-curl epel-release -yum install -y luarocks -luarocks install luaxml -``` - -These packages are necessary for the script to run. Now let's download the script: - -```bash -wget -O /usr/share/centreon-broker/lua/bsm_connector.lua https://raw.githubusercontent.com/centreon/centreon-stream-connector-scripts/master/bsm/bsm_connector.lua -chmod 644 /usr/share/centreon-broker/lua/bsm_connector.lua -``` - -The BSM StreamConnnector is now installed on your Centreon central server! - -## Configuration - -Create a broker output for HP BSM Connector. - -Parameters to specify in the broker output WUI are: - -* `source_ci` (string): Name of the transmiter, usually Centreon server name -* `http_server_url` (string): the full HTTP URL. Default: https://my.bsm.server:30005/bsmc/rest/events/ws-centreon/. -* `http_proxy_string` (string): the full proxy URL if needed to reach the BSM server. Default: empty. -* `log_path` (string): the log file to use -* `log_level` (number): the log level (0, 1, 2, 3) where 3 is the maximum level. 0 logs almost nothing. 1 logs only the beginning of the script and errors. 2 logs a reasonable amount of verbose. 3 logs almost everything possible, to be used only for debug. Recommended value in production: 1. -* `max_buffer_size` (number): how many events to store before sending them to the server. -* `max_buffer_age` (number): flush the events when the specified time (in second) is reached (even if `max_buffer_size` is not reached). 
- -# PagerDuty - -## Installation / prerequisites - -The `lua-curl` and `luatz` libraries are required by this script: - -```bash -yum install -y lua-curl epel-release -yum install -y luarocks -luarocks install luatz -``` - -Then copy the `pagerduty.lua` script to `/usr/share/centreon-broker/lua`. - -## Configuration - -### Minimal configuration - -Here are the steps to configure your stream connector: - -* Add a new "Generic - Stream connector" output to the central broker in the "Configuration / Poller / Broker configuration" menu. -* Name it as wanted and set the right path: - -| Name | pagerduty | -| ---- | -------------------------------------------- | -| Path | /usr/share/centreon-broker/lua/pagerduty.lua | - -* Add at least one string parameter containing your PagerDuty routing key/token. - -| Type | String | -| ----------------- | -------------------- | -| `pdy_routing_key` | `` | - -Thats all for now! - -Then save your configuration, export it and restart the broker daemon: - -```bash -systemctl restart cbd -``` - -### Advanced configuration - -#### Proxy - -If your Centreon central server has no direct access to PagerDuty but needs a proxy server, you will have to add a new string parameter: - -| Type | String | -| ------------------- | ------------------------------- | -| `http_proxy_string` | `http://your.proxy.server:3128` | - -#### Centreon URL - -In order to have working links/URL in your PagerDuty events, you are encouraged to add this parameter: - -| Type | String | -| ------------------ | ----------------------------- | -| `pdy_centreon_url` | `http://your.centreon.server` | - -#### Log level / file - -The default value of 2 is fine for initial troubleshooting, but generates a huge amount of logs if you have a lot of hosts. In order to get less log messages, you are should add this parameter: - -| Type | Number | -| ----------- | ------ | -| `log_level` | 1 | - -The default log file is `/var/log/centreon-broker/stream-connector-pagerduty.log`. If it does not suit you, you can set it with the `log_path` parameter: - -| Type | String | -| ---------- | ---------------------------------------------- | -| `log_path` | `/var/log/centreon-broker/my-custom-logfile.log` | - - -#### Buffer size / age - -In case you want to tune the maximum number of events sent in a row for optimization purpose, you may add this parameter: - -| Type | Number | -| ----------------- | ------------------ | -| `max_buffer_size` | 10 (default value) | - - -In case you want to shorten the delay (in seconds) between the reception of an event and its transmission to PagerDuty, you can set this parameter: - -| Type | Number | -| ---------------- | ------------------ | -| `max_buffer_age` | 30 (default value) | - - -# Canopsis - -## Links - -**Canopsis** - -- https://doc.canopsis.net/guide-developpement/struct-event/ - -## Description - -This script use the stream-connector mechanism of Centreon to get events from -the pollers. The event is then translated to a Canopsis event and sent to the -HTTP REST API. - -## Technical description - -This connector follow the best practices of the Centreon documentation -(see the listed links in the first section). - -The script is in lua language as imposed by the stream-connector specification. - -It get all the events from Centreon and convert these events in -a Canopsis compatible json format. - -Filtered events are sent to HTTP API of Canopsis by chunk to reduce the number of -connections. 
- -The filtered events are : - -- acknowledgment events (category 1, element 1) -- downtime events (category 1, element 5) -- host events (category 1, element 14) -- service events (category 1, element 24) - -Extra informations are added to the host and services as bellow : - -- action_url -- notes_url -- hostgroups -- servicegroups (for service events) - -### Acknowledgment - -Two kinds of ack are sent to Canopsis : - -- Ack creation -- Ack deletion - -An ack is positioned on the resource/component reference - -### Downtime - -Two kinds of downtime are sent to Canopsis as "pbehavior" : - -- Downtime creation -- Downtime cancellation - -A uniq ID is generated from the informations of the downtime carried by Centreon. - -*Note : The recurrent downtimes are not implemented by the stream connector yet.* - -### Host status - -All HARD events with a state changed from hosts are sent to Canopsis. - -Take care of the state mapping as below : - -``` --- CENTREON // CANOPSIS --- --------------------- --- UP (0) // INFO (0) --- DOWN (1) // CRITICAL (3) --- UNREACHABLE (2) // MAJOR (2) -``` - -### Service status - -All HARD events with a state changed from services are sent to Canopsis. - -Take care of the state mapping as below : - -``` --- CENTREON // CANOPSIS --- --------------------- --- OK (0) // INFO (0) --- WARNING (1) // MINOR (1) --- CRITICAL (2) // CRITICAL (3) --- UNKNOWN (3) // MAJOR (2) -``` - -## Howto - -### Prerequisites - -* lua version >= 5.1.4 -* install lua-socket library (http://w3.impa.br/~diego/software/luasocket/) - * >= 3.0rc1-2 ( from sources, you have to install also gcc + lua-devel packages ) available into canopsis repository -* centreon-broker version 19.10.5 or >= 20.04.2 - -### Installation - -**Software deployment from sources (centreon-broker 19.10.5 or >= 20.04.2) :** - -1. Copy the lua script `bbdo2canopsis.lua` from `canopsis` dir to `/usr/share/centreon-broker/lua/bbdo2canopsis.lua` -2. Change the permissions to this file `chown centreon-engine:centreon-engine /usr/share/centreon-broker/lua/bbdo2canopsis.lua` - -**Software deployment from packages (centreon-broker >= 20.04.2) :** - -1. Install canopsis repository first - -``` -echo "[canopsis] -name = canopsis -baseurl=https://repositories.canopsis.net/pulp/repos/centos7-canopsis/ -gpgcheck=0 -enabled=1" > /etc/yum.repos.d/canopsis.repo -``` - -2. install connector with Yum -``` -yum install canopsis-connector-centreon-stream-connector -``` - -**Enable the connector :** - -1. add a new "Generic - Stream connector" output on the central-broker-master (see the official documentation) -2. export the poller configuration (see the official documentation) -3. restart services 'systemctl restart cbd centengine gorgoned' - -If you modify this script in development mode ( directly into the centreon host ), -you will need to restart the Centreon services (at least the centengine service). - -### Configuration - -All the configuration can be made througt the Centreon interface as described in -the official documentation. 
-
-**The main parameters you have to set are :**
-
-```
-connector_name = "your connector source name"
-canopsis_user = "your Canopsis API user"
-canopsis_password = "your Canopsis API password"
-canopsis_host = "your Canopsis host"
-```
-
-**If you want to customize your queue parameters (optional) :**
-
-```
-max_buffer_age = 60 -- retention queue time before sending data
-max_buffer_size = 10 -- buffer size in number of events
-```
-
-**The init spread timer (optional) :**
-
-```
-init_spread_timer = 360 -- time to spread events in seconds at connector starts
-```
+# Centreon Stream Connectors #

-This timer is needed for the start of the connector.
+Centreon stream connectors are Lua scripts that help you send your Centreon monitoring data to your favorite tools.

-During this time, the connector send all HARD state events (with state change or
-not) to update the events informations from Centreon to Canopsis. In that way
-the level of information tends to a convergence.
+# Stream connectors

-*This implies a burst of events and a higher load for the server during this time.*
+## Available scripts

-**On the Centreon WUI you can set these parameters as below :**
+Here is a list of the Centreon-powered scripts:

-In Configuration > Pollers > Broker configuration > central-broker-master >
-Output > Select "Generic - Stream connector" > Add
+| Software | Connectors | Documentation |
+| -------- | ---------- | -------------- |
+| BSM | [BSM Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/bsm) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/bsm.html) |
+| ElasticSearch | [ElasticSearch Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/elasticsearch) | [Events Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/elasticsearch-events.html), [Metrics Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/elasticsearch-metrics.html) |
+| InfluxDB | [InfluxDB Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/influxdb) | WIP |
+| NDO | [NDO Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/ndo) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/ndo.html) |
+| OMI | [OMI Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/omi) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/hp-omi.html) |
+| Opsgenie | [Opsgenie Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/opsgenie) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/opsgenie.html) |
+| PagerDuty | [PagerDuty Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/pagerduty) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/pagerduty.html) |
+| Prometheus | [Prometheus Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/prometheus) | WIP |
+| ServiceNow | [ServiceNow Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/servicenow) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/servicenow.html) |
+| Splunk | [Splunk Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/splunk) | [Events Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/splunk-events.html), [Metrics Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/splunk-metrics.html) |
+| Warp10 | [Warp10 Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/warp10) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/warp10.html) |

-![centreon-configuration-screenshot](pictures/centreon-configuration-screenshot.png)
+Here is a list of the Community-powered scripts:

-### Check the output
+| Software | Connectors | Documentation | Contributors | Organizations |
+| -------- | ---------- | -------------- | ------------ | ------------- |
+| Canopsis | [Canopsis Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/community-powered/canopsis) | [Documentation](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/community-powered/canopsis/README.md) | [ppremont-capensis](https://github.com/ppremont-capensis) | [Capensis](https://www.capensis.fr/en/) |

-By default the connector use the HTTP REST API of Canopsis to send events.
+# Contribute

-Check your alarm view to see the events from Centreon.
+If you wish to help us improve this project, feel free to read the [CONTRIBUTE.md](https://github.com/centreon/centreon-stream-connector-scripts/blob/master/CONTRIBUTE.md) file.

-All logs are dumped into the default log file "/var/log/centreon-broker/debug.log"
-#### Advanced usage


+
+[contributors-shield]: https://img.shields.io/github/contributors/centreon/centreon-stream-connector-scripts?color=%2384BD00&label=CONTRIBUTORS&style=for-the-badge
+[stars-shield]: https://img.shields.io/github/stars/centreon/centreon-stream-connector-scripts?color=%23433b02a&label=STARS&style=for-the-badge
+[forks-shield]: https://img.shields.io/github/forks/centreon/centreon-stream-connector-scripts?color=%23009fdf&label=FORKS&style=for-the-badge
+[issues-shield]: https://img.shields.io/github/issues/centreon/centreon-stream-connector-scripts?color=%230072ce&label=ISSUES&style=for-the-badge

-You can also use a raw log file to dump all Canopsis events and manage your
-own way to send events (by example with logstash) by editing the "sending_method"
-variable en set the "file" method.
+[contributors-url]: https://github.com/centreon/centreon-stream-connector-scripts/graphs/contributors +[forks-url]: https://github.com/centreon/centreon-stream-connector-scripts/network/members +[stars-url]: https://github.com/centreon/centreon-stream-connector-scripts/stargazers +[issues-url]: https://github.com/centreon/centreon-stream-connector-scripts/issues \ No newline at end of file diff --git a/stream-connectors/bsm/bsm_connector.lua b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua similarity index 100% rename from stream-connectors/bsm/bsm_connector.lua rename to stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua diff --git a/stream-connectors/elasticsearch/elastic-metrics.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua similarity index 100% rename from stream-connectors/elasticsearch/elastic-metrics.lua rename to stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua diff --git a/stream-connectors/elasticsearch/elastic-neb.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua similarity index 100% rename from stream-connectors/elasticsearch/elastic-neb.lua rename to stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua diff --git a/stream-connectors/influxdb/influxdb-metrics.lua b/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua similarity index 100% rename from stream-connectors/influxdb/influxdb-metrics.lua rename to stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua diff --git a/stream-connectors/influxdb/influxdb-neb.lua b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua similarity index 100% rename from stream-connectors/influxdb/influxdb-neb.lua rename to stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua diff --git a/stream-connectors/ndo/ndo-module.lua b/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua similarity index 100% rename from stream-connectors/ndo/ndo-module.lua rename to stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua diff --git a/stream-connectors/ndo/ndo-output.lua b/stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua similarity index 100% rename from stream-connectors/ndo/ndo-output.lua rename to stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua diff --git a/stream-connectors/omi/omi_connector.lua b/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua similarity index 100% rename from stream-connectors/omi/omi_connector.lua rename to stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua diff --git a/stream-connectors/opsgenie/opsgenie.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua similarity index 100% rename from stream-connectors/opsgenie/opsgenie.lua rename to stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua diff --git a/stream-connectors/pagerduty/pagerduty.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua similarity index 100% rename from stream-connectors/pagerduty/pagerduty.lua rename to stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua diff --git a/stream-connectors/prometheus-gateway/prometheus-gateway.lua b/stream-connectors/centreon-certified/prometheus/prometheus-gateway.lua similarity index 100% rename from stream-connectors/prometheus-gateway/prometheus-gateway.lua rename to stream-connectors/centreon-certified/prometheus/prometheus-gateway.lua diff --git 
a/stream-connectors/servicenow/connector-servicenow.lua b/stream-connectors/centreon-certified/servicenow/connector-servicenow.lua similarity index 100% rename from stream-connectors/servicenow/connector-servicenow.lua rename to stream-connectors/centreon-certified/servicenow/connector-servicenow.lua diff --git a/stream-connectors/pictures/splunk-conf1.png b/stream-connectors/centreon-certified/splunk/splunk-conf1.png similarity index 100% rename from stream-connectors/pictures/splunk-conf1.png rename to stream-connectors/centreon-certified/splunk/splunk-conf1.png diff --git a/stream-connectors/pictures/splunk-conf2.png b/stream-connectors/centreon-certified/splunk/splunk-conf2.png similarity index 100% rename from stream-connectors/pictures/splunk-conf2.png rename to stream-connectors/centreon-certified/splunk/splunk-conf2.png diff --git a/stream-connectors/splunk/splunk-events-http.lua b/stream-connectors/centreon-certified/splunk/splunk-events-http.lua similarity index 100% rename from stream-connectors/splunk/splunk-events-http.lua rename to stream-connectors/centreon-certified/splunk/splunk-events-http.lua diff --git a/stream-connectors/splunk/splunk-events-luacurl.lua b/stream-connectors/centreon-certified/splunk/splunk-events-luacurl.lua similarity index 100% rename from stream-connectors/splunk/splunk-events-luacurl.lua rename to stream-connectors/centreon-certified/splunk/splunk-events-luacurl.lua diff --git a/stream-connectors/splunk/splunk-metrics-http.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-http.lua similarity index 100% rename from stream-connectors/splunk/splunk-metrics-http.lua rename to stream-connectors/centreon-certified/splunk/splunk-metrics-http.lua diff --git a/stream-connectors/splunk/splunk-metrics-luacurl.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl.lua similarity index 100% rename from stream-connectors/splunk/splunk-metrics-luacurl.lua rename to stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl.lua diff --git a/stream-connectors/splunk/splunk-states-http.lua b/stream-connectors/centreon-certified/splunk/splunk-states-http.lua similarity index 100% rename from stream-connectors/splunk/splunk-states-http.lua rename to stream-connectors/centreon-certified/splunk/splunk-states-http.lua diff --git a/stream-connectors/pictures/splunk.png b/stream-connectors/centreon-certified/splunk/splunk.png similarity index 100% rename from stream-connectors/pictures/splunk.png rename to stream-connectors/centreon-certified/splunk/splunk.png diff --git a/stream-connectors/warp10/export-warp10.lua b/stream-connectors/centreon-certified/warp10/export-warp10.lua similarity index 100% rename from stream-connectors/warp10/export-warp10.lua rename to stream-connectors/centreon-certified/warp10/export-warp10.lua diff --git a/stream-connectors/community-powered/canopsis/README.md b/stream-connectors/community-powered/canopsis/README.md new file mode 100644 index 00000000000..34d3b08ad12 --- /dev/null +++ b/stream-connectors/community-powered/canopsis/README.md @@ -0,0 +1,183 @@
+**Canopsis**
+
+- https://doc.canopsis.net/guide-developpement/struct-event/
+
+## Description
+
+This script uses the stream-connector mechanism of Centreon to get events from
+the pollers. Each event is then translated to a Canopsis event and sent to the
+Canopsis HTTP REST API.
+
+## Technical description
+
+This connector follows the best practices of the Centreon documentation
+(see the links listed in the first section).
+
+The script is written in Lua, as required by the stream-connector specification.
+
+It gets all the events from Centreon and converts them into
+a Canopsis-compatible JSON format.
+
+Filtered events are sent to the Canopsis HTTP API in chunks to reduce the number of
+connections.
+
+The filtered events are:
+
+- acknowledgment events (category 1, element 1)
+- downtime events (category 1, element 5)
+- host events (category 1, element 14)
+- service events (category 1, element 24)
+
+Extra information is added to hosts and services, as below:
+
+- action_url
+- notes_url
+- hostgroups
+- servicegroups (for service events)
+
+### Acknowledgment
+
+Two kinds of acknowledgments are sent to Canopsis:
+
+- Ack creation
+- Ack deletion
+
+An ack is positioned on the resource/component reference.
+
+### Downtime
+
+Two kinds of downtime are sent to Canopsis as "pbehavior":
+
+- Downtime creation
+- Downtime cancellation
+
+A unique ID is generated from the downtime information carried by Centreon.
+
+*Note: recurrent downtimes are not implemented by the stream connector yet.*
+
+### Host status
+
+All HARD host events with a state change are sent to Canopsis.
+
+Note the state mapping below:
+
+```
+-- CENTREON // CANOPSIS
+-- ---------------------
+-- UP (0) // INFO (0)
+-- DOWN (1) // CRITICAL (3)
+-- UNREACHABLE (2) // MAJOR (2)
+```
+
+### Service status
+
+All HARD service events with a state change are sent to Canopsis.
+
+Note the state mapping below:
+
+```
+-- CENTREON // CANOPSIS
+-- ---------------------
+-- OK (0) // INFO (0)
+-- WARNING (1) // MINOR (1)
+-- CRITICAL (2) // CRITICAL (3)
+-- UNKNOWN (3) // MAJOR (2)
+```
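+As a quick illustration, the two documented mappings can be expressed as Lua
+tables (a sketch only; the actual `bbdo2canopsis.lua` code may organize this
+differently):
+
+```
+-- Centreon state id -> Canopsis criticity, as documented above
+local host_state_to_canopsis = { [0] = 0, [1] = 3, [2] = 2 }
+local service_state_to_canopsis = { [0] = 0, [1] = 1, [2] = 3, [3] = 2 }
+```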
+
+## Howto
+
+### Prerequisites
+
+* Lua version >= 5.1.4
+* the lua-socket library (http://w3.impa.br/~diego/software/luasocket/)
+  * >= 3.0rc1-2 (when building from sources, you also have to install the gcc and lua-devel packages), available in the Canopsis repository
+* centreon-broker version 19.10.5 or >= 20.04.2
+
+### Installation
+
+**Software deployment from sources (centreon-broker 19.10.5 or >= 20.04.2):**
+
+1. Copy the Lua script `bbdo2canopsis.lua` from the `canopsis` dir to `/usr/share/centreon-broker/lua/bbdo2canopsis.lua`
+2. Change the permissions on this file: `chown centreon-engine:centreon-engine /usr/share/centreon-broker/lua/bbdo2canopsis.lua`
+
+**Software deployment from packages (centreon-broker >= 20.04.2):**
+
+1. Install the Canopsis repository first:
+
+```
+echo "[canopsis]
+name = canopsis
+baseurl=https://repositories.canopsis.net/pulp/repos/centos7-canopsis/
+gpgcheck=0
+enabled=1" > /etc/yum.repos.d/canopsis.repo
+```
+
+2. Install the connector with Yum:
+```
+yum install canopsis-connector-centreon-stream-connector
+```
+
+**Enable the connector:**
+
+1. Add a new "Generic - Stream connector" output on the central-broker-master (see the official documentation)
+2. Export the poller configuration (see the official documentation)
+3. Restart the services: `systemctl restart cbd centengine gorgoned`
+
+If you modify this script in development mode (directly on the Centreon host),
+you will need to restart the Centreon services (at least the centengine service).
+
+### Configuration
+
+All the configuration can be done through the Centreon interface as described in
+the official documentation.
+
+**The main parameters you have to set are:**
+
+```
+connector_name = "your connector source name"
+canopsis_user = "your Canopsis API user"
+canopsis_password = "your Canopsis API password"
+canopsis_host = "your Canopsis host"
+```
+
+**If you want to customize your queue parameters (optional):**
+
+```
+max_buffer_age = 60 -- retention time (in seconds) of the queue before sending data
+max_buffer_size = 10 -- buffer size in number of events
+```
+
+**The init spread timer (optional):**
+
+```
+init_spread_timer = 360 -- time (in seconds) over which events are spread at connector startup
+```
+
+This timer is needed when the connector starts.
+
+During this time, the connector sends all HARD state events (with or without a
+state change) to update the event information from Centreon to Canopsis. In
+that way, the two systems converge on the same level of information.
+
+*This implies a burst of events and a higher load for the server during this time.*
+
+**On the Centreon WUI, you can set these parameters as below:**
+
+In Configuration > Pollers > Broker configuration > central-broker-master >
+Output > Select "Generic - Stream connector" > Add
+
+![centreon-configuration-screenshot](pictures/centreon-configuration-screenshot.png)
+
+### Check the output
+
+By default, the connector uses the Canopsis HTTP REST API to send events.
+
+Check your alarm view to see the events from Centreon.
+
+All logs are dumped into the default log file "/var/log/centreon-broker/debug.log".
+
+#### Advanced usage
+
+You can also dump all Canopsis events to a raw log file and manage your own way
+of sending events (for example with Logstash) by editing the "sending_method"
+variable and setting the "file" method.
\ No newline at end of file
diff --git a/stream-connectors/canopsis/bbdo2canopsis.lua b/stream-connectors/community-powered/canopsis/bbdo2canopsis.lua similarity index 100% rename from stream-connectors/canopsis/bbdo2canopsis.lua rename to stream-connectors/community-powered/canopsis/bbdo2canopsis.lua diff --git a/stream-connectors/pictures/centreon-configuration-screenshot.png b/stream-connectors/community-powered/canopsis/pictures/centreon-configuration-screenshot.png similarity index 100% rename from stream-connectors/pictures/centreon-configuration-screenshot.png rename to stream-connectors/community-powered/canopsis/pictures/centreon-configuration-screenshot.png diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua new file mode 100644 index 00000000000..5e3c5edd570 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua @@ -0,0 +1,355 @@
+#!/usr/bin/lua
+
+---
+-- Module with Centreon broker related methods for easier usage
+-- @module sc_broker
+-- @alias sc_broker
+
+local sc_broker = {}
+
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+
+local ScBroker = {}
+
+function sc_broker.new(logger)
+  local self = {}
+
+  self.logger = logger
+  if not self.logger then
+    self.logger = sc_logger.new("/var/log/centreon-broker/stream-connector.log", 1)
+  end
+
+  setmetatable(self, { __index = ScBroker })
+
+  return self
+end
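+-- Illustrative usage sketch (defined but never called; the log path and level
+-- below are examples, not requirements). A connector's init() would typically
+-- build a logger first and hand it to this constructor.
+local function _usage_example_new()
+  local logger = sc_logger.new("/var/log/centreon-broker/my-connector.log", 2)
+  local broker = sc_broker.new(logger)
+  return broker
+end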
+
+--- get_host_all_infos: retrieve all information about a host
+-- @param host_id (number)
+-- @return false (boolean) if host_id isn't valid or no information was found in the broker cache
+-- @return host_info (table) all the information about the host
+function ScBroker:get_host_all_infos(host_id)
+  -- return because host_id isn't valid
+  if host_id == nil or host_id == "" then
+    self.logger:warning("[sc_broker:get_host_all_infos]: host id is nil")
+    return false
+  end
+
+  -- get host information from broker cache
+  local host_info = broker_cache:get_host(host_id)
+
+  -- return false only if no host information was found in broker cache
+  if not host_info then
+    self.logger:warning("[sc_broker:get_host_all_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.")
+    return false
+  end
+
+  return host_info
+end
+
+--- get_service_all_infos: retrieve all information about a service
+-- @param host_id (number)
+-- @param service_id (number)
+-- @return false (boolean) if host id or service id aren't valid
+-- @return service_info (table) all the information about the service
+function ScBroker:get_service_all_infos(host_id, service_id)
+  -- return because host_id or service_id isn't valid
+  if host_id == nil or host_id == "" or service_id == nil or service_id == "" then
+    self.logger:warning("[sc_broker:get_service_all_infos]: host id or service id is nil")
+    return false
+  end
+
+  -- get service information from broker cache
+  local service_info = broker_cache:get_service(host_id, service_id)
+
+  -- return false only if no service information was found in broker cache
+  if not service_info then
+    self.logger:warning("[sc_broker:get_service_all_infos]: No service information found for host_id: " .. tostring(host_id)
+      .. " and service_id: " .. tostring(service_id) .. ". Restarting centengine should fix this.")
+    return false
+  end
+
+  return service_info
+end
+
+--- get_host_infos: retrieve the desired host information
+-- @param host_id (number)
+-- @param info (string|table) the name of the wanted host parameter or a table of all wanted host parameters
+-- @return false (boolean) if host_id is nil or empty
+-- @return host (any) a table of all wanted host params if info is a table, or the single value if info is a string
+function ScBroker:get_host_infos(host_id, info)
+  -- return because host_id isn't valid
+  if host_id == nil or host_id == "" then
+    self.logger:warning("[sc_broker:get_host_infos]: host id is nil")
+    return false
+  end
+
+  -- prepare return table with host information
+  local host = {
+    host_id = host_id
+  }
+
+  -- return host_id only if no specific param is asked
+  if info == nil then
+    return host
+  end
+
+  -- get host information from broker cache
+  local host_info = broker_cache:get_host(host_id)
+
+  -- return host_id only if no host information was found in broker cache
+  if not host_info then
+    self.logger:warning("[sc_broker:get_host_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.")
+    return host
+  end
+
+  -- get the desired param and return the information
+  if type(info) == "string" then
+    if host_info[info] then
+      return host_info[info]
+    end
+  end
+
+  -- get all the desired params and return the information
+  if type(info) == "table" then
+    for _, param in ipairs(info) do
+      if host_info[param] then
+        host[param] = host_info[param]
+      end
+    end
+
+    return host
+  end
+end
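+-- Illustrative usage sketch (defined but never called; the host id and field
+-- names are made up). A string info returns a single value, a table of names
+-- returns a table of values.
+local function _usage_example_get_host_infos(broker)
+  local name = broker:get_host_infos(2712, "name")
+  local subset = broker:get_host_infos(2712, { "name", "address" })
+  return name, subset
+end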
+
+--- get_service_infos: retrieve the desired service information
+-- @param host_id (number)
+-- @param service_id (number)
+-- @param info (string|table) the name of the wanted service parameter or a table of all wanted service parameters
+-- @return false (boolean) if host_id and/or service_id are nil or empty
+-- @return service (any) a table of all wanted service params if info is a table, or the single value if info is a string
+function ScBroker:get_service_infos(host_id, service_id, info)
+  -- return because host_id or service_id isn't valid
+  if host_id == nil or host_id == "" or service_id == nil or service_id == "" then
+    self.logger:warning("[sc_broker:get_service_infos]: host id or service id is invalid")
+    return false
+  end
+
+  -- prepare return table with service information
+  local service = {
+    host_id = host_id,
+    service_id = service_id
+  }
+
+  -- return host_id and service_id only if no specific param is asked
+  if info == nil then
+    return service
+  end
+
+  -- get service information from broker cache
+  local service_info = broker_cache:get_service(host_id, service_id)
+
+  -- return host_id and service_id only if no service information was found in broker cache
+  if not service_info then
+    self.logger:warning("[sc_broker:get_service_infos]: No service information found for host_id: " .. tostring(host_id) .. " and service_id: " .. tostring(service_id)
+      .. ". Restarting centengine should fix this.")
+    return service
+  end
+
+  -- get the desired param and return the information
+  if type(info) == "string" then
+    if service_info[info] then
+      return service_info[info]
+    end
+  end
+
+  -- get all the desired params and return the information
+  if type(info) == "table" then
+    for _, param in ipairs(info) do
+      if service_info[param] then
+        service[param] = service_info[param]
+      end
+    end
+
+    return service
+  end
+end
+
+--- get_hostgroups: retrieve the hostgroups a host belongs to
+-- @param host_id (number)
+-- @return false (boolean) if host id is invalid or no hostgroup was found
+-- @return hostgroups (table) a table of all hostgroups for the host
+function ScBroker:get_hostgroups(host_id)
+  -- return false if host id is invalid
+  if host_id == nil or host_id == "" then
+    self.logger:warning("[sc_broker:get_hostgroups]: host id is nil or empty")
+    return false
+  end
+
+  -- get hostgroups
+  local hostgroups = broker_cache:get_hostgroups(host_id)
+
+  -- return false if no hostgroups were found
+  if not hostgroups then
+    return false
+  end
+
+  return hostgroups
+end
+
+--- get_servicegroups: retrieve the servicegroups a service belongs to
+-- @param host_id (number)
+-- @param service_id (number)
+-- @return false (boolean) if host_id or service_id are invalid or no servicegroup was found
+-- @return servicegroups (table) a table of all servicegroups for the service
+function ScBroker:get_servicegroups(host_id, service_id)
+  -- return false if host id or service id is invalid
+  if host_id == nil or host_id == "" or service_id == nil or service_id == "" then
+    self.logger:warning("[sc_broker:get_servicegroups]: host id or service id is nil or empty")
+    return false
+  end
+
+  -- get servicegroups (both the host id and the service id are required)
+  local servicegroups = broker_cache:get_servicegroups(host_id, service_id)
+
+  -- return false if no servicegroups were found
+  if not servicegroups then
+    return false
+  end
+
+  return servicegroups
+end
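+-- Illustrative usage sketch (defined but never called; ids are made up, and
+-- the group_id field in the comment is an assumption about the cache format;
+-- only group_name is relied upon elsewhere in this library).
+local function _usage_example_groups(broker)
+  local hostgroups = broker:get_hostgroups(2712)            -- e.g. { { group_id = 1, group_name = "linux-servers" } }
+  local servicegroups = broker:get_servicegroups(2712, 42)  -- false when the service is in no servicegroup
+  return hostgroups, servicegroups
+end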
+
+--- get_severity: retrieve the severity of a host or a service
+-- @param host_id (number)
+-- @param [opt] service_id (number)
+-- @return false (boolean) if host id is invalid or no severity was found
+-- @return severity (table) the severity of the host or the service
+function ScBroker:get_severity(host_id, service_id)
+  -- return false if host id is invalid
+  if host_id == nil or host_id == "" then
+    self.logger:warning("[sc_broker:get_severity]: host id is nil or empty")
+    return false
+  end
+
+  local severity = nil
+
+  -- get host severity
+  if service_id == nil then
+    severity = broker_cache:get_severity(host_id)
+
+    -- return false if no severity was found
+    if not severity then
+      self.logger:warning("[sc_broker:get_severity]: no severity found in broker cache for host: " .. tostring(host_id))
+      return false
+    end
+
+    return severity
+  end
+
+  -- get severity for service
+  severity = broker_cache:get_severity(host_id, service_id)
+
+  -- return false if no severity was found
+  if not severity then
+    self.logger:warning("[sc_broker:get_severity]: no severity found in broker cache for host id: " .. tostring(host_id) .. " and service id: " .. tostring(service_id))
+    return false
+  end
+
+  return severity
+end
+
+--- get_instance: retrieve the poller name from an instance id
+-- @param instance_id (number)
+-- @return false (boolean) if instance_id is invalid or no instance was found in cache
+-- @return name (string) the name of the instance
+function ScBroker:get_instance(instance_id)
+  -- return false if instance_id is invalid
+  if instance_id == nil or instance_id == "" then
+    self.logger:warning("[sc_broker:get_instance]: instance id is nil or empty")
+    return false
+  end
+
+  -- get instance name
+  local name = broker_cache:get_instance_name(instance_id)
+
+  -- return false if no instance name is found
+  if not name then
+    self.logger:warning("[sc_broker:get_instance]: couldn't get instance name from broker cache for instance id: " .. tostring(instance_id))
+    return false
+  end
+
+  return name
+end
+
+--- get_ba_infos: retrieve ba name and description from ba id
+-- @param ba_id (number)
+-- @return false (boolean) if the ba_id is invalid or no information was found in the broker cache
+-- @return ba_info (table) a table with the name and description of the ba
+function ScBroker:get_ba_infos(ba_id)
+  -- return false if ba_id is invalid
+  if ba_id == nil or ba_id == "" then
+    self.logger:warning("[sc_broker:get_ba_infos]: ba id is nil or empty")
+    return false
+  end
+
+  -- get ba info
+  local ba_info = broker_cache:get_ba(ba_id)
+
+  -- return false if no information is found
+  if ba_info == nil then
+    self.logger:warning("[sc_broker:get_ba_infos]: couldn't get ba information in cache for ba_id: " .. tostring(ba_id))
+    return false
+  end
+
+  return ba_info
+end
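+-- Illustrative usage sketch (defined but never called; ids are made up).
+local function _usage_example_severity_and_ba(broker)
+  local host_severity = broker:get_severity(2712)         -- host severity only
+  local service_severity = broker:get_severity(2712, 42)  -- service severity
+  local ba = broker:get_ba_infos(7)                       -- table with the BA name and description
+  return host_severity, service_severity, ba
+end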
+
+--- get_bv_infos: retrieve bv name and description from ba_id
+-- @param ba_id (number)
+-- @return false (boolean) if ba_id is invalid or no information is found in the broker cache
+-- @return bvs (table) name and description of all the bvs
+function ScBroker:get_bv_infos(ba_id)
+  -- return false if ba_id is invalid
+  if ba_id == nil or ba_id == "" then
+    self.logger:warning("[sc_broker:get_bv_infos]: ba id is nil or empty")
+    return false
+  end
+
+  -- get bvs id
+  local bvs_id = broker_cache:get_bvs(ba_id)
+
+  -- return false if no bv id is found for ba_id
+  if bvs_id == nil or bvs_id == "" then
+    self.logger:warning("[sc_broker:get_bv_infos]: couldn't get bvs for ba id: " .. tostring(ba_id))
+    return false
+  end
+
+  local bv_infos = nil
+  local found_bv = false
+  local bvs = {}
+
+  -- get bv info (name + description) for each found bv
+  for _, id in ipairs(bvs_id) do
+    bv_infos = broker_cache:get_bv(id)
+
+    -- add bv information to the list
+    if bv_infos then
+      table.insert(bvs, bv_infos)
+      found_bv = true
+    else
+      self.logger:warning("[sc_broker:get_bv_infos]: couldn't get bv information for bv id: " .. tostring(id))
+    end
+  end
+
+  -- return false if there is no bv information
+  if not found_bv then
+    return false
+  end
+
+  return bvs
+end
+
+return sc_broker
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua new file mode 100644 index 00000000000..427bf1f5d4a --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -0,0 +1,146 @@
+#!/usr/bin/lua
+
+---
+-- Module with common methods for Centreon Stream Connectors
+-- @module sc_common
+-- @alias sc_common
+
+local sc_common = {}
+
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+
+--- ifnil_or_empty: change a nil or empty variable for a specified value
+-- @param var (string|number) the variable that needs to be checked
+-- @param alt (string|number|table) the alternate value if "var" is nil or empty
+-- @return var or alt (string|number|table) the variable or the alternate value
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == "" then
+    return alt
+  else
+    return var
+  end
+end
+
+local ScCommon = {}
+
+function sc_common.new(logger)
+  local self = {}
+
+  self.logger = logger
+  if not self.logger then
+    self.logger = sc_logger.new("/var/log/centreon-broker/stream-connector.log", 1)
+  end
+
+  setmetatable(self, { __index = ScCommon })
+
+  return self
+end
+
+--- ifnil_or_empty: change a nil or empty variable for a specified value
+-- @param var (string|number) the variable that needs to be checked
+-- @param alt (string|number|table) the alternate value if "var" is nil or empty
+-- @return var or alt (string|number|table) the variable or the alternate value
+function ScCommon:ifnil_or_empty(var, alt)
+  return ifnil_or_empty(var, alt)
+end
+
+--- if_wrong_type: replace a variable of the wrong type with a default value
+-- @param var (any) the variable that needs to be checked
+-- @param var_type (string) the expected type of the variable
+-- @param default (any) the default value for the variable if its type is wrong
+-- @return var or default (any) the variable if its type is good, or the default value
+function ScCommon:if_wrong_type(var, var_type, default)
+  if type(var) == var_type then
+    return var
+  end
+
+  return default
+end
+
+--- boolean_to_number: convert a boolean variable to a number
+-- @param boolean (boolean) the boolean that will be converted
+-- @return (number) a number according to the boolean value
+function ScCommon:boolean_to_number(boolean)
+  return boolean and 1 or 0
+end
+
+--- check_boolean_number_option_syntax: make sure the number is either 1 or 0
+-- @param number (number) the boolean number that must be validated
+-- @param default (number) the default value that is going to be returned if the number is not valid
+-- @return number (number) a boolean number
+function ScCommon:check_boolean_number_option_syntax(number, default)
+  if number ~= 1 and number ~= 0 then
+    number = default
+  end
+
+  return number
+end
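+-- Illustrative usage sketch (defined but never called; values are made up).
+-- Typical use is normalizing user-supplied parameters in a connector's init().
+local function _usage_example_param_normalization(common)
+  local proto = common:ifnil_or_empty(nil, "https")                  -- "https"
+  local hard_only = common:check_boolean_number_option_syntax(7, 1)  -- 1 (7 is not a boolean number)
+  local ack_flag = common:boolean_to_number(true)                    -- 1
+  return proto, hard_only, ack_flag
+end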
+
+--- split: convert a string into a table
+-- @param text (string) the string that is going to be split into a table
+-- @param [opt] separator (string) the separator character that will be used to split the string
+-- @return table (table) a table of strings, or an empty string if text is nil or empty
+function ScCommon:split(text, separator)
+  -- return empty string if text is nil
+  if text == nil or text == "" then
+    self.logger:error("[sc_common:split]: could not split text because it is nil or empty")
+    return ""
+  end
+
+  local hash = {}
+
+  -- set default separator
+  separator = ifnil_or_empty(separator, ",")
+
+  for value in string.gmatch(text, "([^" .. separator .. "]+)") do
+    table.insert(hash, value)
+  end
+
+  return hash
+end
+
+--- compare_numbers: compare two numbers and return true if the comparison holds
+-- @param firstNumber (number)
+-- @param secondNumber (number)
+-- @param operator (string) the mathematical operator that is used for the comparison
+-- @return true|false (boolean), or nil if the operator or one of the operands is invalid
+function ScCommon:compare_numbers(firstNumber, secondNumber, operator)
+  if operator ~= "==" and operator ~= "~=" and operator ~= "<" and operator ~= ">" and operator ~= ">=" and operator ~= "<=" then
+    return nil
+  end
+
+  if type(firstNumber) ~= "number" or type(secondNumber) ~= "number" then
+    return nil
+  end
+
+  if operator == "<" then
+    if firstNumber < secondNumber then
+      return true
+    end
+  elseif operator == ">" then
+    if firstNumber > secondNumber then
+      return true
+    end
+  elseif operator == ">=" then
+    if firstNumber >= secondNumber then
+      return true
+    end
+  elseif operator == "<=" then
+    if firstNumber <= secondNumber then
+      return true
+    end
+  elseif operator == "==" then
+    if firstNumber == secondNumber then
+      return true
+    end
+  elseif operator == "~=" then
+    if firstNumber ~= secondNumber then
+      return true
+    end
+  end
+
+  return false
+end
+
+return sc_common
\ No newline at end of file
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua new file mode 100644 index 00000000000..ca9216f8281 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -0,0 +1,528 @@
+#!/usr/bin/lua
+
+---
+-- Module to help handle events from Centreon broker
+-- @module sc_event
+-- @alias sc_event
+
+local sc_event = {}
+
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+
+local ScEvent = {}
+
+function sc_event.new(event, params, common, logger, broker)
+  local self = {}
+
+  self.sc_logger = logger
+  self.sc_common = common
+  self.params = params
+  self.event = event
+  self.sc_broker = broker
+
+  self.event.cache = {}
+
+  setmetatable(self, { __index = ScEvent })
+
+  return self
+end
+
+--- is_valid_category: check if the event is in an accepted category
+-- @return true|false (boolean)
+function ScEvent:is_valid_category()
+  return self:find_in_mapping(self.params.category_mapping, self.params.accepted_categories, self.event.category)
+end
+
+--- is_valid_element: check if the event is an accepted element
+-- @return true|false (boolean)
+function ScEvent:is_valid_element()
+  return self:find_in_mapping(self.params.element_mapping[self.event.category], self.params.accepted_elements, self.event.element)
+end
+
+--- find_in_mapping: check if item type is in the mapping and is accepted
+-- @param mapping (table) the mapping table
+-- @param reference (string) the accepted values for the item
+-- @param item (string) the item we want to find in the mapping table and in the reference
+-- @return true|false (boolean)
+function ScEvent:find_in_mapping(mapping, reference, item)
+  for mapping_index, mapping_value in pairs(mapping) do
+    for _, reference_value in pairs(self.sc_common:split(reference, ",")) do
+      if item == mapping_value and mapping_index == reference_value then
+        return true
+      end
+    end
+  end
+
+  return false
+end
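+-- Illustrative sketch of find_in_mapping (defined but never called; the
+-- mapping table is a minimal assumption of the shape sc_params provides).
+local function _usage_example_find_in_mapping(event)
+  local mapping = { neb = 1, bam = 6 }
+  -- true: category id 1 is in the mapping and its name "neb" is in the accepted list
+  return event:find_in_mapping(mapping, "neb,bam", 1)
+end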
+
+--- is_valid_event: check if the event is accepted depending on configured conditions
+-- @return true|false (boolean)
+function ScEvent:is_valid_event()
+  local is_valid_event = false
+
+  -- run validation tests depending on the category of the event
+  if self.event.category == 1 then
+    is_valid_event = self:is_valid_neb_event()
+  elseif self.event.category == 3 then
+    is_valid_event = self:is_valid_storage_event()
+  elseif self.event.category == 6 then
+    is_valid_event = self:is_valid_bam_event()
+  end
+
+  return is_valid_event
+end
+
+--- is_valid_neb_event: check if the event is an accepted neb type event
+-- @return true|false (boolean)
+function ScEvent:is_valid_neb_event()
+  local is_valid_event = false
+
+  -- run validation tests depending on the element type of the neb event
+  if self.event.element == 14 then
+    is_valid_event = self:is_valid_host_status_event()
+  elseif self.event.element == 24 then
+    is_valid_event = self:is_valid_service_status_event()
+  end
+
+  return is_valid_event
+end
+
+--- is_valid_host_status_event: check if the host status event is an accepted one
+-- @return true|false (boolean)
+function ScEvent:is_valid_host_status_event()
+  -- return false if we can't get the host name or the host id is nil
+  if not self:is_host_valid() then
+    self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated")
+    return false
+  end
+
+  -- return false if event status is not accepted
+  if not self:is_valid_event_status(self.params.host_status) then
+    self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id)
+      .. " does not have an accepted status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]))
+    return false
+  end
+
+  -- return false if any of the ack, downtime or state type (hard/soft) checks fails
+  if not self:are_all_event_states_valid() then
+    self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not in an accepted downtime, ack or hard/soft state")
+    return false
+  end
+
+  -- return false if host is not in an accepted hostgroup
+  if not self:is_valid_hostgroup() then
+    self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not in an accepted hostgroup")
+    return false
+  end
+
+  return true
+end
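+-- Illustrative flow sketch (defined but never called): how a connector's
+-- write() entry point typically chains these validations. The queue object is
+-- an assumption for the example, not part of this module.
+local function _usage_example_write(data, params, common, logger, broker, queue)
+  local event = sc_event.new(data, params, common, logger, broker)
+
+  -- category/element filtering first, then the per-type checks
+  if event:is_valid_category() and event:is_valid_element() and event:is_valid_event() then
+    queue:add(event)
+  end
+
+  return true
+end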
+
+--- is_valid_service_status_event: check if the service status event is an accepted one
+-- @return true|false (boolean)
+function ScEvent:is_valid_service_status_event()
+  -- return false if we can't get the host name or the host id is nil
+  if not self:is_host_valid() then
+    self.sc_logger:warning("[sc_event:is_valid_service_status_event]: host_id: " .. tostring(self.event.host_id)
+      .. " hasn't been validated for service with id: " .. tostring(self.event.service_id))
+    return false
+  end
+
+  -- return false if we can't get the service description or the service id is nil
+  if not self:is_service_valid() then
+    self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't been validated")
+    return false
+  end
+
+  -- return false if event status is not accepted
+  if not self:is_valid_event_status(self.params.service_status) then
+    self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service with id: " .. tostring(self.event.service_id)
+      .. " does not have an accepted status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]))
+    return false
+  end
+
+  -- return false if any of the ack, downtime or state type (hard/soft) checks fails
+  if not self:are_all_event_states_valid() then
+    self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted downtime, ack or hard/soft state")
+    return false
+  end
+
+  -- return false if host is not in an accepted hostgroup
+  if not self:is_valid_hostgroup() then
+    self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id)
+      .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.event.host_id))
+    return false
+  end
+
+  -- return false if service is not in an accepted servicegroup
+  if not self:is_valid_servicegroup() then
+    self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup")
+    return false
+  end
+
+  return true
+end
+
+--- is_host_valid: check if host name and/or id are valid
+-- @return true|false (boolean)
+function ScEvent:is_host_valid()
+  local host_infos = self.sc_broker:get_host_all_infos(self.event.host_id)
+
+  -- return false if we can't get the host information or the host has no name
+  -- (get_host_all_infos can return false, so never index it before checking it)
+  if (not host_infos and self.params.skip_nil_id == 1) or (host_infos and not host_infos.name and self.params.skip_anon_events == 1) then
+    self.sc_logger:warning("[sc_event:is_host_valid]: Invalid host with id: " .. tostring(self.event.host_id) .. " skip nil id is: " .. tostring(self.params.skip_nil_id)
+      .. " host name is: " .. tostring(host_infos and host_infos.name) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events))
+    return false
+  end
+
+  -- no cache entry at all, but anonymous events are accepted: keep only the id
+  if not host_infos then
+    host_infos = { host_id = self.event.host_id }
+  end
+
+  -- force host name to be its id if no name has been found
+  if not host_infos.name then
+    self.event.cache.name = host_infos.host_id or self.event.host_id
+  else
+    self.event.cache = host_infos
+  end
+
+  -- return false if event is coming from fake bam host
+  if string.find(self.event.cache.name, "^_Module_BAM_*") then
+    self.sc_logger:debug("[sc_event:is_host_valid]: Host is a BAM fake host: " .. tostring(self.event.cache.name))
+    return false
+  end
+
+  return true
+end
+
+--- is_service_valid: check if service description and/or id are valid
+-- @return true|false (boolean)
+function ScEvent:is_service_valid()
+  local service_infos = self.sc_broker:get_service_all_infos(self.event.host_id, self.event.service_id)
+
+  -- return false if we can't get the service description or the service id is nil
+  -- (get_service_all_infos can return false, so never index it before checking it)
+  if (not service_infos and self.params.skip_nil_id == 1) or (service_infos and not service_infos.description and self.params.skip_anon_events == 1) then
+    self.sc_logger:warning("[sc_event:is_service_valid]: Invalid service with id: " .. tostring(self.event.service_id) .. " skip nil id is: " .. tostring(self.params.skip_nil_id)
+      .. " service description is: " .. tostring(service_infos and service_infos.description) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events))
+    return false
+  end
+
+  -- no cache entry at all, but anonymous events are accepted: keep only the id
+  if not service_infos then
+    service_infos = { service_id = self.event.service_id }
+  end
+
+  -- force service description to its id if no description has been found
+  if not service_infos.description then
+    self.event.cache.description = service_infos.service_id or self.event.service_id
+  else
+    self.event.cache = service_infos
+  end
+
+  return true
+end
+
+--- are_all_event_states_valid: wrapper method that checks common aspects of an event such as ack and state_type
+-- @return true|false (boolean)
+function ScEvent:are_all_event_states_valid()
+  -- return false if state_type (HARD/SOFT) is not valid
+  if not self:is_valid_event_state_type() then
+    return false
+  end
+
+  -- return false if acknowledge state is not valid
+  if not self:is_valid_event_acknowledge_state() then
+    return false
+  end
+
+  -- return false if downtime state is not valid
+  if not self:is_valid_event_downtime_state() then
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_event_status: check if the event has an accepted status
+-- @param accepted_status_list (string) a comma-separated list of accepted statuses ("ok,warning,critical")
+-- @return true|false (boolean)
+function ScEvent:is_valid_event_status(accepted_status_list)
+  for _, status_id in ipairs(self.sc_common:split(accepted_status_list, ",")) do
+    if tostring(self.event.state) == status_id then
+      return true
+    end
+  end
+
+  self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: "
+    .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]) .. ". Accepted states are: " .. tostring(accepted_status_list))
+  return false
+end
+
+--- is_valid_event_state_type: check if the state type (HARD/SOFT) is accepted
+-- @return true|false (boolean)
+function ScEvent:is_valid_event_state_type()
+  if not self.sc_common:compare_numbers(self.event.state_type, self.params.hard_only, ">=") then
+    self.sc_logger:warning("[sc_event:is_valid_event_state_type]: event is not in a valid state type. Event state type must be above or equal to " .. tostring(self.params.hard_only)
+      .. ". Current state type: " .. tostring(self.event.state_type))
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_event_acknowledge_state: check if the acknowledge state of the event is valid
+-- @return true|false (boolean)
+function ScEvent:is_valid_event_acknowledge_state()
+  if not self.sc_common:compare_numbers(self.params.acknowledged, self.sc_common:boolean_to_number(self.event.acknowledged), ">=") then
+    self.sc_logger:warning("[sc_event:is_valid_event_acknowledge_state]: event is not in a valid ack state. Event ack state must be below or equal to " .. tostring(self.params.acknowledged)
+      .. ". Current ack state: " .. tostring(self.sc_common:boolean_to_number(self.event.acknowledged)))
+    return false
+  end
+
+  return true
+end
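+-- Illustrative sketch of the numeric gates above (defined but never called;
+-- the parameter values are assumed defaults, not guaranteed by this module).
+local function _usage_example_state_gates(common)
+  local hard_only, acknowledged = 1, 0
+  local soft_passes = common:compare_numbers(0, hard_only, ">=")     -- false: SOFT (0) events are rejected
+  local acked_passes = common:compare_numbers(acknowledged, 1, ">=") -- false: acknowledged events are rejected
+  return soft_passes, acked_passes
+end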
+
+--- is_valid_event_downtime_state: check if the event is in an accepted downtime state
+-- @return true|false (boolean)
+function ScEvent:is_valid_event_downtime_state()
+  if not self.sc_common:compare_numbers(self.params.in_downtime, self.event.scheduled_downtime_depth, ">=") then
+    self.sc_logger:warning("[sc_event:is_valid_event_downtime_state]: event is not in a valid downtime state. Event downtime depth must be below or equal to " .. tostring(self.params.in_downtime)
+      .. ". Current downtime depth: " .. tostring(self.event.scheduled_downtime_depth))
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_hostgroup: check if the event is in an accepted hostgroup
+-- @return true|false (boolean)
+function ScEvent:is_valid_hostgroup()
+  -- return true if option is not set
+  if self.params.accepted_hostgroups == "" then
+    return true
+  end
+
+  self.event.hostgroups = self.sc_broker:get_hostgroups(self.event.host_id)
+
+  -- return false if no hostgroups were found
+  if not self.event.hostgroups then
+    self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id)
+      .. " is not linked to a hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups)
+    return false
+  end
+
+  local accepted_hostgroup_name = self:find_hostgroup_in_list()
+
+  -- return false if the host is not in a valid hostgroup
+  if not accepted_hostgroup_name then
+    self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id)
+      .. " is not in an accepted hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups)
+    return false
+  else
+    self.sc_logger:debug("[sc_event:is_valid_hostgroup]: event for host with id: " .. tostring(self.event.host_id)
+      .. " matched hostgroup: " .. accepted_hostgroup_name)
+  end
+
+  return true
+end
+
+--- find_hostgroup_in_list: compare accepted hostgroups from parameters with the event hostgroups
+-- @return accepted_name (string) the name of the first matching hostgroup
+-- @return false (boolean) if no matching hostgroup has been found
+function ScEvent:find_hostgroup_in_list()
+  for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_hostgroups, ",")) do
+    for _, event_hostgroup in pairs(self.event.hostgroups) do
+      if accepted_name == event_hostgroup.group_name then
+        return accepted_name
+      end
+    end
+  end
+
+  return false
+end
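+-- Illustrative sketch (defined but never called; group names are made up).
+-- accepted_hostgroups is a comma-separated list matched against group_name.
+local function _usage_example_hostgroup_filter(event)
+  event.params.accepted_hostgroups = "linux-servers,db-servers"
+  -- true only if the host belongs to at least one of the two groups
+  return event:is_valid_hostgroup()
+end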
+
+--- is_valid_servicegroup: check if the event is in an accepted servicegroup
+-- @return true|false (boolean)
+function ScEvent:is_valid_servicegroup()
+  -- return true if option is not set
+  if self.params.accepted_servicegroups == "" then
+    return true
+  end
+
+  self.event.servicegroups = self.sc_broker:get_servicegroups(self.event.host_id, self.event.service_id)
+
+  -- return false if no servicegroups were found
+  if not self.event.servicegroups then
+    self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id)
+      .. " is not linked to a servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups)
+    return false
+  end
+
+  local accepted_servicegroup_name = self:find_servicegroup_in_list()
+
+  -- return false if the service is not in a valid servicegroup
+  if not accepted_servicegroup_name then
+    self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id)
+      .. " is not in an accepted servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups)
+    return false
+  else
+    self.sc_logger:debug("[sc_event:is_valid_servicegroup]: event for service with id: " .. tostring(self.event.service_id)
+      .. " matched servicegroup: " .. accepted_servicegroup_name)
+  end
+
+  return true
+end
+
+--- find_servicegroup_in_list: compare accepted servicegroups from parameters with the event servicegroups
+-- @return accepted_name or false (string|boolean) the name of the first matching servicegroup if found, false otherwise
+function ScEvent:find_servicegroup_in_list()
+  for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_servicegroups, ",")) do
+    for _, event_servicegroup in pairs(self.event.servicegroups) do
+      if accepted_name == event_servicegroup.group_name then
+        return accepted_name
+      end
+    end
+  end
+
+  return false
+end
+
+--- is_valid_bam_event: check if the event is an accepted bam type event
+-- @return true|false (boolean)
+function ScEvent:is_valid_bam_event()
+  -- return false if ba name is invalid or ba_id is nil
+  if not self:is_ba_valid() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " hasn't been validated")
+    return false
+  end
+
+  -- return false if BA status is not accepted
+  if not self:is_valid_ba_status_event() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " has an invalid state")
+    return false
+  end
+
+  -- return false if BA downtime state is not accepted
+  if not self:is_valid_ba_downtime_state() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " is not in an accepted downtime state")
+    return false
+  end
+
+  -- the BA acknowledge check is currently a no-op and always passes (see is_valid_ba_acknowledge_state)
+  if not self:is_valid_ba_acknowledge_state() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " is not in an accepted acknowledge state")
+    return false
+  end
+
+  -- return false if BA is not in an accepted BV
+  if not self:is_valid_bv() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " is not in an accepted BV")
+    return false
+  end
+
+  return true
+end
+
+--- is_ba_valid: check if ba name and/or id are valid
+-- @return true|false (boolean)
+function ScEvent:is_ba_valid()
+  self.event.cache = self.sc_broker:get_ba_infos(self.event.ba_id)
+
+  -- get_ba_infos can return false: fall back to an empty cache before indexing it
+  if not self.event.cache then
+    self.event.cache = {}
+  end
+
+  -- return false if we can't get the ba name and anonymous or nil-id events are skipped
+  if not self.event.cache.ba_name and (self.params.skip_nil_id == 1 or self.params.skip_anon_events == 1) then
+    self.sc_logger:warning("[sc_event:is_ba_valid]: Invalid BA with id: " .. tostring(self.event.ba_id) .. ". And skip nil id is set to: " .. tostring(self.params.skip_nil_id)
+      .. ". Found BA name is: " .. tostring(self.event.cache.ba_name) .. ". And skip anon event param is set to: " .. tostring(self.params.skip_anon_events))
+    return false
+  end
+
+  -- force ba name to be its id if no name has been found
+  if not self.event.cache.ba_name then
+    self.event.cache.ba_name = self.event.ba_id
+  end
+
+  return true
+end
+
+--- is_valid_ba_downtime_state: check if the ba downtime state is an accepted one
+-- @return true|false (boolean)
function ScEvent:is_valid_ba_downtime_state()
+  if not self.sc_common:compare_numbers(self.params.in_downtime, self.sc_common:boolean_to_number(self.event.in_downtime), ">=") then
+    self.sc_logger:warning("[sc_event:is_valid_ba_downtime_state]: Invalid BA downtime state for BA id: " .. tostring(self.event.ba_id) .. " downtime state is: " .. tostring(self.event.in_downtime)
+      .. " and accepted downtime state must be below or equal to: " .. tostring(self.params.in_downtime))
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_ba_acknowledge_state: check if the ba acknowledge state is an accepted one
+-- @return true|false (boolean)
+function ScEvent:is_valid_ba_acknowledge_state()
+  -- acknowledge filtering is not implemented yet, every event passes this check
+  -- if not self.sc_common:compare_numbers(self.params.in_downtime, self.event.in_downtime, '>=') then
+  --   return false
+  -- end
+
+  return true
+end
+
+--- is_valid_bv: check if the event is in an accepted BV
+-- @return true|false (boolean)
+function ScEvent:is_valid_bv()
+  -- return true if option is not set
+  if self.params.accepted_bvs == "" then
+    return true
+  end
+
+  self.event.bvs = self.sc_broker:get_bv_infos(self.event.ba_id)
+
+  -- return false if no BVs were found
+  if not self.event.bvs then
+    self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id)
+      .. " is not linked to a BV. Accepted BVs are: " .. self.params.accepted_bvs)
+    return false
+  end
+
+  local accepted_bv_name = self:find_bv_in_list()
+
+  -- return false if the BA is not in a valid BV
+  if not accepted_bv_name then
+    self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id)
+      .. " is not in an accepted BV. Accepted BVs are: " .. self.params.accepted_bvs)
+    return false
+  else
+    self.sc_logger:debug("[sc_event:is_valid_bv]: event for BA with id: " .. tostring(self.event.ba_id)
+      .. " matched BV: " .. accepted_bv_name)
+  end
+
+  return true
+end
+
+--- find_bv_in_list: compare accepted BVs from parameters with the event BVs
+-- @return accepted_name (string) the name of the first matching BV
+-- @return false (boolean) if no matching BV has been found
+function ScEvent:find_bv_in_list()
+  for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_bvs, ",")) do
+    for _, event_bv in pairs(self.event.bvs) do
+      if accepted_name == event_bv.bv_name then
+        return accepted_name
+      end
+    end
+  end
+
+  return false
+end
+
+--- is_valid_storage_event: DEPRECATED method, use NEB category to get metric data instead
+-- @return true (boolean)
+function ScEvent:is_valid_storage_event()
+  return true
+end
+
+return sc_event
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua
new file mode 100644
index 00000000000..dc5af962607
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua
@@ -0,0 +1,85 @@
+#!/usr/bin/lua

+---
+-- Logging module for centreon stream connectors
+-- @module sc_logger
+-- @alias sc_logger
+
+local sc_logger = {}
+
+--- build_message: prepare log message
+-- @param severity (string) the severity of the message (WARNING, CRITICAL...)
+-- @param message (string) the log message
+-- @return output (string) the formatted log message
+local function build_message(severity, message)
+  local date = os.date("%a %b %d %H:%M:%S %Y")
+  local output = date .. ": " .. severity .. ": " .. message .. "\n"
+
+  return output
+end
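-- As an illustration, a call like build_message("WARNING", "host 12 dropped")
-- is expected to produce a line such as:
--   Tue May 04 10:30:43 2021: WARNING: host 12 dropped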
+
+--- write_message: write a message in a file
+-- @param message (string) the message to write
+-- @param logfile (string) the file in which the message will be written
+local function write_message(message, logfile)
+  local file = io.open(logfile, "a")
+  io.output(file)
+  io.write(message)
+  io.close(file)
+end
+
+--- file_logging: log message in a file
+-- @param message (string) the message that needs to be written
+-- @param severity (string) the severity of the log
+-- @param logfile (string) the output file
+local function file_logging(message, severity, logfile)
+  write_message(build_message(severity, message), logfile)
+end
+
+local ScLogger = {}
+
+--- sc_logger.new: sc_logger constructor
+-- @param [opt] logfile (string) output file for logs
+-- @param [opt] severity (integer) the accepted severity level
+function sc_logger.new(logfile, severity)
+  local self = {}
+  self.severity = severity
+
+  if type(severity) ~= "number" then
+    self.severity = 1
+  end
+
+  self.logfile = logfile or "/var/log/centreon-broker/stream-connector.log"
+  broker_log:set_parameters(self.severity, self.logfile)
+
+  setmetatable(self, { __index = ScLogger })
+
+  return self
+end
+
+--- error: write an error message
+-- @param message (string) the message that will be written
function ScLogger:error(message)
+  broker_log:error(1, message)
+end
+
+--- warning: write a warning message
+-- @param message (string) the message that will be written
+function ScLogger:warning(message)
+  broker_log:warning(2, message)
+end
+
+--- notice: write a notice message
+-- @param message (string) the message that will be written
+function ScLogger:notice(message)
+  broker_log:info(1, message)
+end
+
+--- debug: write a debug message
+-- @param message (string) the message that will be written
+function ScLogger:debug(message)
+  broker_log:info(3, message)
+end
+
+return sc_logger
\ No newline at end of file
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
new file mode 100644
index 00000000000..26d0419ac20
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
@@ -0,0 +1,208 @@
+#!/usr/bin/lua
+
+---
+-- Module to help initiate a stream connector with all parameters
+-- @module sc_params
+-- @alias sc_params
+
+local sc_params = {}
+
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+
+local ScParams = {}
+
+--- sc_params.new: sc_params constructor
+-- @param common (object) object instance from sc_common module
+-- @param logger (object) object instance from sc_logger module
+function sc_params.new(common, logger)
+  local self = {}
+
+  -- initiate mandatory libs
+  self.logger = logger
+  self.common = common
+
+  -- initiate params
+  self.params = {
+    -- filter broker events
+    accepted_categories = "neb,bam", -- could be: neb,storage,bam (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#event-categories)
+    accepted_elements = "host_status,service_status,ba_status", -- could be: metric,host_status,service_status,ba_event,kpi_event
(https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#neb) + + -- filter status + host_status = "0,1,2", -- = ok, down, unreachable + service_status = "0,1,2,3", -- = ok, warning, critical, unknown, + ba_status = "0,1,2", -- = ok, warning, critical + + -- filter state type + hard_only = 1, + acknowledged = 0, + in_downtime = 0, + + -- objects filter + accepted_hostgroups = "", + accepted_servicegroups = "", + accepted_bvs = "", + + -- filter anomalous events + skip_anon_events = 1, + skip_nil_id = 1, + + -- communication parameters + max_buffer_size = 1, + max_buffer_age = 5, + + -- internal parameters + __internal_ts_last_flush = os.time(), + + -- initiate mappings + element_mapping = {}, + category_mapping = {}, + status_mapping = {}, + state_type_mapping = { + [0] = "SOFT", + [1] = "HARD" + }, + validatedEvents = {}, + + -- FIX BROKER ISSUE + max_stored_events = 10 -- do not use values above 100 + } + + -- maps category id and name + self.params.category_mapping = { + neb = 1, + bbdo = 2, + storage = 3, + correlation = 4, + dumper = 5, + bam = 6, + extcmd = 7 + } + + -- initiate category and element mapping + self.params.element_mapping = { + [1] = {}, + [3] = {}, + [6] = {} + } + + -- maps category id with element name and element id + -- neb elements + self.params.element_mapping[1].acknowledgement = 1 + self.params.element_mapping[1].comment = 2 + self.params.element_mapping[1].custom_variable = 3 + self.params.element_mapping[1].custom_variable_status = 4 + self.params.element_mapping[1].downtime = 5 + self.params.element_mapping[1].event_handler = 6 + self.params.element_mapping[1].flapping_status = 7 + self.params.element_mapping[1].host_check = 8 + self.params.element_mapping[1].host_dependency = 9 + self.params.element_mapping[1].host_group = 10 + self.params.element_mapping[1].host_group_member = 11 + self.params.element_mapping[1].host = 12 + self.params.element_mapping[1].host_parent = 13 + self.params.element_mapping[1].host_status = 14 + self.params.element_mapping[1].instance = 15 + self.params.element_mapping[1].instance_status = 16 + self.params.element_mapping[1].log_entry = 17 + self.params.element_mapping[1].module = 18 + self.params.element_mapping[1].service_check = 19 + self.params.element_mapping[1].service_dependency = 20 + self.params.element_mapping[1].service_group = 21 + self.params.element_mapping[1].service_group_member = 22 + self.params.element_mapping[1].service = 23 + self.params.element_mapping[1].service_status = 24 + self.params.element_mapping[1].instance_configuration = 25 + + -- metric elements mapping + self.params.element_mapping[3].metric = 1 + self.params.element_mapping[3].rebuild = 2 + self.params.element_mapping[3].remove_graph = 3 + self.params.element_mapping[3].status = 4 + self.params.element_mapping[3].index_mapping = 5 + self.params.element_mapping[3].metric_mapping = 6 + + -- bam elements mapping + self.params.element_mapping[6].ba_status = 1 + self.params.element_mapping[6].kpi_status = 2 + self.params.element_mapping[6].meta_service_status = 3 + self.params.element_mapping[6].ba_event = 4 + self.params.element_mapping[6].kpi_event = 5 + self.params.element_mapping[6].ba_duration_event = 6 + self.params.element_mapping[6].dimension_ba_event = 7 + self.params.element_mapping[6].dimension_kpi_event = 8 + self.params.element_mapping[6].dimension_ba_bv_relation_event = 9 + self.params.element_mapping[6].dimension_bv_event = 10 + self.params.element_mapping[6].dimension_truncate_table_signal = 11 + 
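-- Illustrative use of the two mappings built here: a service_status event
-- arrives from broker with event.category == 1 and event.element == 24, and
-- a connector can translate the names given in accepted_categories and
-- accepted_elements into those ids, e.g.:
--   local neb_id = self.params.category_mapping["neb"]                           -- 1
--   local service_status_id = self.params.element_mapping[neb_id].service_status -- 24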
self.params.element_mapping[6].bam_rebuild = 12
+  self.params.element_mapping[6].dimension_timeperiod = 13
+  self.params.element_mapping[6].dimension_ba_timeperiod_relation = 14
+  self.params.element_mapping[6].dimension_timeperiod_exception = 15
+  self.params.element_mapping[6].dimension_timeperiod_exclusion = 16
+  self.params.element_mapping[6].inherited_downtime = 17
+
+  -- initiate category and status mapping
+  self.params.status_mapping = {
+    [1] = {},
+    [3] = {},
+    [6] = {}
+  }
+
+  -- maps neb category statuses with host status element
+  self.params.status_mapping[1][14] = {
+    [0] = "UP",
+    [1] = "DOWN",
+    [2] = "UNREACHABLE"
+  }
+
+  -- maps neb category statuses with service status element
+  self.params.status_mapping[1][24] = {
+    [0] = "OK",
+    [1] = "WARNING",
+    [2] = "CRITICAL",
+    [3] = "UNKNOWN"
+  }
+
+  -- maps bam category statuses with ba status element
+  self.params.status_mapping[6][1] = {
+    [0] = "OK",
+    [1] = "WARNING",
+    [2] = "CRITICAL"
+  }
+
+  setmetatable(self, { __index = ScParams })
+
+  return self
+end
+
+--- param_override: change default param values with the ones provided by the web configuration
+-- @param user_params (table) the table of all parameters from the web interface
+function ScParams:param_override(user_params)
+  if type(user_params) ~= "table" then
+    self.logger:error("User parameters are not a table. Using default parameters instead")
+    return
+  end
+
+  for param_name, param_value in pairs(user_params) do
+    if self.params[param_name] then
+      self.params[param_name] = param_value
+      self.logger:notice("[sc_params:param_override]: overriding parameter: " .. tostring(param_name) .. " with value: " .. tostring(param_value))
+    else
+      self.logger:notice("[sc_params:param_override]: User parameter: " .. tostring(param_name) .. " is not handled by this stream connector")
+    end
+  end
+end
+
+--- check_params: check standard params syntax
+function ScParams:check_params()
+  self.params.hard_only = self.common:check_boolean_number_option_syntax(self.params.hard_only, 1)
+  self.params.acknowledged = self.common:check_boolean_number_option_syntax(self.params.acknowledged, 0)
+  self.params.in_downtime = self.common:check_boolean_number_option_syntax(self.params.in_downtime, 0)
+  self.params.skip_anon_events = self.common:check_boolean_number_option_syntax(self.params.skip_anon_events, 1)
+  self.params.skip_nil_id = self.common:check_boolean_number_option_syntax(self.params.skip_nil_id, 1)
+  self.params.accepted_hostgroups = self.common:if_wrong_type(self.params.accepted_hostgroups, "string", "")
+  self.params.accepted_servicegroups = self.common:if_wrong_type(self.params.accepted_servicegroups, "string", "")
+  self.params.accepted_bvs = self.common:if_wrong_type(self.params.accepted_bvs, "string", "")
+end
+
+return sc_params
\ No newline at end of file
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_test.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_test.lua
new file mode 100644
index 00000000000..d82e076b397
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_test.lua
@@ -0,0 +1,31 @@
+#!/usr/bin/lua
+
+---
+-- Test module to help check modules reliability
+-- @module sc_test
+-- @alias sc_test
+
+local sc_test = {}
+
+function sc_test.compare_result(expected, result)
+  local state = ''
+  if expected == result then
+    state = '\27[32mOK\27[0m'
+  else
+    state = '\27[31mNOK\27[0m'
+  end
+
+  return "[EXPECTED] " .. tostring(expected) .. " [RESULT] " .. tostring(result) .. ' ' ..
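-- Example console output of compare_result (illustrative; \27[32m and \27[31m
-- are the ANSI escape codes for green and red):
--   [EXPECTED] 1 [RESULT] 1 OK
--   [EXPECTED] 1 [RESULT] 0 NOK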
state +end + +function sc_test.compare_tables(expected, result) + for i, v in pairs(expected) do + if v ~= result[i] then + return 'tables are not equal \27[31mNOK\27[0m' + end + end + + return 'tables are equal \27[32mOK\27[0m' +end + +return sc_test \ No newline at end of file diff --git a/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec b/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec new file mode 100644 index 00000000000..fdb9e7948ab --- /dev/null +++ b/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec @@ -0,0 +1,26 @@ +source = { + url = "git+https://github.com/centreon/centreon-stream-connectors", + tag = "0.1.0" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/tests/bam_stream_connector.lua b/stream-connectors/modules/tests/bam_stream_connector.lua new file mode 100644 index 00000000000..0b8e0cbef95 --- /dev/null +++ b/stream-connectors/modules/tests/bam_stream_connector.lua @@ -0,0 +1,203 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + -- initiate EventQueue variables + self.events = {} + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/stream-connector-bam.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- initiate parameters dedicated to this stream connector + self.sc_params.params.output_file = params.output_file + + -- overriding default parameters for this stream connector + params.accepted_categories = "bam" + params.accepted_elements = "ba_status" + + -- checking mandatory parameters and setting a fail flag + if not params.output_file then + self.sc_logger:error("output_file is a mandatory parameter.") + self.fail = true + end + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return 
self
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:format_event, build your own table with the desired information
+-- @return true (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:format_event()
+  -- starting to handle information from BA
+  self.sc_event.event.formated_event = {
+    -- name of BA has been stored in a cache table when calling is_valid_event()
+    my_ba = self.sc_event.event.cache.ba_name,
+    -- states (critical, ok...) are found and converted to human format thanks to the status_mapping table
+    my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state],
+    -- like the name of the BA, BA description is stored in the cache table of the event
+    my_description = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.ba_description, "no description found")
+  }
+
+  self:add()
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add ()
+  -- store event in self.events list
+  self.events[#self.events + 1] = self.sc_event.event.formated_event
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush, flush stored events
+-- Called when the max number of events or the max age are reached
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:flush ()
+  self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string")
+
+  -- send stored events
+  local retval = self:send_data()
+
+  -- reset stored events list
+  self.events = {}
+
+  -- and update the timestamp
+  self.sc_params.params.__internal_ts_last_flush = os.time()
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to external tool
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:send_data ()
+  local data = ""
+  local counter = 0
+
+  -- concatenate all stored events in the data variable
+  for _, formated_event in ipairs(self.events) do
+    if counter == 0 then
+      data = broker.json_encode(formated_event)
+      counter = counter + 1
+    else
+      data = data .. "," .. broker.json_encode(formated_event)
+    end
+  end
+
+  self.sc_logger:debug("EventQueue:send_data: creating json: " .. tostring(data))
+
+  -- output data to the tool we want
+  if self:call(data) then
+    return true
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:call send the data where we want it to be
+-- @param data (string) the data we want to send
+-- @return true (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:call (data)
+  -- open a file
+  self.sc_logger:debug("EventQueue:call: opening file " .. self.sc_params.params.output_file)
+  local file = io.open(self.sc_params.params.output_file, "a")
+  io.output(file)
+
+  -- write in the file
+  self.sc_logger:debug("EventQueue:call: writing message " .. tostring(data))
+  io.write(data .. "\n")
+
+  -- close the file
+  self.sc_logger:debug("EventQueue:call: closing file " ..
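-- With format_event above, one flushed BA event serializes to a JSON object
-- similar to the following (illustrative values):
--   {"my_ba":"BA-Payment","my_state":"CRITICAL","my_description":"payment chain"}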
self.sc_params.params.output_file) + io.close(file) + + return true +end + +local queue + +function init(params) + queue = EventQueue.new(params) +end + +function write(event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + return true + end + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then + queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") + os.execute("sleep " .. tonumber(1)) + queue:flush() + end + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_event() + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. 
") is reached, flushing data") + queue:flush() + end + + return true +end diff --git a/stream-connectors/modules/tests/neb_stream_connector.lua b/stream-connectors/modules/tests/neb_stream_connector.lua new file mode 100644 index 00000000000..4827499604f --- /dev/null +++ b/stream-connectors/modules/tests/neb_stream_connector.lua @@ -0,0 +1,213 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + -- initiate EventQueue variables + self.events = {} + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- initiate parameters dedicated to this stream connector + self.sc_params.params.output_file = params.output_file + + -- overriding default parameters for this stream connector + params.accepted_categories = "neb" + params.accepted_elements = "host_status,service_status" + + -- checking mandatory parameters and setting a fail flag + if not params.output_file then + self.sc_logger:error("output_file is a mandatory parameter.") + self.fail = true + end + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_event() + -- starting to handle shared information between host and service + self.sc_event.event.formated_event = { + -- name of host has been stored in a cache table when calling is_valid_even() + my_host = self.sc_event.event.cache.name, + -- states (critical, ok...) 
are found and converted to human format thanks to the status_mapping table
+    my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state],
+    -- get output of the event
+    my_output = self.sc_common:ifnil_or_empty(string.match(self.sc_event.event.output, "^(.*)\n"), "no output"),
+    -- like the name of the host, notes are stored in the cache table of the event
+    my_notes = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.notes, "no notes found")
+  }
+
+  -- handle service specific information
+  if self.sc_event.event.element == 24 then
+    -- like the name of the host, service description is stored in the cache table of the event
+    self.sc_event.event.formated_event.my_description = self.sc_event.event.cache.description
+    -- if the service doesn't have notes, we can retrieve the ones from the host by fetching it from the broker cache
+    self.sc_event.event.formated_event.my_notes = self.sc_common:ifnil_or_empty(self.sc_broker:get_host_infos(self.sc_event.event.host_id, "notes"), "no notes found")
+  end
+
+  self:add()
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add ()
+  -- store event in self.events list
+  self.events[#self.events + 1] = self.sc_event.event.formated_event
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush, flush stored events
+-- Called when the max number of events or the max age are reached
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:flush ()
+  self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string")
+
+  -- send stored events
+  local retval = self:send_data()
+
+  -- reset stored events list
+  self.events = {}
+
+  -- and update the timestamp
+  self.sc_params.params.__internal_ts_last_flush = os.time()
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to external tool
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:send_data ()
+  local data = ""
+  local counter = 0
+
+  -- concatenate all stored events in the data variable
+  for _, formated_event in ipairs(self.events) do
+    if counter == 0 then
+      data = broker.json_encode(formated_event)
+      counter = counter + 1
+    else
+      data = data .. "," .. broker.json_encode(formated_event)
+    end
+  end
+
+  self.sc_logger:debug("EventQueue:send_data: creating json: " .. tostring(data))
+
+  -- output data to the tool we want
+  if self:call(data) then
+    return true
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:call send the data where we want it to be
+-- @param data (string) the data we want to send
+-- @return true (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:call (data)
+  -- open a file
+  self.sc_logger:debug("EventQueue:call: opening file " .. self.sc_params.params.output_file)
+  local file = io.open(self.sc_params.params.output_file, "a")
+  io.output(file)
+
+  -- write in the file
+  self.sc_logger:debug("EventQueue:call: writing message " ..
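-- To make the mapping concrete: a service_status event (category 1,
-- element 24) with state 2 resolves to status_mapping[1][24][2] = "CRITICAL",
-- so a formatted service event serializes to something like (illustrative):
--   {"my_host":"srv-01","my_state":"CRITICAL","my_output":"CPU 92%","my_notes":"no notes found","my_description":"cpu"}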
tostring(data)) + io.write(data .. "\n") + + -- close the file + self.sc_logger:debug("EventQueue:call: closing file " .. self.sc_params.params.output_file) + io.close(file) + + return true +end + +local queue + +function init(params) + queue = EventQueue.new(params) +end + +function write(event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + return true + end + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then + queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") + os.execute("sleep " .. tonumber(1)) + queue:flush() + end + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_event() + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached, flushing data") + queue:flush() + end + + return true +end diff --git a/stream-connectors/modules/tests/sc_common-test.lua b/stream-connectors/modules/tests/sc_common-test.lua new file mode 100644 index 00000000000..f5ed474a479 --- /dev/null +++ b/stream-connectors/modules/tests/sc_common-test.lua @@ -0,0 +1,115 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_test = require("centreon-stream-connectors-lib.sc_test") + +local common = sc_common.new() + +local vempty = '' +local vnil = nil +local string = 'value_1,value_2,value_3' +local vtbool = true +local vfbool = false + +--- +-- test1: ifnil_or_empty +local test1_alt = 'alternate' + +print("-- test1: ifnil_or_empty --") +-- test nil value +print("test nil value: " .. sc_test.compare_result('alternate', common:ifnil_or_empty(vnil, test1_alt))) + +-- test empty value +print("test empty value: " .. sc_test.compare_result('alternate', common:ifnil_or_empty(vempty, test1_alt))) + +-- test a value +print("test a value: " .. sc_test.compare_result(string, common:ifnil_or_empty(string, test1_alt))) + +--- +-- test2: boolean_to_number +print("-- test2: boolean_to_number --") + +-- test a true and false boolean +print("test a true value: " .. sc_test.compare_result(1, common:boolean_to_number(vtbool))) +print("test a false value: " .. 
sc_test.compare_result(0, common:boolean_to_number(vfbool)))
+
+-- test invalid type (string)
+print("test a string value: " .. sc_test.compare_result(1, common:boolean_to_number(string)))
+
+-- test invalid type (nil)
+print("test a nil value: " .. sc_test.compare_result(0, common:boolean_to_number(vnil)))
+
+---
+-- test3: check_boolean_number_option_syntax
+local test3_default = 0
+local test3_good_1 = 1
+local test3_good_0 = 0
+
+print("-- test3: check_boolean_number_option_syntax --")
+
+-- test a string value
+print("test a string value: " .. sc_test.compare_result(0, common:check_boolean_number_option_syntax(string, test3_default)))
+
+-- test boolean numbers (0 and 1)
+print("test a boolean number: " .. sc_test.compare_result(1, common:check_boolean_number_option_syntax(test3_good_1, test3_default)))
+print("test a boolean number: " .. sc_test.compare_result(0, common:check_boolean_number_option_syntax(test3_good_0, test3_default)))
+
+-- test a boolean (true)
+print("test a boolean (true): " .. sc_test.compare_result(0, common:check_boolean_number_option_syntax(vtbool, test3_default)))
+
+---
+-- test4: split
+local test4_no_separator = 'a string without separator'
+local test4_custom_separator = 'value_1:value_2:value_3'
+print("-- test4: split --")
+
+-- test a comma separated string
+local table = {
+  [1] = 'value_1',
+  [2] = 'value_2',
+  [3] = 'value_3'
+}
+print("test a comma separated string: " .. sc_test.compare_tables(table, common:split(string)))
+
+-- test a colon separated string
+print("test a colon separated string: " .. sc_test.compare_tables(table, common:split(test4_custom_separator, ':')))
+
+-- test a string without separator
+table = {
+  [1] = test4_no_separator
+}
+print("test a string without separators: " .. sc_test.compare_tables(table, common:split(test4_no_separator)))
+
+-- test an empty string
+print("test an empty string: " .. sc_test.compare_result('', common:split(vempty)))
+
+-- test a nil value
+print("test a nil value: " .. sc_test.compare_result('', common:split(vnil)))
+
+---
+-- test5: compare_numbers
+
+print("-- test5: compare_numbers --")
+-- test inferior number
+print("test a <= b: " .. sc_test.compare_result(true, common:compare_numbers(1, 3, '<=')))
+
+-- test with float number
+print("test a <= b (b is a float number): " .. sc_test.compare_result(true, common:compare_numbers(1, 3.5, '<=')))
+
+-- test superior number
+print("test a <= b: " .. sc_test.compare_result(false, common:compare_numbers(3, 1, '<=')))
+
+-- test nil operator
+print("test with a nil operator: " .. sc_test.compare_result(nil, common:compare_numbers(3, 1, nil)))
+
+-- test empty operator
+print("test with an empty operator: " .. sc_test.compare_result(nil, common:compare_numbers(3, 1, '')))
+
+-- test nil number
+print("test with a nil number: " .. sc_test.compare_result(nil, common:compare_numbers(nil, 1, '<=')))
+
+-- test empty number
+print("test with an empty number: " .. sc_test.compare_result(nil, common:compare_numbers('', 1, '<=')))
+
+-- test with string as number
+print("test with a string: " ..
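-- These checks print one line per assertion; assuming the
-- centreon-stream-connectors-lib modules are reachable on package.path
-- (for example after a luarocks install), the file can be run directly:
--   lua sc_common-test.lua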
sc_test.compare_result(nil, common:compare_numbers(string, 1, '<='))) \ No newline at end of file From bdc1408a4557b4e41bbdb1a9d97946153dd3e6a7 Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 4 May 2021 10:30:43 +0200 Subject: [PATCH 056/219] Modules Docs + Modules bug fixes + Modules new features (#46) * add doc index and logger module documentation * add sc_common module documentation * fix wrong link * add table of content for sc_common doc * refacto cache structure for the event * method naming * fix get_servicegroups method * fix get_instance method * fix get_bv_infos method * add sc_broker module documentation * fix example in doc * better sc_logger initialization * adds sc_param module documentation * better skip_nil_id handling * put broker cache info in cache table of the event * add scope details for params * add sc_event module documentation * fix links in documentation * fix links again * fix links for is_service_valid * fix downtime link * better method naming * fix wrong method name in doc * adds the possibility to filter using poller name * adds severity filter feature * fix rockspec 0.1.0-1 * add rockspec file for production ready release * typo Co-authored-by: pkriko <32265250+pkriko@users.noreply.github.com> * typo Co-authored-by: pkriko <32265250+pkriko@users.noreply.github.com> * typo Co-authored-by: pkriko <32265250+pkriko@users.noreply.github.com> * typo Co-authored-by: pkriko <32265250+pkriko@users.noreply.github.com> * typo Co-authored-by: pkriko <32265250+pkriko@users.noreply.github.com> Co-authored-by: pkriko <32265250+pkriko@users.noreply.github.com> --- .../sc_broker.lua | 16 +- .../sc_common.lua | 2 +- .../sc_event.lua | 254 ++++-- .../sc_params.lua | 15 +- stream-connectors/modules/docs/README.md | 95 ++ stream-connectors/modules/docs/sc_broker.md | 616 +++++++++++++ stream-connectors/modules/docs/sc_common.md | 273 ++++++ stream-connectors/modules/docs/sc_event.md | 833 ++++++++++++++++++ stream-connectors/modules/docs/sc_logger.md | 129 +++ stream-connectors/modules/docs/sc_param.md | 129 +++ ...treon-stream-connectors-lib-0.1.0.rockspec | 4 +- ...treon-stream-connectors-lib-1.0.0.rockspec | 28 + .../modules/tests/bam_stream_connector.lua | 4 +- .../modules/tests/neb_stream_connector.lua | 8 +- 14 files changed, 2336 insertions(+), 70 deletions(-) create mode 100644 stream-connectors/modules/docs/README.md create mode 100644 stream-connectors/modules/docs/sc_broker.md create mode 100644 stream-connectors/modules/docs/sc_common.md create mode 100644 stream-connectors/modules/docs/sc_event.md create mode 100644 stream-connectors/modules/docs/sc_logger.md create mode 100644 stream-connectors/modules/docs/sc_param.md create mode 100644 stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0.rockspec diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua index 5e3c5edd570..3057d0d4748 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua @@ -16,7 +16,7 @@ function sc_broker.new(logger) self.logger = logger if not self.logger then - self.logger = sc_logger.new("/var/log/centreon-broker/stream-connector.log", 1) + self.logger = sc_logger.new() end setmetatable(self, { __index = ScBroker }) @@ -211,7 +211,7 @@ function ScBroker:get_servicegroups(host_id, service_id) end -- get servicegroups - local servicegroups = 
broker_cache:get_servicegroups(host_id) + local servicegroups = broker_cache:get_servicegroups(host_id, service_id) -- return false if no servicegroups were found if not servicegroups then @@ -267,7 +267,7 @@ end -- @return name (string) the name of the instance function ScBroker:get_instance(instance_id) -- return false if instance_id is invalid - if intance_id == nil or instance_id == "" then + if instance_id == nil or instance_id == "" then self.logger:warning("[sc_broker:get_instance]: instance id is nil or empty") return false end @@ -307,11 +307,11 @@ function ScBroker:get_ba_infos(ba_id) return ba_info end ---- get_bv_infos: retrieve bv name and description from ba_id +--- get_bvs_infos: retrieve bv name and description from ba_id -- @param ba_id (number) -- @param false (boolean) if ba_id is invalid or no information are found in the broker_cache -- @return bvs (table) name and description of all the bvs -function ScBroker:get_bv_infos(ba_id) +function ScBroker:get_bvs_infos(ba_id) -- return false if ba_id is invalid if ba_id == nil or ba_id == "" then self.logger:warning("[sc_broker:get_bvs]: ba id is nil or empty") @@ -332,8 +332,8 @@ function ScBroker:get_bv_infos(ba_id) local bvs = {} -- get bv info (name + description) for each found bv - for _, id in ipairs(bv_id) do - bv_infos = broker_cache:get_bv(v) + for _, id in ipairs(bvs_id) do + bv_infos = broker_cache:get_bv(id) -- add bv information to the list if bv_infos then @@ -349,7 +349,7 @@ function ScBroker:get_bv_infos(ba_id) return false end - return bv_infos + return bvs end return sc_broker diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index 427bf1f5d4a..b81a954ef40 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -28,7 +28,7 @@ function sc_common.new(logger) self.logger = logger if not self.logger then - self.logger = sc_logger.new("/var/log/centreon-broker/stream-connector.log", 1) + self.logger = sc_logger.new() end setmetatable(self, { __index = ScCommon }) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index ca9216f8281..51c55644686 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -17,7 +17,10 @@ local ScEvent = {} function sc_event.new(event, params, common, logger, broker) local self = {} - self.sc_logger = logger + self.logger = logger + if not self.logger then + self.logger = sc_logger.new() + end self.sc_common = common self.params = params self.event = event @@ -95,7 +98,7 @@ end -- @return true|false (boolean) function ScEvent:is_valid_host_status_event() -- return false if we can't get hostname or host id is nil - if not self:is_host_valid() then + if not self:is_valid_host() then self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated") return false end @@ -108,11 +111,24 @@ function ScEvent:is_valid_host_status_event() end -- return false if one of event ack, downtime or state type (hard soft) aren't valid - if not self:are_all_event_states_valid() then + if not self:is_valid_event_states() then self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. 
tostring(self.event.host_id) .. " is not in a validated downtime, ack or hard/soft state") return false end + -- return false if host is not monitored from an accepted poller + if not self:is_valid_poller() then + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not monitored from an accepted poller") + return false + end + + -- return false if host has not an accepted severity + if not self:is_valid_host_severity() then + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: service id: " .. tostring(self.event.service_id) + .. ". host_id: " .. tostring(self.event.host_id) .. ". Host has not an accepted severity") + return false + end + -- return false if host is not in an accepted hostgroup if not self:is_valid_hostgroup() then self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not in an accepted hostgroup") @@ -126,14 +142,14 @@ end -- @return true|false (boolean) function ScEvent:is_valid_service_status_event() -- return false if we can't get hostname or host id is nil - if not self:is_host_valid() then + if not self:is_valid_host() then self.sc_logger:warning("[sc_event:is_valid_service_status_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated for service with id: " .. tostring(self.event.service_id)) return false end -- return false if we can't get service description of service id is nil - if not self:is_service_valid() then + if not self:is_valid_service() then self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't been validated") return false end @@ -146,11 +162,32 @@ function ScEvent:is_valid_service_status_event() end -- return false if one of event ack, downtime or state type (hard soft) aren't valid - if not self:are_all_event_states_valid() then + if not self:is_valid_event_states() then self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in a validated downtime, ack or hard/soft state") return false end + -- return false if host is not monitored from an accepted poller + if not self:is_valid_poller() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) + .. ". host_id: " .. tostring(self.event.host_id) .. " is not monitored from an accepted poller") + return false + end + + -- return false if host has not an accepted severity + if not self:is_valid_host_severity() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) + .. ". host_id: " .. tostring(self.event.host_id) .. ". Host has not an accepted severity") + return false + end + + -- return false if service has not an accepted severity + if not self:is_valid_service_severity() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) + .. ". host_id: " .. tostring(self.event.host_id) .. ". Service has not an accepted severity") + return false + end + -- return false if host is not in an accepted hostgroup if not self:is_valid_hostgroup() then self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. 
tostring(self.event.service_id) @@ -167,59 +204,69 @@ function ScEvent:is_valid_service_status_event() return true end ---- is_host_valid: check if host name and/or id are valid +--- is_valid_host: check if host name and/or id are valid -- @return true|false (boolean) -function ScEvent:is_host_valid() - local host_infos = self.sc_broker:get_host_all_infos(self.event.host_id) +function ScEvent:is_valid_host() + + -- return false if host id is nil + if (not self.event.host_id and self.params.skip_nil_id == 1) then + self.sc_logger:warning("[sc_event:is_valid_host]: Invalid host with id: " .. tostring(self.event.host_id) .. " skip nil id is: " .. tostring(self.params.skip_nil_id)) + return false + end + + self.event.cache.host = self.sc_broker:get_host_all_infos(self.event.host_id) - -- return false if we can't get hostname or host id is nil - if (not host_infos and self.params.skip_nil_id) or (not host_infos.name and self.params.skip_anon_events == 1) then - self.sc_logger:warning("[sc_event:is_host_valid]: Invalid host with id: " .. tostring(self.event.host_id) .. " skip nil id is: " .. tostring(self.params.skip_nil_id) - .. " host name is: " .. tostring(host_infos.name) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) + -- return false if we can't get hostname + if (not self.event.cache.host.name and self.params.skip_anon_events == 1) then + self.sc_logger:warning("[sc_event:is_valid_host]: Invalid host with id: " .. tostring(self.event.host_id) + .. " host name is: " .. tostring(self.event.cache.host.name) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) return false end -- force host name to be its id if no name has been found - if not host_infos.name then - self.event.cache.name = host_infos.host_id or self.event.host_id - else - self.event.cache = host_infos + if not self.event.cache.host.name then + self.event.cache.host.name = self.event.cache.host.host_id or self.event.host_id end -- return false if event is coming from fake bam host - if string.find(self.event.cache.name, "^_Module_BAM_*") then - self.sc_logger:debug("[sc_event:is_host_valid]: Host is a BAM fake host: " .. tostring(self.event.cache.name)) + if string.find(self.event.cache.host.name, "^_Module_BAM_*") then + self.sc_logger:debug("[sc_event:is_valid_host]: Host is a BAM fake host: " .. tostring(self.event.cache.host.name)) return false end return true end ---- is_service_valid: check if service description and/or id are valid +--- is_valid_service: check if service description and/or id are valid -- @return true|false (boolean) -function ScEvent:is_service_valid() - local service_infos = self.sc_broker:get_service_all_infos(self.event.host_id, self.event.service_id) +function ScEvent:is_valid_service() + + -- return false if service id is nil + if (not self.event.service_id and self.params.skip_nil_id == 1) then + self.sc_logger:warning("[sc_event:is_valid_service]: Invalid service with id: " .. tostring(self.event.service_id) .. " skip nil id is: " .. tostring(self.params.skip_nil_id)) + return false + end + + self.event.cache.service = self.sc_broker:get_service_all_infos(self.event.host_id, self.event.service_id) - -- return false if we can't get service description or if service id is nil - if (not service_infos and self.params.skip_nil_id) or (not service_infos.description and self.params.skip_anon_events == 1) then - self.sc_logger:warning("[sc_event:is_host_valid]: Invalid service with id: " .. tostring(self.event.service_id) .. " skip nil id is: " .. 
tostring(self.params.skip_nil_id)
-      .. " host name is: " .. tostring(host_infos.name) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events))
+  -- return false if we can't get hostname
+  if (not self.event.cache.host.name and self.params.skip_anon_events == 1) then
+    self.sc_logger:warning("[sc_event:is_valid_host]: Invalid host with id: " .. tostring(self.event.host_id)
+      .. " host name is: " .. tostring(self.event.cache.host.name) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events))
     return false
   end
 
   -- force host name to be its id if no name has been found
-  if not host_infos.name then
-    self.event.cache.name = host_infos.host_id or self.event.host_id
-  else
-    self.event.cache = host_infos
+  if not self.event.cache.host.name then
+    self.event.cache.host.name = self.event.cache.host.host_id or self.event.host_id
   end
 
   -- return false if event is coming from fake bam host
-  if string.find(self.event.cache.name, "^_Module_BAM_*") then
-    self.sc_logger:debug("[sc_event:is_host_valid]: Host is a BAM fake host: " .. tostring(self.event.cache.name))
+  if string.find(self.event.cache.host.name, "^_Module_BAM_*") then
+    self.sc_logger:debug("[sc_event:is_valid_host]: Host is a BAM fake host: " .. tostring(self.event.cache.host.name))
     return false
   end
 
   return true
 end
 
---- is_service_valid: check if service description and/or id are valid
+--- is_valid_service: check if service description and/or id are valid
 -- @return true|false (boolean)
-function ScEvent:is_service_valid()
-  local service_infos = self.sc_broker:get_service_all_infos(self.event.host_id, self.event.service_id)
+function ScEvent:is_valid_service()
+
+  -- return false if service id is nil
+  if (not self.event.service_id and self.params.skip_nil_id == 1) then
+    self.sc_logger:warning("[sc_event:is_valid_service]: Invalid service with id: " .. tostring(self.event.service_id) .. " skip nil id is: " .. tostring(self.params.skip_nil_id))
+    return false
+  end
+
+  self.event.cache.service = self.sc_broker:get_service_all_infos(self.event.host_id, self.event.service_id)
 
-  -- return false if we can't get service description or if service id is nil
-  if (not service_infos and self.params.skip_nil_id) or (not service_infos.description and self.params.skip_anon_events == 1) then
-    self.sc_logger:warning("[sc_event:is_host_valid]: Invalid service with id: " .. tostring(self.event.service_id) .. " skip nil id is: " .. tostring(self.params.skip_nil_id)
-      .. " service description is: " .. tostring(service_infos.description) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events))
+  -- return false if we can't get service description
+  if (not self.event.cache.service.description and self.params.skip_anon_events == 1) then
+    self.sc_logger:warning("[sc_event:is_valid_service]: Invalid service with id: " .. tostring(self.event.service_id)
+      .. " service description is: " .. tostring(self.event.cache.service.description) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events))
     return false
   end
 
   -- force service description to its id if no description has been found
-  if not service_infos.description then
-    self.event.cache.description = service_infos.service_id or self.event.service_id
-  else
-    self.event.cache = service_infos
+  if not self.event.cache.service.description then
+    self.event.cache.service.description = self.event.cache.service.service_id or self.event.service_id
   end
 
   return true
 end
 
---- are_all_event_states_valid: wrapper method that checks common aspect of an event such as ack and state_type
+--- is_valid_event_states: wrapper method that checks common aspect of an event such as ack and state_type
 -- @return true|false (boolean)
-function ScEvent:are_all_event_states_valid()
+function ScEvent:is_valid_event_states()
   -- return false if state_type (HARD/SOFT) is not valid
   if not self:is_valid_event_state_type() then
     return false
@@ -297,10 +344,10 @@ function ScEvent:is_valid_hostgroup()
     return true
   end
 
-  self.event.hostgroups = self.sc_broker:get_hostgroups(self.event.host_id)
+  self.event.cache.hostgroups = self.sc_broker:get_hostgroups(self.event.host_id)
 
   -- return false if no hostgroups were found
-  if not self.event.hostgroups then
+  if not self.event.cache.hostgroups then
     self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id)
       .. " is not linked to a hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups)
     return false
@@ -326,7 +373,7 @@ end
 -- @return false (boolean) if no matching hostgroup has been found
 function ScEvent:find_hostgroup_in_list()
   for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_hostgroups, ",")) do
-    for _, event_hostgroup in pairs(self.event.hostgroups) do
+    for _, event_hostgroup in pairs(self.event.cache.hostgroups) do
       if accepted_name == event_hostgroup.group_name then
         return accepted_name
       end
@@ -344,10 +391,10 @@ function ScEvent:is_valid_servicegroup()
     return true
   end
 
-  self.event.servicegroups = self.sc_broker:get_servicegroups(self.event.host_id, self.event.service_id)
+  self.event.cache.servicegroups = self.sc_broker:get_servicegroups(self.event.host_id, self.event.service_id)
 
   -- return false if no servicegroups were found
-  if not self.event.servicegroups then
+  if not self.event.cache.servicegroups then
     self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id)
       .. " is not linked to a servicegroup. Accepted servicegroups are: " ..
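-- After this refactor every piece of broker cache data hangs off
-- self.event.cache. An illustrative layout for a validated service_status
-- event (assumed field values) would be:
--   self.event.cache = {
--     host = { name = "srv-01", instance = 2 },
--     service = { description = "disk-root" },
--     hostgroups = { { group_id = 4, group_name = "linux-servers" } },
--     servicegroups = {}
--   }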
self.params.accepted_servicegroups) return false @@ -372,7 +419,7 @@ end -- @return accepted_name or false (string|boolean) the name of the first matching servicegroup if found or false if not found function ScEvent:find_servicegroup_in_list() for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_servicegroups, ",")) do - for _, event_servicegroup in pairs(self.event.servicegroups) do + for _, event_servicegroup in pairs(self.event.cache.servicegroups) do if accepted_name == event_servicegroup.group_name then return accepted_name end @@ -386,7 +433,7 @@ end -- @return true|false (boolean) function ScEvent:is_valid_bam_event() -- return false if ba name is invalid or ba_id is nil - if not self:is_ba_valid() then + if not self:is_valid_ba() then self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " hasn't been validated") return false end @@ -418,21 +465,28 @@ function ScEvent:is_valid_bam_event() return true end ---- is_ba_valid: check if ba name and/or id are valid +--- is_valid_ba: check if ba name and/or id are valid -- @return true|false (boolean) -function ScEvent:is_ba_valid() - self.event.cache = self.sc_broker:get_ba_infos(self.event.ba_id) +function ScEvent:is_valid_ba() + + -- return false if ba_id is nil + if (not self.event.ba_id and self.params.skip_nil_id == 1) then + self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA with id: " .. tostring(self.event.ba_id) .. ". And skip nil id is set to: " .. tostring(self.params.skip_nil_id)) + return false + end + + self.event.cache.ba = self.sc_broker:get_ba_infos(self.event.ba_id) - -- return false if we can't get ba name or ba id is nil - if (not self.event.cache.ba_name and self.params.skip_nil_id) or (not self.event.cache.ba_name and self.params.skip_anon_events == 1) then - self.sc_logger:warning("[sc_event:is_ba_valid]: Invalid BA with id: " .. tostring(self.event.ba_id) .. ". And skip nil id is set to: " .. tostring(self.params.skip_nil_id) - .. ". Found BA name is: " .. tostring(self.event.cache.ba_name) .. ". And skip anon event param is set to: " .. tostring(self.params.skip_anon_events)) + -- return false if we can't get ba name + if (not self.event.cache.ba.ba_name and self.params.skip_anon_events == 1) then + self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA with id: " .. tostring(self.event.ba_id) + .. ". Found BA name is: " .. tostring(self.event.cache.ba.ba_name) .. ". And skip anon event param is set to: " .. tostring(self.params.skip_anon_events)) return false end -- force ba name to be its id if no name has been found - if not self.event.cache.name then - self.event.cache.name = self.event.cache.name or self.event.ba_id + if not self.event.cache.ba.ba_name then + self.event.cache.ba.ba_name = self.event.cache.ba.ba_name or self.event.ba_id end return true @@ -442,7 +496,7 @@ end -- @return true|false (boolean) function ScEvent:is_valid_ba_status_event() if not self:is_valid_event_status(self.params.ba_status) then - self.sc_logger:warning("[sc_event:is_ba_valid]: Invalid BA status for BA id: " .. tostring(self.event.ba_id) .. ". State is: " + self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA status for BA id: " .. tostring(self.event.ba_id) .. ". State is: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]) .. ". Acceptes states are: " .. 
tostring(self.params.ba_status)) return false end @@ -454,7 +508,7 @@ end -- @return true|false (boolean) function ScEvent:is_valid_ba_downtime_state() if not self.sc_common:compare_numbers(self.params.in_downtime, self.sc_common:boolean_to_number(self.event.in_downtime), ">=") then - self.sc_logger:warning("[sc_event:is_ba_valid]: Invalid BA downtime state for BA id: " .. tostring(self.event.ba_id) .. " downtime state is : " .. tostring(self.event.in_downtime) + self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA downtime state for BA id: " .. tostring(self.event.ba_id) .. " downtime state is : " .. tostring(self.event.in_downtime) .. " and accepted downtime state must be below or equal to: " .. tostring(self.params.in_downtime)) return false end @@ -480,10 +534,10 @@ function ScEvent:is_valid_bv() return true end - self.event.bvs = self.sc_broker:get_bv_infos(self.event.host_id) + self.event.cache.bvs = self.sc_broker:get_bvs_infos(self.event.host_id) -- return false if no hostgroups were found - if not self.event.bvs then + if not self.event.cache.bvs then self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id) .. " is not linked to a BV. Accepted BVs are: " .. self.params.accepted_bvs) return false @@ -509,7 +563,7 @@ end -- @return false (boolean) if no matching BV has been found function ScEvent:find_bv_in_list() for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_bvs,",")) do - for _, event_bv in pairs(self.event.bvs) do + for _, event_bv in pairs(self.event.cache.bvs) do if accepted_name == event_bv.bv_name then return accepted_name end @@ -519,6 +573,100 @@ function ScEvent:find_bv_in_list() return false end +--- is_valid_poller: check if the event is monitored from an accepted poller +-- @return true|false (boolean) +function ScEvent:is_valid_poller() + -- return true if option is not set + if self.params.accepted_pollers == "" then + return true + end + + -- return false if instance id is not found in cache + if not self.event.cache.host.instance then + self.sc_logger:warning("[sc_event:is_valid_poller]: no instance ID found for host ID: " .. tostring(self.event.host_id)) + return false + end + + self.event.cache.poller = self.sc_broker:get_instance(self.event.cache.host.instance) + + -- return false if no poller found in cache + if not self.event.cache.poller then + self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id) + .. " is not linked to an accepted poller (no poller found in cache). Accepted pollers are: " .. self.params.accepted_pollers) + return false + end + + local accepted_poller_name = self:find_poller_in_list() + + -- return false if the host is not monitored from a valid poller + if not accepted_poller_name then + self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id) + .. " is not linked to an accepted poller. Host is monitored from: " .. tostring(self.event.cache.poller) .. ". Accepted pollers are: " .. self.params.accepted_pollers) + return false + else + self.sc_logger:debug("[sc_event:is_valid_poller]: event for host with id: " .. tostring(self.event.host_id) + .. "matched poller: " .. 
accepted_poller_name)
+  end
+
+  return true
+end
+
+--- find_poller_in_list: compare accepted pollers from parameters with the event poller
+-- @return accepted_name or false (string|boolean) the name of the first matching poller if found or false if not found
+function ScEvent:find_poller_in_list()
+  for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_pollers, ",")) do
+    if accepted_name == self.event.cache.poller then
+      return accepted_name
+    end
+  end
+
+  return false
+end
+
+--- is_valid_host_severity: checks if the host severity is accepted
+-- @return true|false (boolean)
+function ScEvent:is_valid_host_severity()
+  -- return true if there is no severity filter
+  if self.params.host_severity_threshold == nil then
+    return true
+  end
+
+  -- get severity of the host from broker cache
+  self.event.cache.host_severity = self.sc_broker:get_severity(self.event.host_id)
+
+  -- return false if host severity doesn't match
+  if not self.sc_common:compare_numbers(self.params.host_severity_threshold, self.event.cache.host_severity, self.params.host_severity_operator) then
+    self.sc_logger:debug("[sc_event:is_valid_host_severity]: dropping event because host with id: " .. tostring(self.event.host_id) .. " has an invalid severity. Severity is: "
+      .. tostring(self.event.cache.host_severity) .. ". host_severity_threshold (" .. tostring(self.params.host_severity_threshold) .. ") must be " .. self.params.host_severity_operator
+      .. " to the severity of the host (" .. tostring(self.event.cache.host_severity) .. ")")
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_service_severity: checks if the service severity is accepted
+-- @return true|false (boolean)
+function ScEvent:is_valid_service_severity()
+  -- return true if there is no severity filter
+  if self.params.service_severity_threshold == nil then
+    return true
+  end
+
+  -- get severity of the service from broker cache
+  self.event.cache.service_severity = self.sc_broker:get_severity(self.event.host_id, self.event.service_id)
+
+  -- return false if service severity doesn't match
+  if not self.sc_common:compare_numbers(self.params.service_severity_threshold, self.event.cache.service_severity, self.params.service_severity_operator) then
+    self.sc_logger:debug("[sc_event:is_valid_service_severity]: dropping event because service with id: " .. tostring(self.event.service_id) .. " has an invalid severity. Severity is: "
+      .. tostring(self.event.cache.service_severity) .. ". service_severity_threshold (" .. tostring(self.params.service_severity_threshold) .. ") must be " .. self.params.service_severity_operator
+      .. " to the severity of the service (" .. tostring(self.event.cache.service_severity) .. 
")") + return false + end + + return true +end + --- is_valid_storage: DEPRECATED method, use NEB category to get metric data instead -- return true (boolean) function ScEvent:is_valid_storage_event() diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 26d0419ac20..29904006005 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -20,6 +20,9 @@ function sc_params.new(common, logger) -- initiate mandatory libs self.logger = logger + if not self.logger then + self.logger = sc_logger.new() + end self.common = common -- initiate params @@ -42,7 +45,12 @@ function sc_params.new(common, logger) accepted_hostgroups = "", accepted_servicegroups = "", accepted_bvs = "", - + accepted_pollers = "", + service_severity_threshold = nil, + service_severity_operator = ">=", + host_severity_threshold = nil, + host_severity_operator = ">=", + -- filter anomalous events skip_anon_events = 1, skip_nil_id = 1, @@ -203,6 +211,11 @@ function ScParams:check_params() self.params.accepted_hostgroups = self.common:if_wrong_type(self.params.accepted_hostgroups, "string", "") self.params.accepted_servicegroups = self.common:if_wrong_type(self.params.accepted_servicegroups, "string", "") self.params.accepted_bvs = self.common:if_wrong_type(self.params.accepted_bvs, "string", "") + self.params.accepted_pollers = self.common:if_wrong_type(self.params.accepted_pollers, "string", "") + self.params.host_severity_threshold = self.common:if_wrong_type(self.params.host_severity_threshold, "number", nil) + self.params.service_severity_threshold = self.common:if_wrong_type(self.params.service_severity_threshold, "number", nil) + self.params.host_severity_operator = self.common:if_wrong_type(self.params.host_severity_operator, "string", ">=") + self.params.service_severity_operator = self.common:if_wrong_type(self.params.service_severity_operator, "string", ">=") end return sc_params \ No newline at end of file diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md new file mode 100644 index 00000000000..59e2373bdb9 --- /dev/null +++ b/stream-connectors/modules/docs/README.md @@ -0,0 +1,95 @@ +# Stream Connectors lib documentation + +- [Stream Connectors lib documentation](#stream-connectors-lib-documentation) + - [Libraries list](#libraries-list) + - [sc_common methods](#sc_common-methods) + - [sc_logger methods](#sc_logger-methods) + - [sc_broker methods](#sc_broker-methods) + - [sc_param methods](#sc_param-methods) + - [sc_event methods](#sc_event-methods) + +## Libraries list + +| Lib name | Content | Usage | Documentation | +| --------- | ------------------------------------------------ | ------------------------------------------------------------------------- | ----------------------------- | +| sc_common | basic methods for lua | you can use it when you want to simplify your code | [Documentation](sc_common.md) | +| sc_logger | methods that handle logging with centreon broker | When you want to log a message from your stream connector | [Documentation](sc_logger.md) | +| sc_broker | wrapper methods for broker cache | when you need something from the broker cache | [Documentation](sc_broker.md) | +| sc_param | handles parameters for stream connectors | when you want to initiate a stream connector with all standard parameters | [Documentation](sc_param.md) | +| 
sc_event  | methods to help you interact with a broker event | when you want to perform a specific action on an event                     | [Documentation](sc_event.md)  |
+
+## sc_common methods
+
+| Method name                        | Method description                                                                             | Link                                                                      |
+| ---------------------------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------- |
+| ifnil_or_empty                     | check if a variable is empty or nil and replace it with a default value if it is the case      | [Documentation](sc_common.md#ifnil_or_empty-method)                       |
+| if_wrong_type                      | check the type of a variable; if it is wrong, replace the variable with a default value        | [Documentation](sc_common.md#if_wrong_type-method)                        |
+| boolean_to_number                  | change a true/false boolean to a 1/0 value                                                     | [Documentation](sc_common.md#boolean_to_number-method)                    |
+| check_boolean_number_option_syntax | make sure that a boolean is 0 or 1; if that's not the case, replace it with a default value    | [Documentation](sc_common.md#check_boolean_number_option_syntax-method)   |
+| split                              | split a string using a separator (default is ",") and store each part in a table              | [Documentation](sc_common.md#split-method)                                 |
+| compare_numbers                    | compare two numbers using the given mathematical operator and return true or false            | [Documentation](sc_common.md#compare_numbers-method)                       |
+
+## sc_logger methods
+
+| Method name | Method description                           | Link                                          |
+| ----------- | -------------------------------------------- | --------------------------------------------- |
+| error       | write an error message in the log file       | [Documentation](sc_logger.md#error-method)    |
+| warning     | write a warning message in the log file      | [Documentation](sc_logger.md#warning-method)  |
+| notice      | write a notice/info message in the log file  | [Documentation](sc_logger.md#notice-method)   |
+| debug       | write a debug message in the log file        | [Documentation](sc_logger.md#debug-method)    |
+
+## sc_broker methods
+
+| Method name           | Method description                                                                        | Link                                                        |
+| --------------------- | ------------------------------------------------------------------------------------------ | ----------------------------------------------------------- |
+| get_host_all_infos    | retrieve all information about a host from the broker cache                                | [Documentation](sc_broker.md#get_host_all_infos-method)     |
+| get_service_all_infos | retrieve all information about a service from the broker cache                             | [Documentation](sc_broker.md#get_service_all_infos-method)  |
+| get_host_infos        | retrieve one or more specific pieces of information about a host from the broker cache     | [Documentation](sc_broker.md#get_host_infos-method)         |
+| get_service_infos     | retrieve one or more specific pieces of information about a service from the broker cache  | [Documentation](sc_broker.md#get_service_infos-method)      |
+| get_hostgroups        | retrieve the hostgroups linked to a host from the broker cache                             | [Documentation](sc_broker.md#get_hostgroups-method)         |
+| get_servicegroups     | retrieve the servicegroups linked to a service from the broker cache                       | [Documentation](sc_broker.md#get_servicegroups-method)      |
+| get_severity          | retrieve the severity of a host or a service from the broker cache                         | [Documentation](sc_broker.md#get_severity-method)           |
+| get_instance          | retrieve the name of the poller using the instance id from the broker cache                | [Documentation](sc_broker.md#get_instance-method)           |
+| get_ba_infos          | retrieve the name and description of a BA from the broker cache                            | [Documentation](sc_broker.md#get_ba_infos-method)           |
+| get_bvs_infos         | 
retrieve the name and description of all BVs linked to a BA | [Documentation](sc_broker.md#get_bvs_infos-method) |
+
+## sc_param methods
+
+| Method name    | Method description                                                                                                                                   | Link                                                |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- |
+| param_override | replace default values of params with the ones provided by users in the web configuration of the stream connector                                  | [Documentation](sc_param.md#param_override-method) |
+| check_params   | make sure that the default stream connector params provided by the user from the web configuration are valid. If not, it falls back to the default value | [Documentation](sc_param.md#check_params-method)   |
+
+## sc_event methods
+
+| Method name                      | Method description                                                                                                                                             | Link                                                                  |
+| -------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- |
+| is_valid_category                | check if the category of the event is accepted according to the stream connector params                                                                       | [Documentation](sc_event.md#is_valid_category-method)                 |
+| is_valid_element                 | check if the element of the event is accepted according to the stream connector params                                                                        | [Documentation](sc_event.md#is_valid_element-method)                  |
+| is_valid_event                   | check if the event is valid according to the stream connector params                                                                                          | [Documentation](sc_event.md#is_valid_event-method)                    |
+| is_valid_neb_event               | check if the neb event is valid according to the stream connector params                                                                                      | [Documentation](sc_event.md#is_valid_neb_event-method)                |
+| is_valid_host_status_event       | check if the "host status" event is valid according to the stream connector params                                                                            | [Documentation](sc_event.md#is_valid_host_status_event-method)        |
+| is_valid_service_status_event    | check if the "service status" event is valid according to the stream connector params                                                                         | [Documentation](sc_event.md#is_valid_service_status_event-method)     |
+| is_valid_host                    | check if the host name and/or ID are valid according to the stream connector params                                                                           | [Documentation](sc_event.md#is_valid_host-method)                     |
+| is_valid_service                 | check if the service description and/or ID are valid according to the stream connector params                                                                 | [Documentation](sc_event.md#is_valid_service-method)                  |
+| is_valid_event_states            | check if the state (HARD/SOFT), acknowledgement state and downtime state are valid according to the stream connector params                                   | [Documentation](sc_event.md#is_valid_event_states-method)             |
+| is_valid_event_status            | check if the status (OK, DOWN...) 
of the event is valid according to the stream connector params                                                                             | [Documentation](sc_event.md#is_valid_event_status-method)             |
+| is_valid_event_state_type        | check if the state (HARD/SOFT) of the event is valid according to the stream connector params                                                                 | [Documentation](sc_event.md#is_valid_event_state_type-method)         |
+| is_valid_event_acknowledge_state | check if the acknowledgement state of the event is valid according to the stream connector params                                                             | [Documentation](sc_event.md#is_valid_event_acknowledge_state-method)  |
+| is_valid_event_downtime_state    | check if the downtime state of the event is valid according to the stream connector params                                                                    | [Documentation](sc_event.md#is_valid_event_downtime_state-method)     |
+| is_valid_hostgroup               | check if the host is in an accepted hostgroup according to the stream connector params                                                                        | [Documentation](sc_event.md#is_valid_hostgroup-method)                |
+| find_hostgroup_in_list           | check if one of the hostgroups of the event is in the list of accepted hostgroups provided in the stream connector configuration. Stops at first match        | [Documentation](sc_event.md#find_hostgroup_in_list-method)            |
+| is_valid_servicegroup            | check if the service is in an accepted servicegroup according to the stream connector params                                                                  | [Documentation](sc_event.md#is_valid_servicegroup-method)             |
+| find_servicegroup_in_list        | check if one of the servicegroups of the event is in the list of accepted servicegroups provided in the stream connector configuration. Stops at first match  | [Documentation](sc_event.md#find_servicegroup_in_list-method)         |
+| is_valid_bam_event               | check if the BAM event is valid according to the stream connector params                                                                                      | [Documentation](sc_event.md#is_valid_bam_event-method)                |
+| is_valid_ba                      | check if the BA name and/or ID are valid according to the stream connector params                                                                             | [Documentation](sc_event.md#is_valid_ba-method)                       |
+| is_valid_ba_status_event         | check if the "ba status" (OK, WARNING, CRITICAL) event is valid according to the stream connector params                                                      | [Documentation](sc_event.md#is_valid_ba_status_event-method)          |
+| is_valid_ba_downtime_state       | check if the BA downtime state is valid according to the stream connector params                                                                              | [Documentation](sc_event.md#is_valid_ba_downtime_state-method)        |
+| is_valid_ba_acknowledge_state    | DOES NOTHING                                                                                                                                                   | [Documentation](sc_event.md#is_valid_ba_acknowledge_state-method)     |
+| is_valid_bv                      | check if the BA is in an accepted BV according to the stream connector params                                                                                 | [Documentation](sc_event.md#is_valid_bv-method)                       |
+| find_bv_in_list                  | check if one of the BVs of the event is in the list of accepted BVs provided in the stream connector configuration. Stops at first match                      | [Documentation](sc_event.md#find_bv_in_list-method)                   |
+| is_valid_poller                  | check if the host is monitored from an accepted poller according to the stream connector params                                                               | [Documentation](sc_event.md#is_valid_poller-method)                   |
+| find_poller_in_list              | check if the poller that monitors the host is in the list of accepted pollers provided in the stream connector configuration. 
Stops at first match | [Documentation](sc_event.md#find_poller_in_list-method) |
+| is_valid_host_severity           | check if a host has a valid severity                                                                                                                          | [Documentation](sc_event.md#is_valid_host_severity-method)            |
+| is_valid_service_severity        | check if a service has a valid severity                                                                                                                       | [Documentation](sc_event.md#is_valid_service_severity-method)         |
+| is_valid_storage_event           | DOES NOTHING (deprecated, you should use neb events to send metrics)                                                                                          | [Documentation](sc_event.md#is_valid_storage_event-method)            |
diff --git a/stream-connectors/modules/docs/sc_broker.md b/stream-connectors/modules/docs/sc_broker.md
new file mode 100644
index 00000000000..d873d87469c
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_broker.md
@@ -0,0 +1,616 @@
+# Documentation of the sc_broker module
+
+- [Documentation of the sc_broker module](#documentation-of-the-sc_broker-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [Module constructor](#module-constructor)
+      - [constructor: Example](#constructor-example)
+  - [get_host_all_infos method](#get_host_all_infos-method)
+    - [get_host_all_infos: parameters](#get_host_all_infos-parameters)
+    - [get_host_all_infos: returns](#get_host_all_infos-returns)
+    - [get_host_all_infos: example](#get_host_all_infos-example)
+  - [get_service_all_infos method](#get_service_all_infos-method)
+    - [get_service_all_infos: parameters](#get_service_all_infos-parameters)
+    - [get_service_all_infos: returns](#get_service_all_infos-returns)
+    - [get_service_all_infos: example](#get_service_all_infos-example)
+  - [get_host_infos method](#get_host_infos-method)
+    - [get_host_infos: parameters](#get_host_infos-parameters)
+    - [get_host_infos: returns](#get_host_infos-returns)
+    - [get_host_infos: example](#get_host_infos-example)
+  - [get_service_infos method](#get_service_infos-method)
+    - [get_service_infos: parameters](#get_service_infos-parameters)
+    - [get_service_infos: returns](#get_service_infos-returns)
+    - [get_service_infos: example](#get_service_infos-example)
+  - [get_hostgroups method](#get_hostgroups-method)
+    - [get_hostgroups: parameters](#get_hostgroups-parameters)
+    - [get_hostgroups: returns](#get_hostgroups-returns)
+    - [get_hostgroups: example](#get_hostgroups-example)
+  - [get_servicegroups method](#get_servicegroups-method)
+    - [get_servicegroups: parameters](#get_servicegroups-parameters)
+    - [get_servicegroups: returns](#get_servicegroups-returns)
+    - [get_servicegroups: example](#get_servicegroups-example)
+  - [get_severity method](#get_severity-method)
+    - [get_severity: parameters](#get_severity-parameters)
+    - [get_severity: returns](#get_severity-returns)
+    - [get_severity: example](#get_severity-example)
+  - [get_instance method](#get_instance-method)
+    - [get_instance: parameters](#get_instance-parameters)
+    - [get_instance: returns](#get_instance-returns)
+    - [get_instance: example](#get_instance-example)
+  - [get_ba_infos method](#get_ba_infos-method)
+    - [get_ba_infos: parameters](#get_ba_infos-parameters)
+    - [get_ba_infos: returns](#get_ba_infos-returns)
+    - [get_ba_infos: example](#get_ba_infos-example)
+  - [get_bvs_infos method](#get_bvs_infos-method)
+    - [get_bvs_infos: parameters](#get_bvs_infos-parameters)
+    - [get_bvs_infos: returns](#get_bvs_infos-returns)
+    - [get_bvs_infos: example](#get_bvs_infos-example)
+
+## Introduction
+
+The sc_broker module provides wrapper methods for the broker cache. 
It is written in OOP (object-oriented programming) style.
+
+## Module initialization
+
+Since this is OOP, it is required to instantiate your module.
+
+### Module constructor
+
+The constructor can be initialized with one parameter, or it will use a default value.
+
+- sc_logger. This is an instance of the sc_logger module
+
+If you don't provide this parameter, it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
+
+### constructor: Example
+
+```lua
+-- load modules
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_broker.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- create a new instance of the sc_broker module
+local test_broker = sc_broker.new(test_logger)
+```
+
+## get_host_all_infos method
+
+The **get_host_all_infos** method returns all the broker cache information about a host using its ID.
+
+### get_host_all_infos: parameters
+
+| parameter          | type   | optional | default value |
+| ------------------ | ------ | -------- | ------------- |
+| the ID of the host | number | no       |               |
+
+### get_host_all_infos: returns
+
+| return                                            | type    | always | condition                                       |
+| ------------------------------------------------- | ------- | ------ | ------------------------------------------------ |
+| a table with all cache information from the host  | table   | no     | the host ID must be found in the broker cache    |
+| false                                             | boolean | no     | if the host ID wasn't found in the broker cache  |
+
+### get_host_all_infos: example
+
+```lua
+local host_id = 2712
+
+local result = test_broker:get_host_all_infos(host_id)
+
+--[[
+  --> result structure is:
+  {
+    _type = 65548,
+    acknowledged = false,
+    acknowledgement_type = 0,
+    action_url = "",
+    active_checks = true,
+    address = "10.30.2.85",
+    alias = "NRPE",
+    category = 1,
+    check_attempt = 1,
+    check_command = "base_host_alive",
+    check_freshness = false,
+    check_interval = 5,
+    check_period = "24x7",
+    check_type = 0,
+    checked = true,
+    default_active_checks = true,
+    default_event_handler_enabled = true,
+    default_flap_detection = true,
+    default_notify = true,
+    default_passive_checks = false,
+    display_name = "NRPE",
+    element = 12,
+    enabled = true,
+    event_handler = "",
+    event_handler_enabled = true,
+    execution_time = 0.002,
+    first_notification_delay = 0,
+    flap_detection = true,
+    flap_detection_on_down = true,
+    flap_detection_on_unreachable = true,
+    flap_detection_on_up = true,
+    flapping = false,
+    freshness_threshold = 0,
+    high_flap_threshold = 0,
+    host_id = 2712,
+    icon_image = "ppm/operatingsystems-linux-snmp-linux-128.png",
+    icon_image_alt = "",
+    instance_id = 1,
+    last_check_value = 1619727378,
+    last_hard_state = 0,
+    last_hard_state_change = 1616086363,
+    last_time_down = 1617692579,
+    last_time_up = 1619727370,
+    last_update = 1619727378,
+    last_state_change = 1617692639,
+    latency = 0.903,
+    low_flap_threshold = 0,
+    max_check_attempts = 3,
+    name = "NRPE",
+    next_check = 1619727378,
+    no_more_notification = false,
+    notes = "",
+    notes_url = "",
+    notification_interval = 0,
+    notification_number = 0,
+    notification_period = "24x7",
+    notify = true,
+    notify_on_down = true,
+    notify_on_downtime = true,
+    notify_on_flapping = true,
+    notify_on_recovery = true,
+    notify_on_unreachable = true,
+    passive_checks = false,
+    
percent_state_change = 0,
+    perfdata = "rta=0,263ms;3000,000;5000,000;0; rtmax=0,263ms;;;; rtmin=0,263ms;;;; pl=0%;80;100;0;100",
+    obsess_over_host = true,
+    output = "OK - 10.30.2.85 rta 0,263ms lost 0%",
+    retain_nonstatus_information = true,
+    retain_status_information = true,
+    retry_interval = 1,
+    scheduled_downtime_depth = 0,
+    should_be_scheduled = true,
+    stalk_on_down = false,
+    stalk_on_unreachable = false,
+    stalk_on_up = false,
+    state = 0,
+    state_type = 1,
+    statusmap_image = "",
+    timezone = ""
+  }
+
+  --> result.output is "OK - 10.30.2.85 rta 0,263ms lost 0%"
+--]]
+```
+
+## get_service_all_infos method
+
+The **get_service_all_infos** method returns all the broker cache information about a service using its host and service ID.
+
+### get_service_all_infos: parameters
+
+| parameter             | type   | optional | default value |
+| --------------------- | ------ | -------- | ------------- |
+| the ID of the host    | number | no       |               |
+| the ID of the service | number | no       |               |
+
+### get_service_all_infos: returns
+
+| return                                              | type    | always | condition                                                  |
+| ---------------------------------------------------- | ------- | ------ | ----------------------------------------------------------- |
+| a table with all cache information from the service  | table   | no     | the host and service IDs must be found in the broker cache  |
+| false                                                | boolean | no     | if the host or service ID wasn't found in the broker cache  |
+
+### get_service_all_infos: example
+
+```lua
+local host_id = 2712
+local service_id = 1991
+
+local result = test_broker:get_service_all_infos(host_id, service_id)
+
+--[[
+  --> result structure is:
+  {
+    _type = 65559,
+    action_url = "",
+    acknowledged = false,
+    acknowledgement_type = 0,
+    active_checks = true,
+    category = 1,
+    check_attempt = 1,
+    check_command = "base_centreon_ping",
+    check_freshness = false,
+    check_interval = 5,
+    check_period = "24x7",
+    check_type = 0,
+    checked = true,
+    default_active_checks = true,
+    default_event_handler_enabled = true,
+    default_flap_detection = true,
+    default_passive_checks = false,
+    description = "Ping",
+    display_name = "Ping",
+    default_notify = true,
+    element = 23,
+    enabled = true,
+    event_handler = "",
+    event_handler_enabled = true,
+    execution_time = 0.004,
+    first_notification_delay = 0,
+    flap_detection = true,
+    flap_detection_on_critical = true,
+    flap_detection_on_ok = true,
+    flap_detection_on_unknown = true,
+    flap_detection_on_warning = true,
+    flapping = false,
+    freshness_threshold = 0,
+    high_flap_threshold = 0,
+    host_id = 2712,
+    icon_image = "",
+    icon_image_alt = "",
+    last_check = 1619730350,
+    last_hard_state = 0,
+    last_hard_state_change = 1609343081,
+    last_state_change = 1609343081,
+    last_time_critical = 1609342781,
+    last_time_ok = 1619730350,
+    last_update = 1619730437,
+    latency = 0.76,
+    low_flap_threshold = 0,
+    max_check_attempts = 3,
+    next_check = 1619730910,
+    no_more_notifications = false,
+    notes = "",
+    notes_url = "",
+    notification_interval = 0,
+    notification_number = 0,
+    notification_period = "24x7",
+    notify = true,
+    notify_on_critical = true,
+    notify_on_downtime = true,
+    notify_on_flapping = true,
+    notify_on_recovery = true,
+    notify_on_unknown = true,
+    notify_on_warning = true,
+    obsess_over_service = true,
+    output = "OK - 10.30.2.15 rta 0,110ms lost 0%",
+    passive_checks = false,
+    percent_state_change = 0,
+    perfdata = "rta=0,110ms;200,000;400,000;0; rtmax=0,217ms;;;; rtmin=0,079ms;;;; pl=0%;20;50;0;100",
+    retain_nonstatus_information = true,
+    retain_status_information = true,
+    retry_interval = 1,
+    
scheduled_downtime_depth = 0,
+    service_id = 1991,
+    should_be_scheduled = true,
+    state_type = 1,
+    stalk_on_critical = false,
+    stalk_on_ok = false,
+    stalk_on_unknown = false,
+    stalk_on_warning = false,
+    state = 0,
+    volatile = false
+  }
+
+  --> result.output is: "OK - 10.30.2.15 rta 0,110ms lost 0%"
+--]]
+```
+
+## get_host_infos method
+
+The **get_host_infos** method returns the requested information about a host from the broker cache using the host ID.
+
+### get_host_infos: parameters
+
+| parameter               | type            | optional | default value |
+| ----------------------- | --------------- | -------- | ------------- |
+| the ID of the host      | number          | no       |               |
+| the desired information | string or table | no       |               |
+
+### get_host_infos: returns
+
+| return                               | type    | always | condition                      |
+| ------------------------------------- | ------- | ------ | ------------------------------- |
+| a table with the desired information  | table   | no     | it must be a valid host ID      |
+| false                                 | boolean | no     | if the host ID is nil or empty  |
+
+### get_host_infos: example
+
+```lua
+local host_id = 2712
+local desired_infos = {"retain_nonstatus_information", "obsess_over_host"}
+-- if you only want a single piece of information you can also use = "retain_nonstatus_information" or {"retain_nonstatus_information"}
+
+local result = test_broker:get_host_infos(host_id, desired_infos)
+
+--[[
+  --> result structure is:
+  {
+    host_id = 2712,
+    retain_nonstatus_information = false,
+    obsess_over_host = true
+  }
+
+  --> result.obsess_over_host is: true
+]]
+```
+
+## get_service_infos method
+
+The **get_service_infos** method returns the requested information about a service from the broker cache using the host and service ID.
+
+### get_service_infos: parameters
+
+| parameter               | type            | optional | default value |
+| ----------------------- | --------------- | -------- | ------------- |
+| the ID of the host      | number          | no       |               |
+| the ID of the service   | number          | no       |               |
+| the desired information | string or table | no       |               |
+
+### get_service_infos: returns
+
+| return                               | type    | always | condition                                  |
+| ------------------------------------- | ------- | ------ | ------------------------------------------- |
+| a table with the desired information  | table   | no     | it must be a valid host and service ID      |
+| false                                 | boolean | no     | if the host or service ID is nil or empty   |
+
+### get_service_infos: example
+
+```lua
+local host_id = 2712
+local service_id = 1991
+local desired_infos = {"description", "obsess_over_service"}
+-- if you only want a single piece of information you can also use = "description" or {"description"}
+
+local result = test_broker:get_service_infos(host_id, service_id, desired_infos)
+
+--[[
+  --> result structure is:
+  {
+    host_id = 2712,
+    service_id = 1991,
+    description = "Ping",
+    obsess_over_service = true
+  }
+
+  --> result.obsess_over_service is: true
+]]
+```
+
+## get_hostgroups method
+
+The **get_hostgroups** method retrieves the hostgroups linked to a host from the broker cache using the host ID.
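+
+This is also the table that sc_event iterates over in its find_hostgroup_in_list method. Here is a minimal sketch of such a membership check, assuming the test_broker instance from the constructor example above and a hypothetical accepted hostgroup name:
+
+```lua
+-- minimal sketch: check whether a host belongs to an accepted hostgroup
+local host_id = 2712
+local accepted_hostgroup = "NetworkSecurity" -- hypothetical value
+
+local hostgroups = test_broker:get_hostgroups(host_id)
+local found = false
+
+-- get_hostgroups returns false when nothing is found, so guard the loop
+if hostgroups then
+  for _, hostgroup in ipairs(hostgroups) do
+    if hostgroup.group_name == accepted_hostgroup then
+      found = true
+      break
+    end
+  end
+end
+```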
+ +### get_hostgroups: parameters + +| parameter | type | optional | default value | +| ------------------ | ------ | -------- | ------------- | +| the ID of the host | number | no | | + +### get_hostgroups: returns + +| return | type | always | condition | +| ---------------------------------------------------------- | ------- | ------ | ---------------------------------------------------------------- | +| a table with all hostgroups information linked to the host | table | no | host id must have linked hostgroups found in broker cache | +| false | boolean | no | if host ID is invalid (empty or nil) or no hostgroups were found | + +### get_hostgroups: example + +***notice: to better understand the result, you need to know that, by convention, a table starts at index 1 in lua and not 0 like it is in most languages*** + +```lua +local host_id = 2712 + +local result = test_broker:get_hostgroups(host_id) + +--[[ + --> result structure is: + { + [1] = { + group_id = 2, + group_name = "NetworkSecurity" + }, + [2] = { + group_id = 9, + group_name = "Archimede_Sydney" + } + } + + --> result[2].group_name is: "Archimede_Sydney" +--]] +``` + +## get_servicegroups method + +The **get_servicegroups** method retrieves servicegroups linked to a service from the broker cache using the host and service ID + +### get_servicegroups: parameters + +| parameter | type | optional | default value | +| --------------------- | ------ | -------- | ------------- | +| the ID of the host | number | no | | +| the ID of the service | number | no | | + +### get_servicegroups: returns + +| return | type | always | condition | +| ---------------------------------------------------------------- | ------- | ------ | ------------------------------------------------------------------------------ | +| a table with all servicegroups information linked to the service | table | no | service must have linked servicegroups found in broker cache | +| false | boolean | no | if host or service ID is invalid (empty or nil) or no servicegroups were found | + +### get_servicegroups: example + +***notice: to better understand the result, you need to know that, by convention, a table starts at index 1 in lua and not 0 like it is in most languages*** + +```lua +local host_id = 2712 +local service_id = 1991 + +local result = test_broker:get_servicegroups(host_id, service_id) + +--[[ + --> result structure is: + { + [1] = { + group_id = 2, + group_name = "Net_Services" + }, + [2] = { + group_id = 5, + group_name = "Another_SG" + } + } + + --> result[2].group_name is: "Another_SG" +--]] +``` + +## get_severity method + +The **get_severity** method retrieves the severity of a host or service from the broker cache using the ID of the service or host. 
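+
+This is the lookup that the sc_event severity filters (is_valid_host_severity and is_valid_service_severity) rely on. Here is a minimal sketch of such a filter, assuming the test_broker instance from the constructor example above, a sc_common instance called test_common, and hypothetical values mirroring the host_severity_threshold and host_severity_operator parameters:
+
+```lua
+-- minimal sketch: drop hosts whose severity doesn't pass the filter
+local host_id = 2712
+local host_severity_threshold = 3   -- hypothetical value
+local host_severity_operator = ">=" -- hypothetical value
+
+local severity = test_broker:get_severity(host_id)
+
+-- compare_numbers(a, b, op) returns the result of "a op b"
+if severity and test_common:compare_numbers(host_severity_threshold, severity, host_severity_operator) then
+  -- severity is accepted, the event can be processed
+end
+```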
+ +### get_severity: parameters + +| parameter | type | optional | default value | +| --------------------- | ------ | -------- | ------------- | +| the ID of the host | number | no | | +| the ID of the service | number | yes | | + +### get_severity: returns + +| return | type | always | condition | +| ----------------------------------------- | ------- | ------ | ------------------------------------------------------------------------ | +| the severity level of the host or service | number | no | service or host must have a severity found in broker cache | +| false | boolean | no | if host or service ID is invalid (empty or nil) or no severity was found | + +### get_severity: example + +```lua +-- severity for a host +local host_id = 2712 + +local result = test_broker:get_severity(host_id) +--> result is: 2 + +-- severity for a service +local service_id = 1991 + +result = test_broker:get_severity(host_id, service_id) +--> result is: 42 +``` + +## get_instance method + +The **get_instance** method returns the poller name using the instance ID. + +### get_instance: parameters + +| parameter | type | optional | default value | +| ---------------------- | ------ | -------- | ------------- | +| the ID of the instance | number | no | | + +### get_instance: returns + +| return | type | always | condition | +| --------------- | ------- | ------ | -------------------------------------------------------------------------------- | +| the poller name | string | no | instance ID must be found in broker cache | +| false | boolean | no | if instance ID is invalid (empty or nil) or ID was not found in the broker cache | + +### get_instance: example + +```lua +local instance_id = 2712 + +local result = test_broker:get_instance(instance_id) +--> result is: "awesome-poller" +``` + +## get_ba_infos method + +The **get_ba_infos** method retrieves the name and description of a BA from the broker cache using its ID. + +### get_ba_infos: parameters + +| parameter | type | optional | default value | +| ---------------- | ------ | -------- | ------------- | +| the ID of the BA | number | no | | + +### get_ba_infos: returns + +| return | type | always | condition | +| ----------------------------------------------- | ------- | ------ | -------------------------------------------------------------------------- | +| a table with the name and description of the BA | table | no | BA ID must be found in the broker cache | +| false | boolean | no | if BA ID is invalid (empty or nil) or ID was not found in the broker cache | + +### get_ba_infos: example + +```lua +local ba_id = 2712 + +local result = test_broker:get_ba_infos(ba_id) +--[[ + --> result structure is: + { + ba_id = 2712, + ba_name = "awesome-BA", + ba_description = "awesome-BA-description" + } + + --> result.ba_name is: "awesome-BA" + +--]] +``` + +## get_bvs_infos method + +The **get_bvs_infos** method retrieves the name and description of all BVs linked to a BA from the broker cache. 
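+
+This is also the table that sc_event iterates over in its find_bv_in_list method. Here is a minimal sketch of such a check, assuming the test_broker instance from the constructor example above and a hypothetical accepted BV name:
+
+```lua
+-- minimal sketch: check whether a BA is linked to an accepted BV
+local ba_id = 2712
+local accepted_bv = "awesome-BV" -- hypothetical value
+
+local bvs = test_broker:get_bvs_infos(ba_id)
+local found = false
+
+-- get_bvs_infos returns false when nothing is found, so guard the loop
+if bvs then
+  for _, bv in ipairs(bvs) do
+    if bv.bv_name == accepted_bv then
+      found = true
+      break
+    end
+  end
+end
+```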
+
+### get_bvs_infos: parameters
+
+| parameter        | type   | optional | default value |
+| ---------------- | ------ | -------- | ------------- |
+| the ID of the BA | number | no       |               |
+
+### get_bvs_infos: returns
+
+| return                                                | type    | always | condition                                                                     |
+| ------------------------------------------------------ | ------- | ------ | ------------------------------------------------------------------------------ |
+| a table with the name and description of all the BVs   | table   | no     | BVs must be found in the broker cache                                          |
+| false                                                  | boolean | no     | if the BA ID is invalid (empty or nil) or no BVs were found in the broker cache |
+
+### get_bvs_infos: example
+
+***notice: to better understand the result, you need to know that, by convention, a table starts at index 1 in lua and not 0 like it is in most languages***
+
+```lua
+local ba_id = 2712
+
+local result = test_broker:get_bvs_infos(ba_id)
+--[[
+  --> result structure is:
+  {
+    [1] = {
+      bv_id = 9,
+      bv_name = "awesome-BV",
+      bv_description = "awesome-BV-description"
+    },
+    [2] = {
+      bv_id = 33,
+      bv_name = "another-BV",
+      bv_description = "another-BV-description"
+    }
+  }
+
+  --> result[2].bv_name is: "another-BV"
+--]]
+```
diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md
new file mode 100644
index 00000000000..aeb42d5228e
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_common.md
@@ -0,0 +1,273 @@
+# Documentation of the sc_common module
+
+- [Documentation of the sc_common module](#documentation-of-the-sc_common-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [Module constructor](#module-constructor)
+      - [constructor: Example](#constructor-example)
+  - [ifnil_or_empty method](#ifnil_or_empty-method)
+    - [ifnil_or_empty: parameters](#ifnil_or_empty-parameters)
+    - [ifnil_or_empty: returns](#ifnil_or_empty-returns)
+    - [ifnil_or_empty: example](#ifnil_or_empty-example)
+  - [if_wrong_type method](#if_wrong_type-method)
+    - [if_wrong_type: parameters](#if_wrong_type-parameters)
+    - [if_wrong_type: returns](#if_wrong_type-returns)
+    - [if_wrong_type: example](#if_wrong_type-example)
+  - [boolean_to_number method](#boolean_to_number-method)
+    - [boolean_to_number: parameters](#boolean_to_number-parameters)
+    - [boolean_to_number: returns](#boolean_to_number-returns)
+    - [boolean_to_number: example](#boolean_to_number-example)
+  - [check_boolean_number_option_syntax method](#check_boolean_number_option_syntax-method)
+    - [check_boolean_number_option_syntax: parameters](#check_boolean_number_option_syntax-parameters)
+    - [check_boolean_number_option_syntax: returns](#check_boolean_number_option_syntax-returns)
+    - [check_boolean_number_option_syntax: example](#check_boolean_number_option_syntax-example)
+  - [split method](#split-method)
+    - [split: parameters](#split-parameters)
+    - [split: returns](#split-returns)
+    - [split: example](#split-example)
+  - [compare_numbers method](#compare_numbers-method)
+    - [compare_numbers: parameters](#compare_numbers-parameters)
+    - [compare_numbers: returns](#compare_numbers-returns)
+    - [compare_numbers: example](#compare_numbers-example)
+
+## Introduction
+
+The sc_common module provides methods to help with common needs when writing stream connectors. It is written in OOP (object-oriented programming) style.
+
+## Module initialization
+
+Since this is OOP, it is required to instantiate your module.
+
+### Module constructor
+
+The constructor can be initialized with one parameter, or it will use a default value.
+
+- sc_logger. This is an instance of the sc_logger module
+
+If you don't provide this parameter, it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
+
+### constructor: Example
+
+```lua
+-- load modules
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_logger.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- create a new instance of the sc_common module
+local test_common = sc_common.new(test_logger)
+```
+
+## ifnil_or_empty method
+
+The **ifnil_or_empty** method checks if the first parameter is empty or nil and returns the second parameter if that is the case. Otherwise, it will return the first parameter.
+
+### ifnil_or_empty: parameters
+
+| parameter                      | type           | optional | default value |
+| ------------------------------ | -------------- | -------- | ------------- |
+| the variable you want to check | string, number | no       |               |
+| the default value to return    | any            | no       |               |
+
+### ifnil_or_empty: returns
+
+| return               | type                  | always | condition                                   |
+| -------------------- | --------------------- | ------ | -------------------------------------------- |
+| the first parameter  | first parameter type  | no     | if the first parameter is not empty or nil   |
+| the second parameter | second parameter type | no     | if the first parameter is empty or nil       |
+
+### ifnil_or_empty: example
+
+```lua
+local first_param = "hello"
+local second_param = "goodbye"
+
+local result = test_common:ifnil_or_empty(first_param, second_param)
+--> result is "hello"
+
+first_param = ""
+result = test_common:ifnil_or_empty(first_param, second_param)
+--> result is "goodbye"
+```
+
+## if_wrong_type method
+
+The **if_wrong_type** method checks if the type of the first parameter is equal to the type given in the second parameter. If that is not the case, it returns the third parameter as a default value.
+
+### if_wrong_type: parameters
+
+| parameter                                                                                                    | type   | optional | default value |
+| ------------------------------------------------------------------------------------------------------------ | ------ | -------- | ------------- |
+| the variable whose type you want to check                                                                     | any    | no       |               |
+| the type that you want your variable to match                                                                 | string | no       |               |
+| the default value you want to return if the type of the first parameter doesn't match your second parameter   | any    | no       |               |
+
+### if_wrong_type: returns
+
+| return              | type | always | condition                                                                  |
+| ------------------- | ---- | ------ | --------------------------------------------------------------------------- |
+| the first parameter | any  | no     | if the type of the first parameter is equal to your second parameter        |
+| the third parameter | any  | no     | if the type of the first parameter is not equal to your second parameter    |
+
+### if_wrong_type: example
+
+```lua
+local first_param = "i am a string"
+local second_param = "string"
+local third_param = "my default value"
+
+local result = test_common:if_wrong_type(first_param, second_param, third_param)
+--> result is "i am a string"
+
+first_param = 3
+result = test_common:if_wrong_type(first_param, second_param, third_param)
+--> result is "my default value"
+```
+
+## boolean_to_number method
+
+The **boolean_to_number** method converts a boolean to its number equivalent.
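+
+This is typically useful to compare a boolean event flag against a 0/1 parameter, as sc_event does for downtime states. Here is a minimal sketch, assuming the test_common instance from the constructor example above and using the compare_numbers method documented below:
+
+```lua
+-- minimal sketch: accept an event in downtime only if the in_downtime
+-- parameter allows it (in_downtime_param is a hypothetical value)
+local in_downtime_param = 1
+local event_in_downtime = true
+
+-- boolean_to_number turns true into 1 so both sides can be compared
+if test_common:compare_numbers(in_downtime_param, test_common:boolean_to_number(event_in_downtime), ">=") then
+  -- downtime state is accepted
+end
+```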
+
+### boolean_to_number: parameters
+
+| parameter          | type    | optional | default value |
+| ------------------ | ------- | -------- | ------------- |
+| a boolean variable | boolean | no       |               |
+
+### boolean_to_number: returns
+
+| return            | type   | always | condition |
+| ----------------- | ------ | ------ | --------- |
+| a number (0 or 1) | number | yes    |           |
+
+### boolean_to_number: example
+
+```lua
+local my_boolean = true
+
+local result = test_common:boolean_to_number(my_boolean)
+--> result is 1
+```
+
+## check_boolean_number_option_syntax method
+
+The **check_boolean_number_option_syntax** method checks if the first parameter is a boolean number (0 or 1); if that is not the case, it returns the second parameter.
+
+### check_boolean_number_option_syntax: parameters
+
+| parameter                                                  | type | optional | default value |
+| ---------------------------------------------------------- | ---- | -------- | ------------- |
+| the variable you want to check                             | any  | no       |               |
+| a default value to return if the first parameter is wrong  | any  | no       |               |
+
+### check_boolean_number_option_syntax: returns
+
+| return               | type   | always | condition                                     |
+| -------------------- | ------ | ------ | ---------------------------------------------- |
+| the first parameter  | number | no     | the first parameter must be a boolean number   |
+| the second parameter | any    | no     | the first parameter is not a boolean number    |
+
+### check_boolean_number_option_syntax: example
+
+```lua
+local first_parameter = 1
+local second_parameter = "a default return value"
+
+local result = test_common:check_boolean_number_option_syntax(first_parameter, second_parameter)
+--> result is 1
+
+first_parameter = "not a boolean number"
+result = test_common:check_boolean_number_option_syntax(first_parameter, second_parameter)
+--> result is "a default return value"
+```
+
+## split method
+
+The **split** method splits a string using a separator and returns a table of all the split parts.
+
+### split: parameters
+
+| parameter                     | type   | optional | default value |
+| ----------------------------- | ------ | -------- | ------------- |
+| the string you need to split  | string | no       |               |
+| the separator you want to use | string | yes      | ","           |
+
+### split: returns
+
+| return                           | type   | always | condition                                     |
+| -------------------------------- | ------ | ------ | ---------------------------------------------- |
+| a table with all the split parts | table  | no     | the string to split mustn't be empty or nil    |
+| empty string                     | string | no     | the string to split is empty or nil            |
+
+### split: example
+
+***notice: to better understand the result, you need to know that, by convention, a table starts at index 1 in lua and not 0 like it is in most languages***
+
+```lua
+local my_string = "split;using;semicolon"
+local separator = ";"
+
+local result = test_common:split(my_string, separator)
+--[[
+
+  --> result structure is:
+  {
+    [1] = "split",
+    [2] = "using",
+    [3] = "semicolon"
+  }
+
+  --> result[2] is "using"
+
+--]]
+
+my_string = ""
+result = test_common:split(my_string, separator)
+--> result is "" (empty string)
+```
+
+## compare_numbers method
+
+The **compare_numbers** method compares a first number with a second one using the provided mathematical operator.
+
+### compare_numbers: parameters
+
+| parameter                              | type   | optional | default value |
+| -------------------------------------- | ------ | -------- | ------------- |
+| the first number you need to compare   | number | no       |               |
+| the second number you need to compare  | number | no       |               |
+| the 
mathematical operator you want to use | string | no       |               |
+
+accepted operators: <, >, >=, <=, ==, ~=
+
+### compare_numbers: returns
+
+| return    | type    | always | condition                                                                             |
+| --------- | ------- | ------ | -------------------------------------------------------------------------------------- |
+| a boolean | boolean | no     | both numbers must be numbers and the mathematical operator must be a valid operator    |
+| nil       | nil     | no     | if one of the numbers is not a number or the mathematical operator is not valid        |
+
+### compare_numbers: example
+
+```lua
+local first_number = 4
+local second_number = 12
+local operator = "=="
+
+local result = test_common:compare_numbers(first_number, second_number, operator)
+--> result is false (4 is not equal to 12)
+
+operator = "~="
+result = test_common:compare_numbers(first_number, second_number, operator)
+--> result is true
+
+first_number = "hello my friend"
+result = test_common:compare_numbers(first_number, second_number, operator)
+--> result is nil ("hello my friend" is not a valid number)
+```
diff --git a/stream-connectors/modules/docs/sc_event.md b/stream-connectors/modules/docs/sc_event.md
new file mode 100644
index 00000000000..209dc3b5909
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_event.md
@@ -0,0 +1,833 @@
+# Documentation of the sc_event module
+
+- [Documentation of the sc_event module](#documentation-of-the-sc_event-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [module constructor](#module-constructor)
+      - [constructor: Example](#constructor-example)
+  - [is_valid_category method](#is_valid_category-method)
+    - [is_valid_category: returns](#is_valid_category-returns)
+    - [is_valid_category: example](#is_valid_category-example)
+  - [is_valid_element method](#is_valid_element-method)
+    - [is_valid_element: returns](#is_valid_element-returns)
+    - [is_valid_element: example](#is_valid_element-example)
+  - [is_valid_event method](#is_valid_event-method)
+    - [is_valid_event: returns](#is_valid_event-returns)
+    - [is_valid_event: example](#is_valid_event-example)
+  - [is_valid_neb_event method](#is_valid_neb_event-method)
+    - [is_valid_neb_event: returns](#is_valid_neb_event-returns)
+    - [is_valid_neb_event: example](#is_valid_neb_event-example)
+  - [is_valid_host_status_event method](#is_valid_host_status_event-method)
+    - [is_valid_host_status_event: returns](#is_valid_host_status_event-returns)
+    - [is_valid_host_status_event: example](#is_valid_host_status_event-example)
+  - [is_valid_service_status_event method](#is_valid_service_status_event-method)
+    - [is_valid_service_status_event: returns](#is_valid_service_status_event-returns)
+    - [is_valid_service_status_event: example](#is_valid_service_status_event-example)
+  - [is_valid_host method](#is_valid_host-method)
+    - [is_valid_host: returns](#is_valid_host-returns)
+    - [is_valid_host: example](#is_valid_host-example)
+  - [is_valid_service method](#is_valid_service-method)
+    - [is_valid_service: returns](#is_valid_service-returns)
+    - [is_valid_service: example](#is_valid_service-example)
+  - [is_valid_event_states method](#is_valid_event_states-method)
+    - [is_valid_event_states: returns](#is_valid_event_states-returns)
+    - [is_valid_event_states: example](#is_valid_event_states-example)
+  - [is_valid_event_status method](#is_valid_event_status-method)
+    - [is_valid_event_status: returns](#is_valid_event_status-returns)
+    - [is_valid_event_status: example](#is_valid_event_status-example)
+  - [is_valid_event_state_type 
method](#is_valid_event_state_type-method) + - [is_valid_event_state_type: returns](#is_valid_event_state_type-returns) + - [is_valid_event_state_type: example](#is_valid_event_state_type-example) + - [is_valid_event_acknowledge_state method](#is_valid_event_acknowledge_state-method) + - [is_valid_event_acknowledge_state: returns](#is_valid_event_acknowledge_state-returns) + - [is_valid_event_acknowledge_state: example](#is_valid_event_acknowledge_state-example) + - [is_valid_event_downtime_state method](#is_valid_event_downtime_state-method) + - [is_valid_event_downtime_state: returns](#is_valid_event_downtime_state-returns) + - [is_valid_event_downtime_state: example](#is_valid_event_downtime_state-example) + - [is_valid_hostgroup method](#is_valid_hostgroup-method) + - [is_valid_hostgroup: returns](#is_valid_hostgroup-returns) + - [is_valid_hostgroup: example](#is_valid_hostgroup-example) + - [is_valid_servicegroup method](#is_valid_servicegroup-method) + - [is_valid_servicegroup: returns](#is_valid_servicegroup-returns) + - [is_valid_servicegroup: example](#is_valid_servicegroup-example) + - [is_valid_bam_event method](#is_valid_bam_event-method) + - [is_valid_bam_event: returns](#is_valid_bam_event-returns) + - [is_valid_bam_event: example](#is_valid_bam_event-example) + - [is_valid_ba method](#is_valid_ba-method) + - [is_valid_ba: returns](#is_valid_ba-returns) + - [is_valid_ba: example](#is_valid_ba-example) + - [is_valid_ba_status_event method](#is_valid_ba_status_event-method) + - [is_valid_ba_status_event: returns](#is_valid_ba_status_event-returns) + - [is_valid_ba_status_event: example](#is_valid_ba_status_event-example) + - [is_valid_ba_downtime_state method](#is_valid_ba_downtime_state-method) + - [is_valid_ba_downtime_state: returns](#is_valid_ba_downtime_state-returns) + - [is_valid_ba_downtime_state: example](#is_valid_ba_downtime_state-example) + - [is_valid_ba_acknowledge_state method](#is_valid_ba_acknowledge_state-method) + - [is_valid_ba_acknowledge_state: returns](#is_valid_ba_acknowledge_state-returns) + - [is_valid_ba_acknowledge_state: example](#is_valid_ba_acknowledge_state-example) + - [is_valid_bv method](#is_valid_bv-method) + - [is_valid_bv: returns](#is_valid_bv-returns) + - [is_valid_bv: example](#is_valid_bv-example) + - [find_hostgroup_in_list method](#find_hostgroup_in_list-method) + - [find_hostgroup_in_list: returns](#find_hostgroup_in_list-returns) + - [find_hostgroup_in_list: example](#find_hostgroup_in_list-example) + - [find_servicegroup_in_list method](#find_servicegroup_in_list-method) + - [find_servicegroup_in_list: returns](#find_servicegroup_in_list-returns) + - [find_servicegroup_in_list: example](#find_servicegroup_in_list-example) + - [find_bv_in_list method](#find_bv_in_list-method) + - [find_bv_in_list: returns](#find_bv_in_list-returns) + - [find_bv_in_list: example](#find_bv_in_list-example) + - [is_valid_poller method](#is_valid_poller-method) + - [is_valid_poller: returns](#is_valid_poller-returns) + - [is_valid_poller: example](#is_valid_poller-example) + - [find_poller_in_list method](#find_poller_in_list-method) + - [find_poller_in_list: returns](#find_poller_in_list-returns) + - [find_poller_in_list: example](#find_poller_in_list-example) + - [is_valid_host_severity method](#is_valid_host_severity-method) + - [is_valid_host_severity: returns](#is_valid_host_severity-returns) + - [is_valid_host_severity: example](#is_valid_host_severity-example) + - [is_valid_service_severity method](#is_valid_service_severity-method) + - 
[is_valid_service_severity: returns](#is_valid_service_severity-returns)
+    - [is_valid_service_severity: example](#is_valid_service_severity-example)
+  - [is_valid_storage_event method](#is_valid_storage_event-method)
+
+## Introduction
+
+The sc_event module provides methods to help you handle broker events in your stream connectors and to check their validity against the stream connector parameters. It is written in OOP (object-oriented programming) style.
+
+## Module initialization
+
+Since this is OOP, it is required to instantiate your module.
+
+### module constructor
+
+The constructor must be initialized with five parameters:
+
+- an event table
+- a params table
+- a sc_common instance
+- a sc_logger instance (will create a new one with default parameters if not provided)
+- a sc_broker instance
+
+### constructor: Example
+
+```lua
+local event = {
+  --- event data ---
+}
+
+-- load modules
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_param = require("centreon-stream-connectors-lib.sc_param")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_param.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- create a new instance of the sc_common module
+local test_common = sc_common.new(test_logger)
+
+-- create a new instance of the sc_param module
+local test_param = sc_param.new(test_common, test_logger)
+
+-- create a new instance of the sc_broker module
+local test_broker = sc_broker.new(test_logger)
+
+-- create a new instance of the sc_event module
+local test_event = sc_event.new(event, test_param.params, test_common, test_logger, test_broker)
+```
+
+## is_valid_category method
+
+The **is_valid_category** method checks if the event category is part of [**accepted_categories**](sc_param.md#default-parameters)
+
+### is_valid_category: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_valid_category: example
+
+```lua
+local result = test_event:is_valid_category()
+--> result is true or false
+```
+
+## is_valid_element method
+
+The **is_valid_element** method checks if the event element is part of [**accepted_elements**](sc_param.md#default-parameters)
+
+### is_valid_element: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_valid_element: example
+
+```lua
+local result = test_event:is_valid_element()
+--> result is true or false
+```
+
+## is_valid_event method
+
+The **is_valid_event** method checks if the event is valid based on [**default parameters**](sc_param.md#default-parameters)
+
+head over the following chapters for more information
+
+- [is_valid_neb_event](#is_valid_neb_event-method)
+- [is_valid_bam_event](#is_valid_bam_event-method)
+- [is_valid_storage_event](#is_valid_storage_event-method)
+
+### is_valid_event: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_valid_event: example
+
+```lua
+local result = test_event:is_valid_event()
+--> result is true or false
+```
+
+## is_valid_neb_event method
+
+The **is_valid_neb_event** 
method checks if the event is a valid **neb** event based on [**default parameters**](sc_param.md#default-parameters) in the **neb** scope + +head over the following chapters for more information + +- [is_valid_host_status_event](#is_valid_host_status_event-method) +- [is_valid_service_status_event](#is_valid_service_status_event-method) + +### is_valid_neb_event: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_neb_event: example + +```lua +local result = test_event:is_valid_neb_event() +--> result is true or false +``` + +## is_valid_host_status_event method + +The **is_valid_host_status_event** method checks if the host status event is valid based on [**default parameters**](sc_param.md#default-parameters) in the **host_status** scope + +head over the following chapters for more information + +- [is_valid_host](#is_valid_host-method) +- [is_valid_event_status](#is_valid_event_status-method) +- [is_valid_event_states](#is_valid_event_states-method) +- [is_valid_poller](#is_valid_poller-method) +- [is_valid_host_severity](#is_valid_host_severity-method) +- [is_valid_hostgroup](#is_valid_hostgroup-method) + +### is_valid_host_status_event: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_host_status_event: example + +```lua +local result = test_event:is_valid_host_status_event() +--> result is true or false +``` + +## is_valid_service_status_event method + +The **is_valid_service_status_event** method checks if the service status event is valid based on [**default parameters**](sc_param.md#default-parameters) in the **service_status** scope + +head over the following chapters for more information + +- [is_valid_host](#is_valid_host-method) +- [is_valid_service](#is_valid_service-method) +- [is_valid_event_status](#is_valid_event_status-method) +- [is_valid_event_states](#is_valid_event_states-method) +- [is_valid_poller](#is_valid_poller-method) +- [is_valid_host_severity](#is_valid_host_severity-method) +- [is_valid_service_severity](#is_valid_service_severity-method) +- [is_valid_hostgroup](#is_valid_hostgroup-method) +- [is_valid_servicegroup](#is_valid_servicegroup-method) + +### is_valid_service_status_event: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_service_status_event: example + +```lua +local result = test_event:is_valid_service_status_event() +--> result is true or false +``` + +## is_valid_host method + +The **is_valid_host** method checks if the host is valid based on [**skip_nil_id and skip_anon_events**](sc_param.md#default-parameters) + +If the host is valid, all broker cache information regarding this host will be added to the event in a cache.host table. 
More details about this cache table [**here**](sc_broker.md#get_host_all_infos-example).
+
+### is_valid_host: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_host: example
+
+```lua
+local result = test_event:is_valid_host()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      host = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## is_valid_service method
+
+The **is_valid_service** method checks if the service is valid based on [**skip_nil_id and skip_anon_events**](sc_param.md#default-parameters) in the **service_status** scope.
+
+If the service is valid, all broker cache information regarding this service will be added to the event in a cache.service table. More details about this cache table [**here**](sc_broker.md#get_service_all_infos-example).
+
+### is_valid_service: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_service: example
+
+```lua
+local result = test_event:is_valid_service()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      service = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## is_valid_event_states method
+
+The **is_valid_event_states** method checks if the event states (downtime, hard/soft, acknowledgement) are valid based on [**hard_only, in_downtime and acknowledged**](sc_param.md#default-parameters) in the **host_status or service_status** scope.
+
+Head over the following chapters for more information:
+
+- [is_valid_event_state_type](#is_valid_event_state_type-method)
+- [is_valid_event_acknowledge_state](#is_valid_event_acknowledge_state-method)
+- [is_valid_event_downtime_state](#is_valid_event_downtime_state-method)
+
+### is_valid_event_states: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_event_states: example
+
+```lua
+local result = test_event:is_valid_event_states()
+--> result is true or false
+```
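+
+As a hedged illustration of how the three underlying checks combine (the parameter values below are the documented defaults, the event itself is hypothetical):
+
+```lua
+-- with the default parameters hard_only = 1, acknowledged = 0 and in_downtime = 0,
+-- an event is kept only if it is in a HARD state, not acknowledged and not in downtime
+local result = test_event:is_valid_event_states()
+--> result is false for a SOFT, acknowledged or in-downtime event
+```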
+
+## is_valid_event_status method
+
+The **is_valid_event_status** method checks if the event status is valid based on [**host_status, service_status or ba_status**](sc_param.md#default-parameters) in the **host_status, service_status or ba_status** scope.
+
+### is_valid_event_status: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_event_status: example
+
+```lua
+local result = test_event:is_valid_event_status()
+--> result is true or false
+```
+
+## is_valid_event_state_type method
+
+The **is_valid_event_state_type** method checks if the event state (HARD/SOFT) is valid based on [**hard_only**](sc_param.md#default-parameters) in the **host_status, service_status** scope.
+
+### is_valid_event_state_type: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_event_state_type: example
+
+```lua
+local result = test_event:is_valid_event_state_type()
+--> result is true or false
+```
+
+## is_valid_event_acknowledge_state method
+
+The **is_valid_event_acknowledge_state** method checks if the event is in a valid acknowledgement state based on [**acknowledged**](sc_param.md#default-parameters) in the **host_status, service_status** scope.
+
+### is_valid_event_acknowledge_state: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_event_acknowledge_state: example
+
+```lua
+local result = test_event:is_valid_event_acknowledge_state()
+--> result is true or false
+```
+
+## is_valid_event_downtime_state method
+
+The **is_valid_event_downtime_state** method checks if the event is in a valid downtime state based on [**in_downtime**](sc_param.md#default-parameters) in the **host_status, service_status** scope.
+
+### is_valid_event_downtime_state: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_event_downtime_state: example
+
+```lua
+local result = test_event:is_valid_event_downtime_state()
+--> result is true or false
+```
+
+## is_valid_hostgroup method
+
+The **is_valid_hostgroup** method checks if the event belongs to an accepted hostgroup based on [**accepted_hostgroups**](sc_param.md#default-parameters) in the **host_status or service_status** scope.
+
+If **accepted_hostgroups** is configured, all broker cache information regarding the hostgroups linked to a host will be added to the event in a cache.hostgroups table. More details about this cache table [**here**](sc_broker.md#get_hostgroups-example).
+
+### is_valid_hostgroup: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_hostgroup: example
+
+```lua
+local result = test_event:is_valid_hostgroup()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      hostgroups = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## is_valid_servicegroup method
+
+The **is_valid_servicegroup** method checks if the event belongs to an accepted servicegroup based on [**accepted_servicegroups**](sc_param.md#default-parameters) in the **service_status** scope.
+
+If **accepted_servicegroups** is configured, all broker cache information regarding the servicegroups linked to a service will be added to the event in a cache.servicegroups table.
More details about this cache table [**here**](sc_broker.md#get_servicegroups-example) + +### is_valid_servicegroup: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_servicegroup: example + +```lua +local result = test_event:is_valid_servicegroup() +--> result is true or false +--[[ + --> test_event.event structure is: + { + --- event data --- + cache = { + servicegroups = { + --- cache data --- + } + --- other cache data type --- + } + } +]] +``` + +## is_valid_bam_event method + +The **is_valid_bam_event** method checks if the bam status event is valid based on [**default parameters**](sc_param.md#default-parameters) in the **bam** scope + +head over the following chapters for more information + +- [is_valid_ba](#is_valid_ba-method) +- [is_valid_ba_status_event](#is_valid_ba_status_event-method) +- [is_valid_ba_downtime_state](#is_valid_ba_downtime_state-method) +- [is_valid_ba_acknowledge_state](#is_valid_ba_acknowledge_state-method) +- [is_valid_bv](#is_valid_bv-method) + +### is_valid_bam_event: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_bam_event: example + +```lua +local result = test_event:is_valid_bam_event() +--> result is true or false +``` + +## is_valid_ba method + +The **is_valid_ba** method checks if the BA is valid based on [**skip_nil_id and skip_anon_events**](sc_param.md#default-parameters) + +If the BA is valid, all broker cache information regarding this BA will be added to the event in a cache.ba table. More details about this cache table [**here**](sc_broker.md#get_ba_infos-example) + +### is_valid_ba: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_ba: example + +```lua +local result = test_event:is_valid_ba() +--> result is true or false +--[[ + --> test_event.event structure is: + { + --- event data --- + cache = { + ba = { + --- cache data --- + } + --- other cache data type --- + } + } +]] +``` + +## is_valid_ba_status_event method + +The **is_valid_ba_status_event** method checks if the BA status is valid based on [**ba_status**](sc_param.md#default-parameters) in the **ba_status** scope + +head over the following chapters for more information + +- [is_valid_event_status](#is_valid_event_status-method) + +### is_valid_ba_status_event: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_ba_status_event: example + +```lua +local result = test_event:is_valid_ba_status_event() +--> result is true or false +``` + +## is_valid_ba_downtime_state method + +The **is_valid_ba_downtime_state** method checks if the BA is in a valid downtime state based on [**in_downtime**](sc_param.md#default-parameters) in the **ba_status** scope + +### is_valid_ba_downtime_state: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_ba_downtime_state: example + +```lua +local result = test_event:is_valid_ba_downtime_state() +--> result is true or false +``` + +## is_valid_ba_acknowledge_state method + +**DOES NOTHING** The **is_valid_ba_acknowledge_state** method checks if the event is in a valid acknowledgement state based on [**acknowledged**](sc_param.md#default-parameters) 
in the **ba_status** scope. As the returns table below shows, it currently always returns true.
+
+### is_valid_ba_acknowledge_state: returns
+
+| return | type | always | condition |
+| ------ | ------- | ------ | --------- |
+| true | boolean | yes | |
+
+### is_valid_ba_acknowledge_state: example
+
+```lua
+local result = test_event:is_valid_ba_acknowledge_state()
+--> result is true
+```
+
+## is_valid_bv method
+
+The **is_valid_bv** method checks if the event is linked to a valid BV based on [**accepted_bvs**](sc_param.md#default-parameters) in the **ba_status** scope.
+
+If **accepted_bvs** is configured, all broker cache information regarding the BVs linked to a BA will be added to the event in a cache.bvs table. More details about this cache table [**here**](sc_broker.md#get_bvs_infos-example).
+
+### is_valid_bv: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_bv: example
+
+```lua
+local result = test_event:is_valid_bv()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      bvs = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## find_hostgroup_in_list method
+
+The **find_hostgroup_in_list** method checks if one of the hostgroups in [**accepted_hostgroups**](sc_param.md#default-parameters) is linked to the host.
+
+### find_hostgroup_in_list: returns
+
+| return | type | always | condition |
+| --------------------------------------------- | ------- | ------ | ----------------------- |
+| the name of the first hostgroup that is found | string | no | a hostgroup must match |
+| false | boolean | no | if no hostgroup matched |
+
+### find_hostgroup_in_list: example
+
+```lua
+-- accepted_hostgroups are my_hostgroup_1 and my_hostgroup_2
+-- host from event is linked to my_hostgroup_2
+
+local result = test_event:find_hostgroup_in_list()
+--> result is: "my_hostgroup_2"
+
+-- accepted_hostgroups are my_hostgroup_1 and my_hostgroup_2
+-- host from event is linked to my_hostgroup_2712
+
+result = test_event:find_hostgroup_in_list()
+--> result is: false
+```
+
+## find_servicegroup_in_list method
+
+The **find_servicegroup_in_list** method checks if one of the servicegroups in [**accepted_servicegroups**](sc_param.md#default-parameters) is linked to the service.
+
+### find_servicegroup_in_list: returns
+
+| return | type | always | condition |
+| ------------------------------------------------ | ------- | ------ | -------------------------- |
+| the name of the first servicegroup that is found | string | no | a servicegroup must match |
+| false | boolean | no | if no servicegroup matched |
+
+### find_servicegroup_in_list: example
+
+```lua
+-- accepted_servicegroups are my_servicegroup_1 and my_servicegroup_2
+-- service from event is linked to my_servicegroup_2
+
+local result = test_event:find_servicegroup_in_list()
+--> result is: "my_servicegroup_2"
+
+-- accepted_servicegroups are my_servicegroup_1 and my_servicegroup_2
+-- service from event is linked to my_servicegroup_2712
+
+result = test_event:find_servicegroup_in_list()
+--> result is: false
+```
+
+## find_bv_in_list method
+
+The **find_bv_in_list** method checks if one of the BVs in [**accepted_bvs**](sc_param.md#default-parameters) is linked to the BA.
+
+### find_bv_in_list: returns
+
+| return | type | always | condition |
+| -------------------------------------- | ------- | ------ | ---------------- |
+| the name of the first BV that is found | string | no | a BV must match |
+| false | boolean | no | if no BV matched |
+
+### find_bv_in_list: example
+
+```lua
+-- accepted_bvs are my_bv_1 and my_bv_2
+-- BA from event is linked to my_bv_2
+
+local result = test_event:find_bv_in_list()
+--> result is: "my_bv_2"
+
+-- accepted_bvs are my_bv_1 and my_bv_2
+-- BA from event is linked to my_bv_2712
+
+result = test_event:find_bv_in_list()
+--> result is: false
+```
+
+## is_valid_poller method
+
+The **is_valid_poller** method checks if the event is monitored from an accepted poller based on [**accepted_pollers**](sc_param.md#default-parameters) in the **host_status or service_status** scope.
+
+If **accepted_pollers** is configured, all broker cache information regarding the poller linked to a host will be added to the event in a cache.poller index. More details about this cache index [**here**](sc_broker.md#get_instance-example).
+
+### is_valid_poller: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_poller: example
+
+```lua
+local result = test_event:is_valid_poller()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      poller = "my_poller_name"
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## find_poller_in_list method
+
+The **find_poller_in_list** method checks if one of the pollers in [**accepted_pollers**](sc_param.md#default-parameters) is monitoring the host.
+
+### find_poller_in_list: returns
+
+| return | type | always | condition |
+| ------------------------------------------ | ------- | ------ | -------------------- |
+| the name of the first poller that is found | string | no | a poller must match |
+| false | boolean | no | if no poller matched |
+
+### find_poller_in_list: example
+
+```lua
+-- accepted_pollers are my_poller_1 and my_poller_2
+-- host from event is monitored by my_poller_2
+
+local result = test_event:find_poller_in_list()
+--> result is: "my_poller_2"
+
+-- accepted_pollers are my_poller_1 and my_poller_2
+-- host from event is monitored by my_poller_2712
+
+result = test_event:find_poller_in_list()
+--> result is: false
+```
+
+## is_valid_host_severity method
+
+The **is_valid_host_severity** method checks if the event has an accepted host severity based on [**host_severity_threshold and host_severity_operator**](sc_param.md#default-parameters) in the **host_status or service_status** scope.
+
+If **host_severity_threshold** is configured, all broker cache information regarding the severity linked to a host will be added to the event in a cache.host_severity index.
More details about this cache index [**here**](sc_broker.md#get_severity-example) + +### is_valid_host_severity: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_host_severity: example + +```lua +local result = test_event:is_valid_host_severity() +--> result is true or false +--[[ + --> test_event.event structure is: + { + --- event data --- + cache = { + host_severity = 2712 + --- other cache data type --- + } + } +]] +``` + +## is_valid_service_severity method + +The **is_valid_service_severity** method checks if the event has an accepted service severity based on [**service_severity_threshold and service_severity_operator**](sc_param.md#default-parameters) in the **service_status** scope + +If the **service_severity_threshold** is configured, all broker cache information regarding the severity linked to a service will be added to the event in a cache.service_severity index. More details about this cache index [**here**](sc_broker.md#get_severity-example) + +### is_valid_service_severity: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_service_severity: example + +```lua +local result = test_event:is_valid_service_severity() +--> result is true or false +--[[ + --> test_event.event structure is: + { + --- event data --- + cache = { + service_severity = 2712 + --- other cache data type --- + } + } +]] +``` + +## is_valid_storage_event method + +**DEPRECATED** does nothing diff --git a/stream-connectors/modules/docs/sc_logger.md b/stream-connectors/modules/docs/sc_logger.md new file mode 100644 index 00000000000..7e396d7ec21 --- /dev/null +++ b/stream-connectors/modules/docs/sc_logger.md @@ -0,0 +1,129 @@ +# Documentation of the sc_logger module + +- [Documentation of the sc_logger module](#documentation-of-the-sc_logger-module) + - [Introduction](#introduction) + - [Best practices](#best-practices) + - [Module initialization](#module-initialization) + - [module constructor](#module-constructor) + - [constructor: Example](#constructor-example) + - [error method](#error-method) + - [error: parameters](#error-parameters) + - [error: example](#error-example) + - [warning method](#warning-method) + - [warning: parameters](#warning-parameters) + - [warning: example](#warning-example) + - [debug method](#debug-method) + - [debug: parameters](#debug-parameters) + - [debug: example](#debug-example) + - [notice method](#notice-method) + - [notice: parameters](#notice-parameters) + - [notice: example](#notice-example) + +## Introduction + +The sc_logger module provides methods to help you handle logging in your stream connectors. It has been made in OOP (object oriented programming) + +## Best practices + +All the stream-connectors-lib are using the following syntax when logging: + +"[module_name:method_name]: your error message" + +For example + +```lua +function EventQueue:do_things() + -- do things -- + + test_logger:debug("[EventQueue:do_things]: this is a debug message that is using the best practices") +end +``` + +This is important for a more efficient troubleshooting. Log messages can come from various places and using this convention drastically improves the readability of the situation + +## Module initialization + +Since this is OOP, it is required to initiate your module. 
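+
+As a quick, hedged illustration of the severity threshold described in the constructor section below (the log file path is just an example):
+
+```lua
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+
+-- with a maximum severity of 2: error, notice and warning messages are written,
+-- while debug messages (which require severity 3) are filtered out
+local test_logger = sc_logger.new("/var/log/test_logger.log", 2)
+test_logger:warning("[module_name:method_name]: this message is logged")
+test_logger:debug("[module_name:method_name]: this message is not logged")
+```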
+ +### module constructor + +Constructor can be initialized with two parameters or it will use default values + +- the log file. **Default value: /var/log/centreon-broker/stream-connector.log** +- the maximum accepted severity level. Going from 1 (only error and notice message) to 3 (all messages including debug). **Default value: 1** + +### constructor: Example + +```lua +-- load module +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") + +-- initiate "mandatory" information for the logger module +local logfile = "/var/log/test_logger.log" +local severity = 1 + +-- create a new instance of the sc_logger module +local test_logger = sc_logger.new(logfile, severity) +``` + +If the logfile and severity are not provided, default values are going to be used. + +## error method + +The **error** method will print an error message in the logfile if **severity is equal or superior to 1** + +### error: parameters + +- message. A string that is the error message you want to display in your logfile + +### error: example + +```lua +-- call error method +test_logger:error("[module_name:method_name]: This is an error message.") +``` + +## warning method + +The **warning** method will print a warning message in the logfile if **severity is equal or superior to 2** + +### warning: parameters + +- message. A string that is the warning message you want to display in your logfile + +### warning: example + +```lua +-- call warning method +test_logger:warning("[module_name:method_name]: This is a warning message.") +``` + +## debug method + +The **debug** method will print a debug message in the logfile if **severity is equal or superior to 3** + +### debug: parameters + +- message. A string that is the debug message you want to display in your logfile + +### debug: example + +```lua +-- call debug method +test_logger:debug("[module_name:method_name]: This is a debug message.") +``` + +## notice method + +The **notice** method will print a notice message in the logfile if **severity is equal or superior to 1**. + +### notice: parameters + +- message. A string that is the notice message you want to display in your logfile + +### notice: example + +```lua +-- call notice method +test_logger:notice("[module_name:method_name]: This is a notice message.") +``` diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md new file mode 100644 index 00000000000..cd535c732c6 --- /dev/null +++ b/stream-connectors/modules/docs/sc_param.md @@ -0,0 +1,129 @@ +# Documentation of the sc_param module + +- [Documentation of the sc_param module](#documentation-of-the-sc_param-module) + - [Introduction](#introduction) + - [Default parameters](#default-parameters) + - [Module initialization](#module-initialization) + - [module constructor](#module-constructor) + - [constructor: Example](#constructor-example) + - [param_override method](#param_override-method) + - [param_override: parameters](#param_override-parameters) + - [param_override: example](#param_override-example) + - [check_params method](#check_params-method) + - [check_params: example](#check_params-example) + +## Introduction + +The sc_param module provides methods to help you handle parameters for your stream connectors. It also provides a list of default parameters that are available for every stream connectors (the complete list is below). 
It is written in OOP (object-oriented programming) style.
+
+### Default parameters
+
+| Parameter name | type | default value | description | default scope | additional information |
+| -------------- | ---- | ------------- | ----------- | ------------- | ---------------------- |
+| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a comma-separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) |
+| accepted_elements | string | host_status,service_status,ba_status | each event is linked to a broker element that we can use to filter events | | it is a comma-separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore. "Host status" becomes "host_status") |
+| host_status | string | 0,1,2 | comma-separated list of accepted host statuses (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | |
+| service_status | string | 0,1,2,3 | comma-separated list of accepted service statuses (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | |
+| ba_status | string | 0,1,2 | comma-separated list of accepted BA statuses (0 = OK, 1 = WARNING, 2 = CRITICAL) | | |
+| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT states too) | host_status(neb), service_status(neb) | |
+| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | |
+| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | |
+| accepted_hostgroups | string | | comma-separated list of accepted hostgroups (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb) | |
+| accepted_servicegroups | string | | comma-separated list of accepted servicegroups (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb) | |
+| accepted_bvs | string | | comma-separated list of accepted BVs (for example: my_bv_1,my_bv_2) | ba_status(bam) | |
+| accepted_pollers | string | | comma-separated list of accepted pollers (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb) | |
+| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam) | |
+| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam) | |
+| max_buffer_size | number | 1 | the number of events the stream connector stores before sending them (bulk send is made using a value above 1) | | |
+| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are sent even if max_buffer_size hasn't been reached | | |
+| service_severity_threshold | number | nil | the threshold used to filter severity for services. It must be used with the service_severity_operator option | service_status(neb) | |
+| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb) | |
+| host_severity_threshold | number | nil | the threshold used to filter severity for hosts. It must be used with the host_severity_operator option | host_status(neb), service_status(neb) | |
+| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb) | |
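+
+To make the severity comparison order concrete, here is a short, hedged illustration; the values are hypothetical:
+
+```lua
+-- hypothetical configuration: host_severity_threshold = 3, host_severity_operator = ">="
+local host_severity_threshold = 3
+local host_severity = 2
+
+-- documented operation order: threshold >= host severity
+local accepted = host_severity_threshold >= host_severity
+--> accepted is true (3 >= 2); with a host severity of 5 it would be false
+```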
+
+## Module initialization
+
+Since this is OOP, you must instantiate the module before using it.
+
+### module constructor
+
+The constructor must be initialized with two parameters:
+
+- a sc_common instance
+- a sc_logger instance (a new one with default parameters is created if not provided)
+
+### constructor: Example
+
+```lua
+-- load modules
+local sc_param = require("centreon-stream-connectors-lib.sc_param")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_param.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- create a new instance of the sc_common module
+local test_common = sc_common.new(test_logger)
+
+-- create a new instance of the sc_param module
+local test_param = sc_param.new(test_common, test_logger)
+```
+
+## param_override method
+
+The **param_override** method checks if a standard parameter from [**Default parameters**](#default-parameters) has been overridden by the user. If so, it replaces the default value with the one provided by the user.
+
+### param_override: parameters
+
+| parameter | type | optional | default value |
+| -------------------------------------- | ----- | -------- | ------------- |
+| the list of parameters and their value | table | no | |
+
+### param_override: example
+
+```lua
+--> test_param.param.accepted_elements is: "host_status,service_status,ba_status"
+--> test_param.param.in_downtime is: 0
+
+-- change the value of the default parameters called accepted_elements and in_downtime (normally they come from the web configuration, we just simulate this behavior in this example)
+local params = {
+  accepted_elements = "ba_status",
+  in_downtime = 1
+}
+
+-- use the param_override method to override the default values for in_downtime and accepted_elements
+test_param:param_override(params)
+--> test_param.param.accepted_elements is: "ba_status"
+--> test_param.param.in_downtime is: 1
+```
+
+## check_params method
+
+The **check_params** method applies various conformity checks on the default parameters.
If a conformity check fails on a parameter, the parameter is reverted to its [**default value**](#default-parameters).
+
+### check_params: example
+
+```lua
+--> test_param.param.accepted_elements is: "host_status,service_status,ba_status"
+--> test_param.param.in_downtime is: 0
+
+-- change the value of the default parameters called accepted_elements and in_downtime (normally they come from the web configuration, we just simulate this behavior in this example)
+local params = {
+  accepted_elements = "ba_status",
+  in_downtime = 12 -- this must be 0 or 1
+}
+
+-- use the param_override method to override the default values for in_downtime and accepted_elements
+test_param:param_override(params)
+--> test_param.param.accepted_elements is: "ba_status"
+--> test_param.param.in_downtime is: 12
+
+-- check default params validity
+test_param:check_params()
+--> test_param.param.accepted_elements is: "ba_status"
+--> test_param.param.in_downtime is: 0 (12 is not a valid value, it goes back to the default one)
+```
diff --git a/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec b/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec
index fdb9e7948ab..27f21877cf7 100644
--- a/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec
+++ b/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec
@@ -1,5 +1,7 @@
+package = "centreon-stream-connectors-lib"
+version = "0.1.0-1"
 source = {
-   url = "git+https://github.com/centreon/centreon-stream-connectors",
+   url = "git+https://github.com/centreon/centreon-stream-connector-scripts",
    tag = "0.1.0"
 }
 description = {
diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0.rockspec b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0.rockspec
new file mode 100644
index 00000000000..b3eb7de32d0
--- /dev/null
+++ b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0.rockspec
@@ -0,0 +1,28 @@
+package = "centreon-stream-connectors-lib"
+version = "1.0.0-1"
+source = {
+   url = "git+https://github.com/centreon/centreon-stream-connector-scripts",
+   tag = "1.0.0"
+}
+description = {
+   summary = "Centreon stream connectors lua modules",
+   detailed = [[
+      These modules provide helpful methods to create
+      stream connectors for Centreon
+   ]],
+   license = ""
+}
+dependencies = {
+   "lua >= 5.1, < 5.4"
+}
+build = {
+   type = "builtin",
+   modules = {
+      ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua",
+      ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua",
+      ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua",
+      ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua",
+      ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua",
+      ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua"
+   }
+}
\ No newline at end of file
diff --git a/stream-connectors/modules/tests/bam_stream_connector.lua b/stream-connectors/modules/tests/bam_stream_connector.lua
index 0b8e0cbef95..6908f2f262d 100644
--- a/stream-connectors/modules/tests/bam_stream_connector.lua
+++ b/stream-connectors/modules/tests/bam_stream_connector.lua
@@ -55,11 +55,11 @@ function EventQueue:format_event()
   -- starting to handle information from BA
self.sc_event.event.formated_event = { -- name of BA has been stored in a cache table when calling is_valid_even() - my_ba = self.sc_event.event.cache.ba_name, + my_ba = self.sc_event.event.cache.ba.ba_name, -- states (critical, ok...) are found and converted to human format thanks to the status_mapping table my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], -- like the name of the BA, BA description is stored in the cache table of the event - my_description = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.ba_description, "no description found") + my_description = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.ba.ba_description, "no description found") } self:add() diff --git a/stream-connectors/modules/tests/neb_stream_connector.lua b/stream-connectors/modules/tests/neb_stream_connector.lua index 4827499604f..54ac4311e27 100644 --- a/stream-connectors/modules/tests/neb_stream_connector.lua +++ b/stream-connectors/modules/tests/neb_stream_connector.lua @@ -55,21 +55,21 @@ function EventQueue:format_event() -- starting to handle shared information between host and service self.sc_event.event.formated_event = { -- name of host has been stored in a cache table when calling is_valid_even() - my_host = self.sc_event.event.cache.name, + my_host = self.sc_event.event.cache.host.name, -- states (critical, ok...) are found and converted to human format thanks to the status_mapping table my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], -- get output of the event my_output = self.sc_common:ifnil_or_empty(string.match(self.sc_event.event.output, "^(.*)\n"), "no output"), -- like the name of the host, notes are stored in the cache table of the event - my_notes = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.notes, "no notes found") + my_notes = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes, "no notes found") } -- handle service specific information if self.sc_event.event.element == 24 then -- like the name of the host, service description is stored in the cache table of the event - self.sc_event.event.formated_event.my_description = self.sc_event.event.cache.description + self.sc_event.event.formated_event.my_description = self.sc_event.event.cache.service.description -- if the service doesn't have notes, we can retrieve the ones from the host by fetching it from the broker cache - self.sc_event.event.formated_event.my_notes = self.sc_common:ifnil_or_empty(self.sc_broker:get_host_infos(self.sc_event.event.host_id, "notes"), "no notes found") + self.sc_event.event.formated_event.my_notes = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.service.notes, self.sc_event.event.formated_event.my_notes) end self:add() From a18b40a681aa5edc468b8fcbe52f8390ad73df8d Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 6 May 2021 11:47:26 +0200 Subject: [PATCH 057/219] fix memory leak + sc naming (#47) --- .../centreon-certified/bsm/bsm_connector-apiv1.lua | 2 ++ .../centreon-certified/pagerduty/pagerduty-apiv1.lua | 2 ++ ...rometheus-gateway.lua => prometheus-gateway-apiv1.lua} | 2 ++ .../{connector-servicenow.lua => servicenow-apiv1.lua} | 0 ...plunk-events-http.lua => splunk-events-http-apiv1.lua} | 6 ++++-- ...events-luacurl.lua => splunk-events-luacurl-apiv1.lua} | 8 +++++--- ...unk-metrics-http.lua => splunk-metrics-http-apiv1.lua} | 8 +++++--- ...trics-luacurl.lua => 
splunk-metrics-luacurl-apiv1.lua} | 6 ++++-- ...plunk-states-http.lua => splunk-states-http-apiv1.lua} | 0 .../warp10/{export-warp10.lua => export-warp10-apiv1.lua} | 1 + .../community-powered/canopsis/bbdo2canopsis.lua | 6 +++--- 11 files changed, 28 insertions(+), 13 deletions(-) rename stream-connectors/centreon-certified/prometheus/{prometheus-gateway.lua => prometheus-gateway-apiv1.lua} (99%) rename stream-connectors/centreon-certified/servicenow/{connector-servicenow.lua => servicenow-apiv1.lua} (100%) rename stream-connectors/centreon-certified/splunk/{splunk-events-http.lua => splunk-events-http-apiv1.lua} (98%) rename stream-connectors/centreon-certified/splunk/{splunk-events-luacurl.lua => splunk-events-luacurl-apiv1.lua} (98%) rename stream-connectors/centreon-certified/splunk/{splunk-metrics-http.lua => splunk-metrics-http-apiv1.lua} (98%) rename stream-connectors/centreon-certified/splunk/{splunk-metrics-luacurl.lua => splunk-metrics-luacurl-apiv1.lua} (98%) rename stream-connectors/centreon-certified/splunk/{splunk-states-http.lua => splunk-states-http-apiv1.lua} (100%) rename stream-connectors/centreon-certified/warp10/{export-warp10.lua => export-warp10-apiv1.lua} (99%) diff --git a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua index 3b2cd9ad82c..fb16e0f36dc 100644 --- a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua +++ b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua @@ -246,6 +246,8 @@ function EventQueue:flush() -- collecting results http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + -- Handling the return code local retval = false if http_response_code == 202 or http_response_code == 200 then diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua index 20bbf6bac5a..b546a53f3b9 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua @@ -338,6 +338,8 @@ function EventQueue:flush() -- collecting results http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + -- Handling the return code local retval = false if http_response_code == 202 then diff --git a/stream-connectors/centreon-certified/prometheus/prometheus-gateway.lua b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua similarity index 99% rename from stream-connectors/centreon-certified/prometheus/prometheus-gateway.lua rename to stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua index 7784635d3e4..07f947705e1 100644 --- a/stream-connectors/centreon-certified/prometheus/prometheus-gateway.lua +++ b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua @@ -883,6 +883,8 @@ function EventQueue:send_data () -- collecting results httpResponseCode = httpRequest:getinfo(curl.INFO_RESPONSE_CODE) + httpRequest:close() + -- Handling the return code local retval = false if httpResponseCode == 200 then diff --git a/stream-connectors/centreon-certified/servicenow/connector-servicenow.lua b/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua similarity index 100% rename from stream-connectors/centreon-certified/servicenow/connector-servicenow.lua rename to stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua diff --git 
a/stream-connectors/centreon-certified/splunk/splunk-events-http.lua b/stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua similarity index 98% rename from stream-connectors/centreon-certified/splunk/splunk-events-http.lua rename to stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua index 23cb677b5e3..a15509552f2 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-http.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua @@ -105,8 +105,8 @@ function EventQueue.new(conf) retval.events = {}, setmetatable(retval, EventQueue) -- Internal data initialization - broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) - return retval + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + return retval end -------------------------------------------------------------------------------- @@ -212,6 +212,8 @@ function EventQueue:flush() -- collecting results http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + -- Handling the return code local retval = false if http_response_code == 200 then diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-luacurl.lua b/stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua similarity index 98% rename from stream-connectors/centreon-certified/splunk/splunk-events-luacurl.lua rename to stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua index aa867f1519a..8c56a9a7dd8 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-luacurl.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua @@ -112,8 +112,8 @@ function EventQueue.new(conf) retval.events = {}, setmetatable(retval, EventQueue) -- Internal data initialization - broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) - return retval + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + return retval end -------------------------------------------------------------------------------- @@ -224,10 +224,12 @@ function EventQueue:flush() -- performing the HTTP request http_request:perform() - + -- collecting results http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + -- Handling the return code local retval = false if http_response_code == 200 then diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-http.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua similarity index 98% rename from stream-connectors/centreon-certified/splunk/splunk-metrics-http.lua rename to stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua index 74ee52c29b4..551d86ba581 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-http.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua @@ -105,8 +105,8 @@ function EventQueue.new(conf) retval.events = {}, setmetatable(retval, EventQueue) -- Internal data initialization - broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) - return retval + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. 
retval.__internal_ts_last_flush) + return retval end -------------------------------------------------------------------------------- @@ -210,7 +210,9 @@ function EventQueue:flush() http_request:perform() -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() -- Handling the return code local retval = false diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua similarity index 98% rename from stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl.lua rename to stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua index 06c80ade68a..3ff1f91d7ce 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua @@ -104,8 +104,8 @@ function EventQueue.new(conf) retval.events = {}, setmetatable(retval, EventQueue) -- Internal data initialization - broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) - return retval + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + return retval end -------------------------------------------------------------------------------- @@ -220,6 +220,8 @@ function EventQueue:flush() -- collecting results http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + -- Handling the return code local retval = false if http_response_code == 200 then diff --git a/stream-connectors/centreon-certified/splunk/splunk-states-http.lua b/stream-connectors/centreon-certified/splunk/splunk-states-http-apiv1.lua similarity index 100% rename from stream-connectors/centreon-certified/splunk/splunk-states-http.lua rename to stream-connectors/centreon-certified/splunk/splunk-states-http-apiv1.lua diff --git a/stream-connectors/centreon-certified/warp10/export-warp10.lua b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua similarity index 99% rename from stream-connectors/centreon-certified/warp10/export-warp10.lua rename to stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua index 5878c0d8f1d..9103aa4745d 100644 --- a/stream-connectors/centreon-certified/warp10/export-warp10.lua +++ b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua @@ -66,6 +66,7 @@ local function flush() postfields = buf } c:perform() + c:close() my_data.data = {} return true end diff --git a/stream-connectors/community-powered/canopsis/bbdo2canopsis.lua b/stream-connectors/community-powered/canopsis/bbdo2canopsis.lua index 2e516f621ee..ecb66ccbd3c 100755 --- a/stream-connectors/community-powered/canopsis/bbdo2canopsis.lua +++ b/stream-connectors/community-powered/canopsis/bbdo2canopsis.lua @@ -389,7 +389,7 @@ function stateChanged(d) end if d.state_type == 1 and -- if the event is in hard state - d.last_hard_state_change ~= nil then -- if the event has been in a hard state + d.last_hard_state_change ~= nil then -- if the event has been in a hard state -- if the state has changed -- (like noted in the omi connector, it could have a slight delta between last_check and last_hard_state_change) @@ -415,8 +415,8 @@ function stateChanged(d) end - -- note : No need to send new event without last_hard_state_change because - -- there is no state either + -- note 
: No need to send new event without last_hard_state_change because + -- there is no state either end From a0fa3977391c641d84453635702630cd7971b647 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 31 May 2021 09:01:55 +0200 Subject: [PATCH 058/219] Add downtime, ack, dedup features (#48) * add ack event status param * add new filters for downtime and ack * start dedup * add dedup and finish downtime and ack * add downtime event to the list of filtered event * fix wrong filter (cat instead of elem) * fix compil error (missing method in module) * change return of split method * add status mapping for downtime event * add a downtime event test stream connector * improve downtime event handling +doc * start a broker data structure doc * ack broker structure doc +ack event small refacto * add kafka test script * kafka param methods + doc * integrate lua librdkafka into centreon sc modules * fix method name and regex in sc_params * rockespecing modules for 1.1.0 --- .../rdkafka/config.lua | 167 +++++++ .../rdkafka/librdkafka.lua | 74 +++ .../rdkafka/producer.lua | 172 +++++++ .../rdkafka/topic.lua | 55 +++ .../rdkafka/topic_config.lua | 70 +++ .../sc_common.lua | 7 +- .../sc_event.lua | 427 +++++++++++++++++- .../sc_params.lua | 69 ++- stream-connectors/modules/docs/README.md | 80 ++-- .../modules/docs/broker_data_structure.md | 111 +++++ stream-connectors/modules/docs/sc_common.md | 10 +- stream-connectors/modules/docs/sc_event.md | 244 +++++++++- stream-connectors/modules/docs/sc_param.md | 129 +++++- ...on-stream-connectors-lib-0.1.0-1.rockspec} | 0 ...on-stream-connectors-lib-1.0.0-1.rockspec} | 0 ...eon-stream-connectors-lib-1.1.0-1.rockspec | 33 ++ .../acknowledgement_stream_connector.lua | 208 +++++++++ .../tests/downtime_stream_connector.lua | 210 +++++++++ .../modules/tests/kafka_test_connexion.lua | 45 ++ 19 files changed, 2028 insertions(+), 83 deletions(-) create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua create mode 100644 stream-connectors/modules/docs/broker_data_structure.md rename stream-connectors/modules/specs/0.1.x/{centreon-stream-connectors-lib-0.1.0.rockspec => centreon-stream-connectors-lib-0.1.0-1.rockspec} (100%) rename stream-connectors/modules/specs/1.0.x/{centreon-stream-connectors-lib-1.0.0.rockspec => centreon-stream-connectors-lib-1.0.0-1.rockspec} (100%) create mode 100644 stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-1.rockspec create mode 100644 stream-connectors/modules/tests/acknowledgement_stream_connector.lua create mode 100644 stream-connectors/modules/tests/downtime_stream_connector.lua create mode 100644 stream-connectors/modules/tests/kafka_test_connexion.lua diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua new file mode 100644 index 00000000000..96e3d36c6e5 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua @@ -0,0 +1,167 @@ +#!/usr/bin/lua + +local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") +local ffi = 
require 'ffi' + +local KafkaConfig = {} +KafkaConfig.__index = KafkaConfig + +--[[ + Create configuration object or dublicate one. + Result will be set up the defaults. + + Please see CONFIGURATION.md for the default settings. +]]-- + +function KafkaConfig.new(original_config) + local config = { cb_ = {} } + setmetatable(config, KafkaConfig) + + if original_config and original_config.kafka_conf_ then + rawset(config, "kafka_conf_", librdkafka.rd_kafka_conf_dup(original_config.kafka_conf_)) + config:set_delivery_cb(original_config.cb_.dr_cb_) + config:set_stat_cb(original_config.cb_.stat_cb_) + config:set_error_cb(original_config.cb_.error_cb_) + config:set_log_cb(original_config.cb_.log_cb_) + else + rawset(config, "kafka_conf_", librdkafka.rd_kafka_conf_new()) + end + ffi.gc(config.kafka_conf_, function (config) + librdkafka.rd_kafka_conf_destroy(config) + end + ) + + return config +end + + +--[[ + Dump the configuration properties and values of `conf` to a map + with "key", "value" pairs. +]]-- + +function KafkaConfig:dump() + assert(self.kafka_conf_ ~= nil) + + local size = ffi.new("size_t[1]") + local dump = librdkafka.rd_kafka_conf_dump(self.kafka_conf_, size) + ffi.gc(dump, function(d) librdkafka.rd_kafka_conf_dump_free(d, size[0]) end) + + local result = {} + for i = 0, tonumber(size[0])-1,2 do + result[ffi.string(dump[i])] = ffi.string(dump[i+1]) + end + + return result +end + + +--[[ + Sets a configuration property. + + In case of failure "error(errstr)" is called and 'errstr' + is updated to contain a human readable error string. +]]-- + +function KafkaConfig:__newindex(name, value) + assert(self.kafka_conf_ ~= nil) + + local ERRLEN = 256 + local errbuf = ffi.new("char[?]", ERRLEN) -- cdata objects are garbage collected + + if librdkafka.rd_kafka_conf_set(self.kafka_conf_, name, tostring(value), errbuf, ERRLEN) ~= librdkafka.RD_KAFKA_CONF_OK then + error(ffi.string(errbuf)) + end +end + + +--[[ + Set delivery report callback in provided conf object. + + Format: callback_function(payload, errstr) + 'payload' is the message payload + 'errstr' nil if everything is ok or readable error description otherwise +]]-- + +function KafkaConfig:set_delivery_cb(callback) + assert(self.kafka_conf_ ~= nil) + + if callback then + self.cb_.dr_cb_ = callback + librdkafka.rd_kafka_conf_set_dr_cb(self.kafka_conf_, + function(rk, payload, len, err) + local errstr = nil + if err ~= librdkafka.RD_KAFKA_RESP_ERR_NO_ERROR then + errstr = ffi.string(librdkafka.rd_kafka_err2str(err)) + end + callback(ffi.string(payload, tonumber(len)), errstr) + end) + end +end + + +--[[ + Set statistics callback. + The statistics callback is called from `KafkaProducer:poll` every + `statistics.interval.ms` (needs to be configured separately). + + Format: callback_function(json) + 'json' - String containing the statistics data in JSON format +]]-- + +function KafkaConfig:set_stat_cb(callback) + assert(self.kafka_conf_ ~= nil) + + if callback then + self.cb_.stat_cb_ = callback + librdkafka.rd_kafka_conf_set_stats_cb(self.kafka_conf_, + function(rk, json, json_len) + callback(ffi.string(json, json_len)) + return 0 --librdkafka will immediately free the 'json' pointer. + end) + end +end + + +--[[ + Set error callback. + The error callback is used by librdkafka to signal critical errors + back to the application. 
+ + Format: callback_function(err_numb, reason) +]]-- + +function KafkaConfig:set_error_cb(callback) + assert(self.kafka_conf_ ~= nil) + + if callback then + self.cb_.error_cb_ = callback + librdkafka.rd_kafka_conf_set_error_cb(self.kafka_conf_, + function(rk, err, reason) + callback(tonumber(err), ffi.string(reason)) + end) + end +end + +--[[ + Set logger callback. + The default is to print to stderr. + Alternatively the application may provide its own logger callback. + Or pass 'callback' as nil to disable logging. + + Format: callback_function(level, fac, buf) +]]-- + +function KafkaConfig:set_log_cb(callback) + assert(self.kafka_conf_ ~= nil) + + if callback then + self.cb_.log_cb_ = callback + librdkafka.rd_kafka_conf_set_log_cb(self.kafka_conf_, + function(rk, level, fac, buf) + callback(tonumber(level), ffi.string(fac), ffi.string(buf)) + end) + end +end + +return KafkaConfig diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua new file mode 100644 index 00000000000..df0a75de0ea --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua @@ -0,0 +1,74 @@ +#!/usr/bin/lua + +local ffi = require 'ffi' + +ffi.cdef[[ + typedef struct rd_kafka_s rd_kafka_t; + typedef struct rd_kafka_conf_s rd_kafka_conf_t; + typedef struct rd_kafka_topic_s rd_kafka_topic_t; + typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t; + + typedef enum rd_kafka_type_t { + RD_KAFKA_PRODUCER, + RD_KAFKA_CONSUMER + } rd_kafka_type_t; + + typedef enum { + RD_KAFKA_RESP_ERR__BEGIN = -200, + RD_KAFKA_RESP_ERR_NO_ERROR = 0, + /* ... */ + } rd_kafka_resp_err_t; + + typedef enum { + RD_KAFKA_CONF_UNKNOWN = -2, /* Unknown configuration name. */ + RD_KAFKA_CONF_INVALID = -1, /* Invalid configuration value. 
*/ + RD_KAFKA_CONF_OK = 0 /* Configuration okay */ + } rd_kafka_conf_res_t; + + rd_kafka_conf_t *rd_kafka_conf_new (void); + rd_kafka_conf_t *rd_kafka_conf_dup (const rd_kafka_conf_t *conf); + void rd_kafka_conf_destroy (rd_kafka_conf_t *conf); + const char **rd_kafka_conf_dump (rd_kafka_conf_t *conf, size_t *cntp); + void rd_kafka_conf_dump_free (const char **arr, size_t cnt); + rd_kafka_conf_res_t rd_kafka_conf_set (rd_kafka_conf_t *conf, const char *name, const char *value, + char *errstr, size_t errstr_size); + void rd_kafka_conf_set_dr_cb (rd_kafka_conf_t *conf, void (*dr_cb) (rd_kafka_t *rk, + void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque)); + void rd_kafka_conf_set_error_cb (rd_kafka_conf_t *conf, void (*error_cb) (rd_kafka_t *rk, int err, + const char *reason, void *opaque)); + void rd_kafka_conf_set_stats_cb (rd_kafka_conf_t *conf, int (*stats_cb) (rd_kafka_t *rk, char *json, + size_t json_len, void *opaque)); + void rd_kafka_conf_set_log_cb (rd_kafka_conf_t *conf, void (*log_cb) (const rd_kafka_t *rk, int level, + const char *fac, const char *buf)); + + rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size); + void rd_kafka_destroy (rd_kafka_t *rk); + int rd_kafka_brokers_add (rd_kafka_t *rk, const char *brokerlist); + + rd_kafka_topic_conf_t *rd_kafka_topic_conf_new (void); + rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup (const rd_kafka_topic_conf_t *conf); + rd_kafka_conf_res_t rd_kafka_topic_conf_set (rd_kafka_topic_conf_t *conf, const char *name, + const char *value, char *errstr, size_t errstr_size); + void rd_kafka_topic_conf_destroy (rd_kafka_topic_conf_t *topic_conf); + const char **rd_kafka_topic_conf_dump (rd_kafka_topic_conf_t *conf, size_t *cntp); + + rd_kafka_topic_t *rd_kafka_topic_new (rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf); + const char *rd_kafka_topic_name (const rd_kafka_topic_t *rkt); + void rd_kafka_topic_destroy (rd_kafka_topic_t *rkt); + + int rd_kafka_produce (rd_kafka_topic_t *rkt, int32_t partitition, int msgflags, void *payload, size_t len, + const void *key, size_t keylen, void *msg_opaque); + + int rd_kafka_outq_len (rd_kafka_t *rk); + int rd_kafka_poll (rd_kafka_t *rk, int timeout_ms); + + int rd_kafka_wait_destroyed (int timeout_ms); + + rd_kafka_resp_err_t rd_kafka_errno2err (int errnox); + const char *rd_kafka_err2str (rd_kafka_resp_err_t err); + int rd_kafka_thread_cnt (void); +]] + +local librdkafka = ffi.load("librdkafka.so.1") +return librdkafka + diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua new file mode 100644 index 00000000000..e6805971ad1 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua @@ -0,0 +1,172 @@ +#!/usr/bin/lua + +local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") +local KafkaConfig = require("centreon-stream-connectors-lib.rdkafka.config") +local KafkaTopic = require("centreon-stream-connectors-lib.rdkafka.topic") +local ffi = require 'ffi' + +local DEFAULT_DESTROY_TIMEOUT_MS = 3000 + +local KafkaProducer = {} +KafkaProducer.__index = KafkaProducer + + +--[[ + Creates a new Kafka producer. + + 'kafka_config' is an optional object that will be used instead of the default + configuration. + The 'kafka_config' object is reusable after this call. 
+
+    'destroy_timeout_ms' is a parameter that is used to determine how long the client
+    will wait for all rd_kafka_t objects to be destroyed.
+
+    Returns the new object on success or "error(errstr)" on error in which case
+    'errstr' is set to a human readable error message.
+]]--
+
+function KafkaProducer.new(kafka_config, destroy_timeout_ms)
+    local config = nil
+    if kafka_config ~= nil then
+        config = KafkaConfig.new(kafka_config).kafka_conf_
+        ffi.gc(config, nil)
+    end
+
+    local ERRLEN = 256
+    local errbuf = ffi.new("char[?]", ERRLEN) -- cdata objects are garbage collected
+    local kafka = librdkafka.rd_kafka_new(librdkafka.RD_KAFKA_PRODUCER, config, errbuf, ERRLEN)
+
+    if kafka == nil then
+        error(ffi.string(errbuf))
+    end
+
+    local producer = {kafka_ = kafka}
+    KafkaTopic.kafka_topic_map_[kafka] = {}
+
+    setmetatable(producer, KafkaProducer)
+    ffi.gc(producer.kafka_, function (...)
+        for k, topic_ in pairs(KafkaTopic.kafka_topic_map_[producer.kafka_]) do
+            librdkafka.rd_kafka_topic_destroy(topic_)
+        end
+        KafkaTopic.kafka_topic_map_[producer.kafka_] = nil
+        librdkafka.rd_kafka_destroy(...)
+        librdkafka.rd_kafka_wait_destroyed(destroy_timeout_ms or DEFAULT_DESTROY_TIMEOUT_MS)
+        end
+    )
+
+    return producer
+end
+
+
+--[[
+    Adds one or more brokers to the kafka handle's list of initial brokers.
+    Additional brokers will be discovered automatically as soon as rdkafka
+    connects to a broker by querying the broker metadata.
+
+    If a broker name resolves to multiple addresses (and possibly
+    address families) all will be used for connection attempts in
+    round-robin fashion.
+
+    'broker_list' is a comma-separated list of brokers in the format:
+    <host1>[:<port1>],<host2>[:<port2>]...
+
+    Returns the number of brokers successfully added.
+
+    NOTE: Brokers may also be defined with the 'metadata.broker.list'
+    configuration property.
+]]--
+
+function KafkaProducer:brokers_add(broker_list)
+    assert(self.kafka_ ~= nil)
+    return librdkafka.rd_kafka_brokers_add(self.kafka_, broker_list)
+end
+
+
+--[[
+    Produces and sends a single message to the broker.
+
+    `produce()` is an asynchronous, non-blocking API.
+
+    'partition' is the target partition, either:
+      - RD_KAFKA_PARTITION_UA (unassigned) for
+        automatic partitioning using the topic's partitioner function, or
+      - a fixed partition (0..N)
+
+    'payload' is the message payload.
+
+    'key' is an optional message key, if non-nil it will be passed to the topic
+    partitioner as well as be sent with the message to the broker and passed
+    on to the consumer.
+
+    Returns "error(errstr)" on error in which case 'errstr' is set to a human
+    readable error message.
+]]--
+
+function KafkaProducer:produce(kafka_topic, partition, payload, key)
+    assert(self.kafka_ ~= nil)
+    assert(kafka_topic.topic_ ~= nil)
+
+    local keylen = 0
+    if key then keylen = #key end
+
+    if payload == nil or #payload == 0 then
+        if keylen == 0 then
+            return
+        end
+    end
+
+    local RD_KAFKA_MSG_F_COPY = 0x2
+    local produce_result = librdkafka.rd_kafka_produce(kafka_topic.topic_, partition, RD_KAFKA_MSG_F_COPY,
+        ffi.cast("void*", payload), #payload, ffi.cast("void*", key), keylen, nil)
+
+    if produce_result == -1 then
+        error(ffi.string(librdkafka.rd_kafka_err2str(librdkafka.rd_kafka_errno2err(ffi.errno()))))
+    end
+end
+
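+-- Usage sketch (comment only, not part of the binding; the broker address and
+-- topic name below are illustrative values):
+--   local producer = KafkaProducer.new(my_kafka_config, 3000)
+--   producer:brokers_add("localhost:9092")
+--   producer:produce(my_topic, -1, '{"status": "ok"}') -- -1 = RD_KAFKA_PARTITION_UA
+--   while producer:outq_len() > 0 do producer:poll(100) end
+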
+--[[
+    Polls the provided kafka handle for events.
+
+    Events will cause application provided callbacks to be called.
+
+    The 'timeout_ms' argument specifies the minimum amount of time
+    (in milliseconds) that the call will block waiting for events.
+    For non-blocking calls, provide 0 as 'timeout_ms'.
+    To wait indefinitely for an event, provide -1.
+
+    Events:
+      - delivery report callbacks (if dr_cb is configured) [producer]
+      - error callbacks (if error_cb is configured) [producer & consumer]
+      - stats callbacks (if stats_cb is configured) [producer & consumer]
+
+    Returns the number of events served.
+
+    NOTE: This function doesn't use jit compilation
+]]--
+
+function KafkaProducer:poll(timeout_ms)
+    assert(self.kafka_ ~= nil)
+    return librdkafka.rd_kafka_poll(self.kafka_, timeout_ms)
+end
+-- jit.off(KafkaProducer.poll)
+
+--[[
+    Returns the current out queue length:
+    messages waiting to be sent to, or acknowledged by, the broker.
+]]--
+
+function KafkaProducer:outq_len()
+    assert(self.kafka_ ~= nil)
+    return librdkafka.rd_kafka_outq_len(self.kafka_)
+end
+
+--[[
+    Retrieves the current number of threads in use by librdkafka.
+]]--
+
+function KafkaProducer.thread_cnt()
+    return librdkafka.rd_kafka_thread_cnt()
+end
+
+return KafkaProducer
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua
new file mode 100644
index 00000000000..7a55fffa40b
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua
@@ -0,0 +1,55 @@
+#!/usr/bin/lua
+
+local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka")
+local KafkaTopicConfig = require("centreon-stream-connectors-lib.rdkafka.topic_config")
+local ffi = require 'ffi'
+
+local KafkaTopic = { kafka_topic_map_ = {} }
+-- KafkaProducer will delete all topics on destroy
+-- This is done in order to avoid destroying topics before destroying the producer
+
+KafkaTopic.__index = KafkaTopic
+
+--[[
+    Creates a new topic handle for topic named 'topic_name'.
+
+    'conf' is an optional configuration for the topic that will be used
+    instead of the default topic configuration.
+    The 'conf' object is reusable after this call.
+
+    Returns the new topic handle or "error(errstr)" on error in which case
+    'errstr' is set to a human readable error message.
+]]--
+
+function KafkaTopic.new(kafka_producer, topic_name, topic_config)
+    assert(kafka_producer.kafka_ ~= nil)
+
+    local config = nil
+    if topic_config and topic_config.topic_conf_ then
+        config = KafkaTopicConfig.new(topic_config).topic_conf_
+        ffi.gc(config, nil)
+    end
+
+    local rd_topic = librdkafka.rd_kafka_topic_new(kafka_producer.kafka_, topic_name, config)
+
+    if rd_topic == nil then
+        error(ffi.string(librdkafka.rd_kafka_err2str(librdkafka.rd_kafka_errno2err(ffi.errno()))))
+    end
+
+    local topic = {topic_ = rd_topic}
+    setmetatable(topic, KafkaTopic)
+    table.insert(KafkaTopic.kafka_topic_map_[kafka_producer.kafka_], rd_topic)
+    return topic
+end
+
+
+--[[
+    Returns the topic name
+]]--
+
+function KafkaTopic:name()
+    assert(self.topic_ ~= nil)
+    return ffi.string(librdkafka.rd_kafka_topic_name(self.topic_))
+end
+
+return KafkaTopic
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua
new file mode 100644
index 00000000000..a9e0e0f368a
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua
@@ -0,0 +1,70 @@
+#!/usr/bin/lua
+
+local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka")
+local ffi = require 'ffi'
+
+local KafkaTopicConfig = {}
+KafkaTopicConfig.__index = KafkaTopicConfig
+
+--[[
+    Create topic configuration object.
+]]-- + +function KafkaTopicConfig.new(original_config) + local config = {} + setmetatable(config, KafkaTopicConfig) + + if original_config and original_config.topic_conf_ then + rawset(config, "topic_conf_", librdkafka.rd_kafka_topic_conf_dup(original_config.topic_conf_)) + else + rawset(config, "topic_conf_", librdkafka.rd_kafka_topic_conf_new()) + end + ffi.gc(config.topic_conf_, function (config) + librdkafka.rd_kafka_topic_conf_destroy(config) + end + ) + + return config +end + + +--[[ + Dump the topic configuration properties and values of `conf` to a map + with "key", "value" pairs. +]]-- + +function KafkaTopicConfig:dump() + assert(self.topic_conf_ ~= nil) + + local size = ffi.new("size_t[1]") + local dump = librdkafka.rd_kafka_topic_conf_dump(self.topic_conf_, size) + ffi.gc(dump, function(d) librdkafka.rd_kafka_conf_dump_free(d, size[0]) end) + + local result = {} + for i = 0, tonumber(size[0])-1,2 do + result[ffi.string(dump[i])] = ffi.string(dump[i+1]) + end + + return result +end + + +--[[ + Sets a configuration property. + + In case of failure "error(errstr)" is called and 'errstr' + is updated to contain a human readable error string. +]]-- + +function KafkaTopicConfig:__newindex(name, value) + assert(self.topic_conf_ ~= nil) + + local ERRLEN = 256 + local errbuf = ffi.new("char[?]", ERRLEN) -- cdata objects are garbage collected + + if librdkafka.rd_kafka_topic_conf_set(self.topic_conf_, name, value, errbuf, ERRLEN) ~= librdkafka.RD_KAFKA_CONF_OK then + error(ffi.string(errbuf)) + end +end + +return KafkaTopicConfig diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index b81a954ef40..fe97eb934bb 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -78,14 +78,15 @@ function ScCommon:check_boolean_number_option_syntax(number, default) end --- split: convert a string into a table --- @param string (string) the string that is going to be splitted into a table +-- @param text (string) the string that is going to be splitted into a table -- @param [opt] separator (string) the separator character that will be used to split the string +-- @return false (boolean) if text param is empty or nil -- @return table (table) a table of strings function ScCommon:split (text, separator) - -- return empty string if text is nil + -- return false if text is nil or empty if text == nil or text == "" then self.logger:error("[sc_common:split]: could not split text because it is nil or empty") - return "" + return false end local hash = {} diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index 51c55644686..545499dd750 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -17,9 +17,9 @@ local ScEvent = {} function sc_event.new(event, params, common, logger, broker) local self = {} - self.logger = logger - if not self.logger then - self.logger = sc_logger.new() + self.sc_logger = logger + if not self.sc_logger then + self.sc_logger = sc_logger.new() end self.sc_common = common self.params = params @@ -89,6 +89,10 @@ function ScEvent:is_valid_neb_event() is_valid_event = self:is_valid_host_status_event() elseif self.event.element == 24 then is_valid_event = 
self:is_valid_service_status_event() + elseif self.event.element == 1 then + is_valid_event = self:is_valid_acknowledgement_event() + elseif self.event.element == 5 then + is_valid_event = self:is_valid_downtime_event() end return is_valid_event @@ -110,6 +114,13 @@ function ScEvent:is_valid_host_status_event() return false end + -- return false if event status is a duplicate and dedup is enabled + if self:is_host_status_event_duplicated() then + self.sc_logger:warning("[sc_event:is_host_status_event_duplicated]: host_id: " .. tostring(self.event.host_id) + .. " is sending a duplicated event. Dedup option (enable_host_status_dedup) is set to: " .. tostring(self.params.enable_host_status_dedup)) + return false + end + -- return false if one of event ack, downtime or state type (hard soft) aren't valid if not self:is_valid_event_states() then self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not in a validated downtime, ack or hard/soft state") @@ -161,6 +172,13 @@ function ScEvent:is_valid_service_status_event() return false end + -- return false if event status is a duplicate and dedup is enabled + if self:is_service_status_event_duplicated() then + self.sc_logger:warning("[sc_event:is_service_status_event_duplicated]: host_id: " .. tostring(self.event.host_id) + .. " service_id: " .. tostring(self.event.service_id) .. " is sending a duplicated event. Dedup option (enable_service_status_dedup) is set to: " .. tostring(self.params.enable_service_status_dedup)) + return false + end + -- return false if one of event ack, downtime or state type (hard soft) aren't valid if not self:is_valid_event_states() then self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in a validated downtime, ack or hard/soft state") @@ -216,10 +234,10 @@ function ScEvent:is_valid_host() self.event.cache.host = self.sc_broker:get_host_all_infos(self.event.host_id) - -- return false if we can't get hostname - if (not self.event.cache.host.name and self.params.skip_anon_events == 1) then - self.sc_logger:warning("[sc_event:is_valid_host]: Invalid host with id: " .. tostring(self.event.host_id) - .. " host name is: " .. tostring(self.event.cache.host.name) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) + -- return false if we can't get hostname + if (not self.event.cache.host and self.params.skip_anon_events == 1) then + self.sc_logger:warning("[sc_event:is_valid_host]: No name for host with id: " .. tostring(self.event.host_id) + .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) return false end @@ -250,9 +268,9 @@ function ScEvent:is_valid_service() self.event.cache.service = self.sc_broker:get_service_all_infos(self.event.host_id, self.event.service_id) -- return false if we can't get service description - if (not self.event.cache.service.description and self.params.skip_anon_events == 1) then - self.sc_logger:warning("[sc_event:is_valid_service]: Invalid service with id: " .. tostring(self.event.service_id) - .. " service description is: " .. tostring(self.event.cache.service.description) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) + if (not self.event.cache.service and self.params.skip_anon_events == 1) then + self.sc_logger:warning("[sc_event:is_valid_service]: Invalid description for service with id: " .. tostring(self.event.service_id) + .. " and skip anon events is: " .. 
tostring(self.params.skip_anon_events))
     return false
   end
 
@@ -289,12 +307,27 @@ end
 -- @param accepted_status_list (string) a comma-separated list of accepted statuses ("ok,warning,critical")
 -- @return true|false (boolean)
 function ScEvent:is_valid_event_status(accepted_status_list)
-  for _, status_id in ipairs(self.sc_common:split(accepted_status_list, ",")) do
+  local status_list = self.sc_common:split(accepted_status_list, ",")
+
+  if not status_list then
+    self.sc_logger:error("[sc_event:is_valid_event_status]: accepted_status list is nil or empty")
+    return false
+  end
+
+  for _, status_id in ipairs(status_list) do
     if tostring(self.event.state) == status_id then
       return true
     end
   end
 
+  -- handle the downtime event specific case for logging
+  if (self.event.category == 1 and self.event.element == 5) then
+    self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: "
+      .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) .. ". Accepted states are: " .. tostring(accepted_status_list))
+    return false
+  end
+
+  -- log for everything else
   self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: "
     .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]) .. ". Accepted states are: " .. tostring(accepted_status_list))
   return false
@@ -362,7 +395,7 @@ function ScEvent:is_valid_hostgroup()
     return false
   else
     self.sc_logger:debug("[sc_event:is_valid_hostgroup]: event for host with id: " .. tostring(self.event.host_id)
-      .. "matched hostgroup: " .. accepted_hostgroup_name)
+      .. " matched hostgroup: " .. accepted_hostgroup_name)
   end
 
   return true
@@ -645,9 +678,9 @@ function ScEvent:is_valid_host_severity()
   return true
 end
 
---- is_valid_service_severity: checks if the service severity is accepted
+--- is_valid_service_severity: checks if the service severity is accepted
 -- @return true|false (boolean)
-function ScEvent:is_valid_host_severity()
+function ScEvent:is_valid_service_severity()
   -- return true if there is no severity filter
   if self.params.service_severity_threshold == nil then
     return true
@@ -667,8 +700,372 @@ function ScEvent:is_valid_host_severity()
   return true
 end
 
+--- is_valid_acknowledgement_event: checks if the event is a valid acknowledgement event
+-- @return true|false (boolean)
+function ScEvent:is_valid_acknowledgement_event()
+  -- return false if we can't get the hostname or host id is nil
+  if not self:is_valid_host() then
+    self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated")
+    return false
+  end
+
+  -- check if ack author is valid
+  if not self:is_valid_author() then
+    self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: acknowledgement on host: " .. tostring(self.event.host_id)
+      .. " and service: " .. tostring(self.event.service_id) .. " (0 means ack is on host) is not made by a valid author. Author is: "
+      .. tostring(self.event.author) .. " Accepted authors are: " .. self.params.accepted_authors)
+    return false
+  end
+
+  -- return false if host is not monitored from an accepted poller
+  if not self:is_valid_poller() then
+    self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. tostring(self.event.host_id) .. 
" is not monitored from an accepted poller") + return false + end + + -- return false if host has not an accepted severity + if not self:is_valid_host_severity() then + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service id: " .. tostring(self.event.service_id) + .. ". host_id: " .. tostring(self.event.host_id) .. ". Host has not an accepted severity") + return false + end + + local event_status = "" + -- service_id = 0 means ack is on a host + if self.event.type == 0 then + -- use dedicated ack host status configuration or host_status configuration + event_status = self.sc_common:ifnil_or_empty(self.params.ack_host_status, self.params.host_status) + + -- return false if event status is not accepted + if not self:is_valid_event_status(event_status) then + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. tostring(self.event.host_id) + .. " do not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][14][self.event.state])) + return false + end + -- service_id != 0 means ack is on a service + else + -- return false if we can't get service description of service id is nil + if not self:is_valid_service() then + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't been validated") + return false + end + + -- use dedicated ack host status configuration or host_status configuration + event_status = self.sc_common:ifnil_or_empty(self.params.ack_service_status, self.params.service_status) + + -- return false if event status is not accepted + if not self:is_valid_event_status(event_status) then + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service with id: " .. tostring(self.event.service_id) + .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][24][self.event.state])) + return false + end + + -- return false if service has not an accepted severity + if not self:is_valid_service_severity() then + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service id: " .. tostring(self.event.service_id) + .. ". host_id: " .. tostring(self.event.host_id) .. ". Service has not an accepted severity") + return false + end + + -- return false if service is not in an accepted servicegroup + if not self:is_valid_servicegroup() then + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup") + return false + end + end + + -- return false if host is not in an accepted hostgroup + if not self:is_valid_hostgroup() then + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service_id: " .. tostring(self.event.service_id) + .. " is not in an accepted hostgroup. Host ID is: " .. 
tostring(self.event.host_id))
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_downtime_event: check if the event is a valid downtime event
+-- @return true|false (boolean)
+function ScEvent:is_valid_downtime_event()
+  -- drop the event if it is one of the "fake" start or end downtime events received from broker
+  if not self:is_downtime_event_useless() then
+    self.sc_logger:debug("[sc_event:is_valid_downtime_event]: dropping downtime event because it is neither a start nor an end of downtime event.")
+    return false
+  end
+
+  -- return false if we can't get the hostname or host id is nil
+  if not self:is_valid_host() then
+    self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated")
+    return false
+  end
+
+  -- check if downtime author is valid
+  if not self:is_valid_author() then
+    self.sc_logger:warning("[sc_event:is_valid_downtime_event]: downtime with internal ID: " .. tostring(self.event.internal_id)
+      .. " is not made by a valid author. Author is: " .. tostring(self.event.author) .. " Accepted authors are: " .. self.params.accepted_authors)
+    return false
+  end
+
+  -- return false if host is not monitored from an accepted poller
+  if not self:is_valid_poller() then
+    self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) .. " is not monitored from an accepted poller")
+    return false
+  end
+
+  -- this is a host event
+  if self.event.type == 2 then
+    -- store the result in self.event.state because doing so allows us to use the is_valid_event_status method
+    self.event.state = self:get_downtime_host_status()
+
+    -- check if the current host downtime state is an accepted status
+    if not self:is_valid_event_status(self.params.dt_host_status) then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id)
+        .. " does not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][14][self.event.state])
+        .. " Accepted states are: " .. tostring(self.params.dt_host_status))
+      return false
+    end
+  else
+    -- return false if we can't get the service description or service id is nil
+    if not self:is_valid_service() then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't been validated")
+      return false
+    end
+
+    -- store the result in self.event.state because doing so allows us to use the is_valid_event_status method
+    self.event.state = self:get_downtime_service_status()
+
+    -- return false if event status is not accepted
+    if not self:is_valid_event_status(self.params.dt_service_status) then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service with id: " .. tostring(self.event.service_id)
+        .. " does not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][24][self.event.state])
+        .. " Accepted states are: " .. tostring(self.params.dt_service_status))
+      return false
+    end
+
+    -- return false if service does not have an accepted severity
+    if not self:is_valid_service_severity() then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service id: " .. tostring(self.event.service_id)
+        .. ". host_id: " .. tostring(self.event.host_id) .. ". 
Service does not have an accepted severity")
+      return false
+    end
+
+    -- return false if service is not in an accepted servicegroup
+    if not self:is_valid_servicegroup() then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup")
+      return false
+    end
+  end
+
+  -- return false if host is not in an accepted hostgroup
+  if not self:is_valid_hostgroup() then
+    self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service_id: " .. tostring(self.event.service_id)
+      .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.event.host_id))
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_author: check if the author of a comment is valid based on the contact alias in Centreon
+-- @return true|false (boolean)
+function ScEvent:is_valid_author()
+  -- do not filter on authors if the option is not configured
+  if self.params.accepted_authors == "" then
+    return true
+  end
+
+  -- check if author is accepted
+  if not self:find_author_in_list() then
+    self.sc_logger:debug("[sc_event:is_valid_author]: dropping event because author: " .. tostring(self.event.author)
+      .. " is not in the accepted authors list. Accepted authors are: " .. self.params.accepted_authors)
+    return false
+  end
+
+  return true
+end
+
+--- find_author_in_list: compare accepted authors from parameters with the event author
+-- @return accepted_alias or false (string|boolean) the alias of the first matching author if found or false if not found
+function ScEvent:find_author_in_list()
+  for _, accepted_alias in ipairs(self.sc_common:split(self.params.accepted_authors, ",")) do
+    if accepted_alias == self.event.author then
+      return accepted_alias
+    end
+  end
+
+  return false
+end
+
+--- get_downtime_host_status: retrieve the status of a host based on the last_time_up/down dates found in cache (self.event.cache.host must be set)
+-- @return status (number) the status code of the host
+function ScEvent:get_downtime_host_status()
+
+  -- map the last known date of each status to its respective status code
+  local timestamp = {
+    [0] = tonumber(self.event.cache.host.last_time_up),
+    [1] = tonumber(self.event.cache.host.last_time_down)
+  }
+
+  return self:get_most_recent_status_code(timestamp)
+end
+
+--- get_downtime_service_status: retrieve the status of a service based on the last_time_ok/warning/critical/unknown dates found in cache (self.event.cache.service must be set)
+-- @return status (number) the status code of the service
+function ScEvent:get_downtime_service_status()
+
+  -- map the last known date of each status to its respective status code
+  local timestamp = {
+    [0] = tonumber(self.event.cache.service.last_time_ok),
+    [1] = tonumber(self.event.cache.service.last_time_warning),
+    [2] = tonumber(self.event.cache.service.last_time_critical),
+    [3] = tonumber(self.event.cache.service.last_time_unknown)
+  }
+
+  return self:get_most_recent_status_code(timestamp)
+end
+
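+-- Illustration (comment only, added for clarity): with a cache such as
+--   last_time_ok = 1620000000, last_time_warning = 1620000300,
+--   last_time_critical = 1620000600, last_time_unknown = 0
+-- get_most_recent_status_code below returns 2 (CRITICAL) because 1620000600
+-- is the most recent of the four timestamps.
+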
+--- get_most_recent_status_code: retrieve the most recent status code from a list of statuses and timestamps
+-- @param timestamp (table) a table associating the last known timestamp of each status with its corresponding status code
+-- @return status (number) the most recent status code of the object
+function ScEvent:get_most_recent_status_code(timestamp)
+
+  -- prepare the table in which the latest known status timestamp and status code will be stored
+  local status_info = {
+    highest_timestamp = 0,
+    status = nil
+  }
+
+  -- compare all status timestamps and keep the most recent one and the corresponding status code
+  -- (pairs is required here: the table is indexed from 0, which ipairs would skip)
+  for status_code, status_timestamp in pairs(timestamp) do
+    if status_timestamp > status_info.highest_timestamp then
+      status_info.highest_timestamp = status_timestamp
+      status_info.status = status_code
+    end
+  end
+
+  return status_info.status
+end
+
+--- is_service_status_event_duplicated: check if the service event is the same as the last one (will not work for OK(H) -> CRITICAL(S) -> OK(H))
+-- @return true|false (boolean)
+function ScEvent:is_service_status_event_duplicated()
+  -- return false if option is not activated
+  if self.params.enable_service_status_dedup ~= 1 then
+    self.sc_logger:debug("[sc_event:is_service_status_event_duplicated]: service status dedup is not enabled. Option enable_service_status_dedup is set to: " .. tostring(self.params.enable_service_status_dedup))
+    return false
+  end
+
+  -- if last check is the same as last_hard_state_change, the event has just changed its status so it cannot be a duplicated event
+  if self.event.last_hard_state_change == self.event.last_check then
+    return false
+  end
+
+  -- map the last known date of each status to its respective status code
+  local timestamp = {
+    [0] = tonumber(self.event.cache.service.last_time_ok),
+    [1] = tonumber(self.event.cache.service.last_time_warning),
+    [2] = tonumber(self.event.cache.service.last_time_critical),
+    [3] = tonumber(self.event.cache.service.last_time_unknown)
+  }
+
+  -- if a different status is more recent than the last_hard_state_change, we are not facing a duplicated event
+  for status_code, status_timestamp in pairs(timestamp) do
+    -- it needs to be a different status code than the current one
+    if status_code ~= self.event.state and status_timestamp >= self.event.last_hard_state_change then
+      return false
+    end
+  end
+
+  -- in the end, only two cases remain. The first one is a duplicated event. The second one is when we have:
+  -- OK(H) --> NOT-OK(S) --> OK(H)
+  return true
+end
+
+--- is_host_status_event_duplicated: check if the host event is the same as the last one (will not work for UP(H) -> DOWN(S) -> UP(H))
+-- @return true|false (boolean)
+function ScEvent:is_host_status_event_duplicated()
+  -- return false if option is not activated
+  if self.params.enable_host_status_dedup ~= 1 then
+    self.sc_logger:debug("[sc_event:is_host_status_event_duplicated]: host status dedup is not enabled. Option enable_host_status_dedup is set to: " .. tostring(self.params.enable_host_status_dedup))
+    return false
+  end
+
+  -- if last check is the same as last_hard_state_change, the event has just changed its status so it cannot be a duplicated event
+  if self.event.last_hard_state_change == self.event.last_check then
+    return false
+  end
+
+  -- map the last known date of each status to its respective status code (host statuses live in the host cache)
+  local timestamp = {
+    [0] = tonumber(self.event.cache.host.last_time_up),
+    [1] = tonumber(self.event.cache.host.last_time_down),
+    [2] = tonumber(self.event.cache.host.last_time_unreachable),
+  }
+
+  -- if a different status is more recent than the last_hard_state_change, we are not facing a duplicated event
+  for status_code, status_timestamp in pairs(timestamp) do
+    -- it needs to be a different status code than the current one
+    if status_code ~= self.event.state and status_timestamp >= self.event.last_hard_state_change then
+      return false
+    end
+  end
+
+  -- in the end, only two cases remain. The first one is a duplicated event. The second one is when we have:
+  -- UP(H) --> NOT-UP(S) --> UP(H)
+  return true
+end
+
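+-- Illustration (comment only, added for clarity): with enable_host_status_dedup = 1,
+-- a host that went DOWN (hard) at t=1000 and is checked again at t=1060 sends a second
+-- DOWN event with last_hard_state_change = 1000 and last_check = 1060; since no other
+-- status is more recent than t=1000, the event is flagged as duplicated.
+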
+
+--- is_downtime_event_useless: the purpose of this method is to filter out unnecessary downtime events. Broker
+-- sends many downtime events before sending the one we want
+-- @return true|false (boolean) true when the event carries an actual start or end of downtime
+function ScEvent:is_downtime_event_useless()
+  -- return true if the downtime event is a valid start of downtime event
+  if self:is_valid_downtime_event_start() then
+    return true
+  end
+
+  -- return true if the downtime event is a valid end of downtime event
+  if self:is_valid_downtime_event_end() then
+    return true
+  end
+
+  return false
+end
+
+--- is_valid_downtime_event_start: make sure that the event is the one notifying us that a downtime has just started
+-- @return true|false (boolean)
+function ScEvent:is_valid_downtime_event_start()
+  -- event is about the end of the downtime (the actual_end_time key is not present in a start downtime event)
+  if self.event.actual_end_time then
+    self.sc_logger:debug("[sc_event:is_valid_downtime_event_start]: actual_end_time found in the downtime event. It can't be a downtime start event")
+    return false
+  end
+
+  -- the downtime hasn't actually started until the actual_start_time key is present in the start downtime event
+  if not self.event.actual_start_time then
+    self.sc_logger:debug("[sc_event:is_valid_downtime_event_start]: actual_start_time not found in the downtime event. The downtime hasn't started yet")
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_downtime_event_end: make sure that the event is the one notifying us that a downtime has just ended
+-- @return true|false (boolean)
+function ScEvent:is_valid_downtime_event_end()
+  -- event is about the end of the downtime (the deletion_time key is only present in an end downtime event)
+  if self.event.deletion_time then
+    return true
+  end
+
+  -- any other downtime event is not about the actual end of a downtime so we return false
+  self.sc_logger:debug("[sc_event:is_valid_downtime_event_end]: deletion_time not found in the downtime event. 
The downtime event is not about the end of a downtime")
+  return false
+end
+
 --- is_valid_storage: DEPRECATED method, use NEB category to get metric data instead
---- return true (boolean)
+-- @return true (boolean)
 function ScEvent:is_valid_storage_event()
   return true
 end
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
index 29904006005..6c93edc13d2 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
@@ -35,6 +35,8 @@ function sc_params.new(common, logger)
     host_status = "0,1,2", -- = ok, down, unreachable
     service_status = "0,1,2,3", -- = ok, warning, critical, unknown,
     ba_status = "0,1,2", -- = ok, warning, critical
+    ack_host_status = "", -- will use host_status if empty
+    ack_service_status = "", -- will use service_status if empty
 
     -- filter state type
     hard_only = 1,
@@ -46,6 +48,7 @@
     accepted_servicegroups = "",
     accepted_bvs = "",
     accepted_pollers = "",
+    accepted_authors = "",
     service_severity_threshold = nil,
     service_severity_operator = ">=",
     host_severity_threshold = nil,
@@ -54,6 +57,10 @@
     -- filter anomalous events
     skip_anon_events = 1,
     skip_nil_id = 1,
+
+    -- enable or disable dedup
+    enable_host_status_dedup = 0,
+    enable_service_status_dedup = 0,
 
     -- communication parameters
     max_buffer_size = 1,
@@ -178,6 +185,28 @@
     [2] = "CRITICAL"
   }
 
+  -- map downtime category statuses
+  self.params.status_mapping[1][5] = {
+    [1] = {},
+    [2] = {}
+  }
+
+  -- service downtime mapping
+  self.params.status_mapping[1][5][1] = {
+    [0] = "OK",
+    [1] = "WARNING",
+    [2] = "CRITICAL",
+    [3] = "UNKNOWN"
+  }
+
+  -- host downtime mapping
+  self.params.status_mapping[1][5][2] = {
+    [0] = "UP",
+    [1] = "DOWN",
+    [2] = "UNREACHABLE"
+  }
+
+
   setmetatable(self, { __index = ScParams })
 
   return self
@@ -192,7 +221,7 @@ function ScParams:param_override(user_params)
   end
 
   for param_name, param_value in pairs(user_params) do
-    if self.params[param_name] then
+    if self.params[param_name] or string.find(param_name, "^_sc_kafka_") ~= nil then
       self.params[param_name] = param_value
       self.logger:notice("[sc_params:param_override]: overriding parameter: " .. tostring(param_name) .. " with value: " .. 
tostring(param_value))
     else
@@ -208,6 +237,7 @@ function ScParams:check_params()
   self.params.in_downtime = self.common:check_boolean_number_option_syntax(self.params.in_downtime, 0)
   self.params.skip_anon_events = self.common:check_boolean_number_option_syntax(self.params.skip_anon_events, 1)
   self.params.skip_nil_id = self.common:check_boolean_number_option_syntax(self.params.skip_nil_id, 1)
+  self.params.accepted_authors = self.common:if_wrong_type(self.params.accepted_authors, "string", "")
   self.params.accepted_hostgroups = self.common:if_wrong_type(self.params.accepted_hostgroups, "string", "")
   self.params.accepted_servicegroups = self.common:if_wrong_type(self.params.accepted_servicegroups, "string", "")
   self.params.accepted_bvs = self.common:if_wrong_type(self.params.accepted_bvs, "string", "")
@@ -216,6 +246,43 @@
   self.params.service_severity_threshold = self.common:if_wrong_type(self.params.service_severity_threshold, "number", nil)
   self.params.host_severity_operator = self.common:if_wrong_type(self.params.host_severity_operator, "string", ">=")
   self.params.service_severity_operator = self.common:if_wrong_type(self.params.service_severity_operator, "string", ">=")
+  self.params.ack_host_status = self.common:ifnil_or_empty(self.params.ack_host_status,self.params.host_status)
+  self.params.ack_service_status = self.common:ifnil_or_empty(self.params.ack_service_status,self.params.service_status)
+  self.params.dt_host_status = self.common:ifnil_or_empty(self.params.dt_host_status,self.params.host_status)
+  self.params.dt_service_status = self.common:ifnil_or_empty(self.params.dt_service_status,self.params.service_status)
+  self.params.enable_host_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_host_status_dedup, 0)
+  self.params.enable_service_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_service_status_dedup, 0)
+end
+
+--- get_kafka_params: retrieve the kafka parameters and store them in the provided kafka_config object
+-- @param kafka_config (object) object instance of kafka_config
+-- @param params (table) the list of parameters from broker web configuration
+function ScParams:get_kafka_params(kafka_config, params)
+  for param_name, param_value in pairs(params) do
+    -- check if param starts with _sc_kafka_ (meaning it is a parameter for kafka)
+    if string.find(param_name, "^_sc_kafka_") ~= nil then
+      -- remove the _sc_kafka_ prefix and store the param in a dedicated kafka table
+      kafka_config[string.gsub(param_name, "_sc_kafka_", "")] = param_value
+      self.logger:notice("[sc_param:get_kafka_params]: " .. tostring(param_name)
+        .. " parameter with value " .. tostring(param_value) .. " added to kafka_config")
+    end
+  end
+end
+
+--- is_mandatory_config_set: check if the mandatory parameters required by a stream connector are set
+-- @param mandatory_params (table) the list of mandatory parameters
+-- @param params (table) the list of parameters from broker web configuration
+-- @return true|false (boolean)
+function ScParams:is_mandatory_config_set(mandatory_params, params)
+  for index, mandatory_param in ipairs(mandatory_params) do
+    if not params[mandatory_param] then
+      self.logger:error("[sc_param:is_mandatory_config_set]: " .. tostring(mandatory_param)
+        .. 
" parameter is not set in the stream connector web configuration") + return false + end + end + + return true end return sc_params \ No newline at end of file diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md index 59e2373bdb9..85cce972989 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -55,41 +55,51 @@ ## sc_param methods -| Method name | Method description | Link | -| -------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------- | -| param_override | replace default values of params with the ones provided by users in the web configuration of the stream connector | [Documentation](sc_param.md#param_override-method) | -| check_params | make sure that the default stream connectors params provided by the user from the web configuration are valid. If not, uses the default value | [Documentation](sc_param.md#check_params-method) | +| Method name | Method description | Link | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | +| param_override | replace default values of params with the ones provided by users in the web configuration of the stream connector | [Documentation](sc_param.md#param_override-method) | +| check_params | make sure that the default stream connectors params provided by the user from the web configuration are valid. If not, uses the default value | [Documentation](sc_param.md#check_params-method) | +| is_mandatory_config_set | check that all mandatory parameters for a stream connector are set | [Documentation](sc_param.md#is_mandatory_config_set-method) | +| get_kafka_params | retrive Kafka dedicated parameters from the parameter list and put them in the provided kafka_config object | [Documentation](sc_param.md#get_kafka_params-method) | ## sc_event methods -| Method name | Method description | Link | -| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------- | -| is_valid_category | check if the category of the event is accepted according to the stream connector params | [Documentation](sc_event.md#is_valid_category-method) | -| is_valid_element | check if the element of the event is accepted according to the stream connector params | [Documentation](sc_event.md#is_valid_element-method) | -| is_valid_event | check if the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event-method) | -| is_valid_neb_event | check if the neb event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_neb_event-method) | -| is_valid_host_status_event | check the "host status" event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_host_status_event-method) | -| is_valid_service_status_event | check the "servce status" event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_service_status_event-method) | -| is_valid_host | check if the host name and/or ID are valid according to the stream connector 
params | [Documentation](sc_event.md#is_valid_host-method) | -| is_valid_service | check if the service description and/or ID are are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_service-method) | -| is_valid_event_states | check if the state (HARD/SOFT), acknowledgement state and downtime state are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_states-method) | -| is_valid_event_status | check if the status (OK, DOWN...) of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_status-method) | -| is_valid_event_state_type | check if the state (HARD/SOFT) of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_state_type-method) | -| is_valid_event_acknowledge_state | check if the acknowledgement state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_acknowledge_state-method) | -| is_valid_event_downtime_state | check if the downtime state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_downtime_state-method) | -| is_valid_hostgroup | check if the host is in an accepted hostgroup according to the stream connector params | [Documentation](sc_event.md#is_valid_hostgroup-method) | -| find_hostgroup_in_list | check if one of the hostgroups of the event is in the list of accepted hostgroups provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_hostgroup_in_list-method) | -| is_valid_servicegroup | check if the service is in an accepted servicegroup according to the stream connector params | [Documentation](sc_event.md#is_valid_servicegroup-method) | -| find_servicegroup_in_list | check if one of the servicegroups of the event is in the list of accepted servicegroups provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_servicegroup_in_list-method) | -| is_valid_bam_event | check if the BAM event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_bam_event-method) | -| is_valid_ba | check if the BA name and/or ID are are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_ba-method) | -| is_valid_ba_status_event | check if the "ba status" (OK, WARNING, CRITICAL) event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_ba_status_event-method) | -| is_valid_ba_downtime_state | check if the BA downtime state is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_ba_downtime_state-method) | -| is_valid_ba_acknowledge_state | DOES NOTHING | [Documentation](sc_event.md#is_valid_ba_acknowledge_state-method) | -| is_valid_bv | check if the BA is in an accepted BV according to the stream connector params | [Documentation](sc_event.md#is_valid_bv-method) | -| find_bv_in_list | check if one of the BV of the event is in the list of accepted BV provided in the stream connector configuration. 
Stops at first match | [Documentation](sc_event.md#find_bv_in_list-method) | -| is_valid_poller | check if the host is monitored from an accepted poller according to the stream connector params | [Documentation](sc_event.md#is_valid_poller-method) | -| find_poller_in_list | check if the poller that monitores the host is in the list of accepted pollers provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_poller_in_list-method) | -| is_valid_host_severity | check if a host has a valid severity | [Documentation](sc_event.md#is_valid_host_severity-method) | -| is_valid_service_severity | check if a service has a valid severity | [Documentation](sc_event.md#is_valid_service_severity-method) | -| is_valid_storage_event | DO NOTHING (deprecated, you should use neb event to send metrics) | [Documentation](sc_event.md#is_valid_storage_event-method) | +| Method name | Method description | Link | +| ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------- | +| is_valid_category | check if the category of the event is accepted according to the stream connector params | [Documentation](sc_event.md#is_valid_category-method) | +| is_valid_element | check if the element of the event is accepted according to the stream connector params | [Documentation](sc_event.md#is_valid_element-method) | +| is_valid_event | check if the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event-method) | +| is_valid_neb_event | check if the neb event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_neb_event-method) | +| is_valid_host_status_event | check the "host status" event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_host_status_event-method) | +| is_valid_service_status_event | check the "servce status" event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_service_status_event-method) | +| is_valid_host | check if the host name and/or ID are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_host-method) | +| is_valid_service | check if the service description and/or ID are are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_service-method) | +| is_valid_event_states | check if the state (HARD/SOFT), acknowledgement state and downtime state are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_states-method) | +| is_valid_event_status | check if the status (OK, DOWN...) 
of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_status-method) | +| is_valid_event_state_type | check if the state (HARD/SOFT) of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_state_type-method) | +| is_valid_event_acknowledge_state | check if the acknowledgement state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_acknowledge_state-method) | +| is_valid_event_downtime_state | check if the downtime state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_downtime_state-method) | +| is_valid_hostgroup | check if the host is in an accepted hostgroup according to the stream connector params | [Documentation](sc_event.md#is_valid_hostgroup-method) | +| find_hostgroup_in_list | check if one of the hostgroups of the event is in the list of accepted hostgroups provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_hostgroup_in_list-method) | +| is_valid_servicegroup | check if the service is in an accepted servicegroup according to the stream connector params | [Documentation](sc_event.md#is_valid_servicegroup-method) | +| find_servicegroup_in_list | check if one of the servicegroups of the event is in the list of accepted servicegroups provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_servicegroup_in_list-method) | +| is_valid_bam_event | check if the BAM event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_bam_event-method) | +| is_valid_ba | check if the BA name and/or ID are are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_ba-method) | +| is_valid_ba_status_event | check if the "ba status" (OK, WARNING, CRITICAL) event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_ba_status_event-method) | +| is_valid_ba_downtime_state | check if the BA downtime state is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_ba_downtime_state-method) | +| is_valid_ba_acknowledge_state | DOES NOTHING | [Documentation](sc_event.md#is_valid_ba_acknowledge_state-method) | +| is_valid_bv | check if the BA is in an accepted BV according to the stream connector params | [Documentation](sc_event.md#is_valid_bv-method) | +| find_bv_in_list | check if one of the BV of the event is in the list of accepted BV provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_bv_in_list-method) | +| is_valid_poller | check if the host is monitored from an accepted poller according to the stream connector params | [Documentation](sc_event.md#is_valid_poller-method) | +| find_poller_in_list | check if the poller that monitores the host is in the list of accepted pollers provided in the stream connector configuration. 
Stops at first match | [Documentation](sc_event.md#find_poller_in_list-method) | +| is_valid_host_severity | check if a host has a valid severity | [Documentation](sc_event.md#is_valid_host_severity-method) | +| is_valid_service_severity | check if a service has a valid severity | [Documentation](sc_event.md#is_valid_service_severity-method) | +| is_valid_acknowledgement_event | check if the acknowledgement event is valid | [Documentation](sc_event.md#is_valid_acknowledgement_event-method) | +| is_valid_author | check if the author of a comment is accepted | [Documentation](sc_event.md#is_valid_author-method) | +| is_valid_downtime_event | check if the downtime event is valid | [Documentation](sc_event.md#is_valid_downtime_event-method) | +| is_host_status_event_duplicated | check if the host_status event is duplicated | [Documentation](sc_event.md#is_host_status_event_duplicated-method) | +| is_service_status_event_duplicated | check if the service_status event is duplicated | [Documentation](sc_event.md#is_service_status_event_duplicated-method) | +| is_downtime_event_useless | checks if the downtime event is a usefull one. Meaning that it carries valuable data regarding the actual end or start of the downtime | [Documentation](sc_event.md#is_downtime_event_useless-method) | +| is_valid_downtime_event_start | checks that the downtime event is about the actual start of the downtime | [Documentation](sc_event.md#is_valid_downtime_event_start-method) | +| is_valid_downtime_event_end | checks that the downtime event is about the actual end of the downtime | [Documentation](sc_event.md#is_valid_downtime_event_end-method) | +| is_valid_storage_event | DO NOTHING (deprecated, you should use neb event to send metrics) | [Documentation](sc_event.md#is_valid_storage_event-method) | diff --git a/stream-connectors/modules/docs/broker_data_structure.md b/stream-connectors/modules/docs/broker_data_structure.md new file mode 100644 index 00000000000..1c71eaf02b3 --- /dev/null +++ b/stream-connectors/modules/docs/broker_data_structure.md @@ -0,0 +1,111 @@ +# Broker data structure documentation + +- [Broker data structure documentation](#broker-data-structure-documentation) + - [Introduction](#introduction) + - [NEB Category](#neb-category) + - [Downtime](#downtime) + - [Downtime actual start](#downtime-actual-start) + - [Downtime actual end](#downtime-actual-end) + - [Acknowledgements](#acknowledgements) + - [Acknowledgement actual start](#acknowledgement-actual-start) + - [Acknowledgement actual end](#acknowledgement-actual-end) + +## Introduction + +The purpose of this documentation is to provide a quick overview of what data structure you should expect from a broker event. +This documentation will not explain the meaning of the structures. It is mostly a guide to help writing centreon lua modules and stream connectors + +## NEB Category + +### Downtime + +[BBDO documentation](https://docs.centreon.com/current/en/developer/developer-broker-mapping.html#downtime) + +if you are using the [**is_valid_downtime_event method**](sc_event.md#is_valid_downtime_event-method) you'll also have access to a `state` index that will give you the status code of the host or service and a `cache` table. 
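+
+For instance, once an event has passed this validation, a stream connector can branch on these fields (a minimal sketch; the variable names and log wording are illustrative):
+
+```lua
+-- category 1 = NEB, element 5 = downtime
+if event.category == 1 and event.element == 5 then
+  -- `state` and `cache` are added by is_valid_downtime_event
+  broker_log:info(3, "downtime for host id " .. tostring(event.host_id)
+    .. ", status code: " .. tostring(event.state))
+end
+```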
+ +#### Downtime actual start + +| index | type | +| ----------------- | ------- | +| actual_start_time | number | +| author | string | +| cancelled | boolean | +| category | number | +| comment_data | string | +| duration | number | +| element | number | +| end_time | number | +| entry_time | number | +| fixed | boolean | +| host_id | number | +| instance_id | number | +| internal_id | number | +| service_id | number | +| start_time | number | +| started | boolean | +| type | number | + +#### Downtime actual end + +| index | type | +| ----------------- | ------- | +| actual_end_time | number | +| actual_start_time | number | +| author | string | +| cancelled | boolean | +| category | number | +| comment_data | string | +| deletion_time | number | +| duration | number | +| element | number | +| end_time | number | +| entry_time | number | +| fixed | boolean | +| host_id | number | +| instance_id | number | +| internal_id | number | +| service_id | number | +| start_time | number | +| started | boolean | +| type | number | + +### Acknowledgements + +[BBDO documentation](https://docs.centreon.com/current/en/developer/developer-broker-mapping.html#acknowledgement) + +#### Acknowledgement actual start + +| index | type | +| ------------------ | ------- | +| author | string | +| category | number | +| comment_data | string | +| element | number | +| entry_time | number | +| host_id | number | +| instance_id | number | +| notify_contacts | boolean | +| persistent_comment | boolean | +| service_id | number | +| state | number | +| sticky | boolean | +| type | number | + +#### Acknowledgement actual end + +| index | type | +| ------------------ | ------- | +| author | string | +| category | number | +| comment_data | string | +| deletion_time | number | +| element | number | +| entry_time | number | +| host_id | number | +| instance_id | number | +| notify_contacts | boolean | +| persistent_comment | boolean | +| service_id | number | +| state | number | +| sticky | boolean | +| type | number | diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md index aeb42d5228e..db460c35cdd 100644 --- a/stream-connectors/modules/docs/sc_common.md +++ b/stream-connectors/modules/docs/sc_common.md @@ -200,10 +200,10 @@ The **split** method split a string using a separator and returns a table of all ### split: returns -| return | type | always | condition | -| ------------------------------- | ------ | ------ | ------------------------------------------- | -| a table with all splitted parts | table | no | the string to split mustn't be empty or nil | -| empty string | string | no | the string to split is empty or nil | +| return | type | always | condition | +| ------------------------------- | ------- | ------ | ------------------------------------------- | +| a table with all splitted parts | table | no | the string to split mustn't be empty or nil | +| false | boolean | no | if the string to split is empty or nil | ### split: example @@ -229,7 +229,7 @@ local result = test_common:split(my_string, separator) my_string = "" result = test_common:split(my_string, separator) ---> result is "" (empty string) +--> result is false ``` ## compare_numbers method diff --git a/stream-connectors/modules/docs/sc_event.md b/stream-connectors/modules/docs/sc_event.md index 209dc3b5909..48797e70d8c 100644 --- a/stream-connectors/modules/docs/sc_event.md +++ b/stream-connectors/modules/docs/sc_event.md @@ -33,6 +33,7 @@ - [is_valid_event_states: 
returns](#is_valid_event_states-returns) - [is_valid_event_states: example](#is_valid_event_states-example) - [is_valid_event_status method](#is_valid_event_status-method) + - [is_valid_event_status: parameters](#is_valid_event_status-parameters) - [is_valid_event_status: returns](#is_valid_event_status-returns) - [is_valid_event_status: example](#is_valid_event_status-example) - [is_valid_event_state_type method](#is_valid_event_state_type-method) @@ -89,6 +90,36 @@ - [is_valid_service_severity method](#is_valid_service_severity-method) - [is_valid_service_severity: returns](#is_valid_service_severity-returns) - [is_valid_service_severity: example](#is_valid_service_severity-example) + - [is_valid_acknowledgement_event method](#is_valid_acknowledgement_event-method) + - [is_valid_acknowledgement_event: returns](#is_valid_acknowledgement_event-returns) + - [is_valid_acknowledgement_event: example](#is_valid_acknowledgement_event-example) + - [is_host_status_event_duplicated method](#is_host_status_event_duplicated-method) + - [is_host_status_event_duplicated: returns](#is_host_status_event_duplicated-returns) + - [is_host_status_event_duplicated: example](#is_host_status_event_duplicated-example) + - [is_service_status_event_duplicated method](#is_service_status_event_duplicated-method) + - [is_service_status_event_duplicated: returns](#is_service_status_event_duplicated-returns) + - [is_service_status_event_duplicated: example](#is_service_status_event_duplicated-example) + - [is_valid_downtime_event method](#is_valid_downtime_event-method) + - [is_valid_downtime_event: returns](#is_valid_downtime_event-returns) + - [is_valid_downtime_event: example](#is_valid_downtime_event-example) + - [get_downtime_host_status method](#get_downtime_host_status-method) + - [get_downtime_host_status: returns](#get_downtime_host_status-returns) + - [get_downtime_host_status: example](#get_downtime_host_status-example) + - [get_downtime_service_status method](#get_downtime_service_status-method) + - [get_downtime_service_status: returns](#get_downtime_service_status-returns) + - [get_downtime_service_status: example](#get_downtime_service_status-example) + - [is_valid_author method](#is_valid_author-method) + - [is_valid_author: returns](#is_valid_author-returns) + - [is_valid_author: example](#is_valid_author-example) + - [is_downtime_event_useless method](#is_downtime_event_useless-method) + - [is_downtime_event_useless: returns](#is_downtime_event_useless-returns) + - [is_downtime_event_useless: example](#is_downtime_event_useless-example) + - [is_valid_downtime_event_start method](#is_valid_downtime_event_start-method) + - [is_valid_downtime_event_start: returns](#is_valid_downtime_event_start-returns) + - [is_valid_downtime_event_start: example](#is_valid_downtime_event_start-example) + - [is_valid_downtime_event_end method](#is_valid_downtime_event_end-method) + - [is_valid_downtime_event_end: returns](#is_valid_downtime_event_end-returns) + - [is_valid_downtime_event_end: example](#is_valid_downtime_event_end-example) - [is_valid_storage_event method](#is_valid_storage_event-method) ## Introduction @@ -207,6 +238,7 @@ head over the following chapters for more information - [is_valid_host_status_event](#is_valid_host_status_event-method) - [is_valid_service_status_event](#is_valid_service_status_event-method) +- [is_valid_acknowledgement_event](#is_valid_acknowledgement_event-method) ### is_valid_neb_event: returns @@ -233,6 +265,7 @@ head over the following chapters for more information - 
[is_valid_poller](#is_valid_poller-method)
- [is_valid_host_severity](#is_valid_host_severity-method)
- [is_valid_hostgroup](#is_valid_hostgroup-method)
+- [is_host_status_event_duplicated](#is_host_status_event_duplicated-method)

### is_valid_host_status_event: returns

@@ -357,14 +390,20 @@ head over the following chapters for more information

### is_valid_event_states: example

```lua
-local result = test_event:is_valid_event_states()
---> result is true or false
+local result = test_event:is_valid_event_states(test_param.params.host_status)
+--> result is true or false
```

## is_valid_event_status method

The **is_valid_event_status** method checks if the event status is valid based on [**host_status, service_status or ba_status**](sc_param.md#default-parameters) in the **host_status, service_status or ba_status** scope

+### is_valid_event_status: parameters
+
+| parameter                                          | type   | optional | default value |
+| -------------------------------------------------- | ------ | -------- | ------------- |
+| the list of accepted status codes from parameters  | string | no       |               |
+
### is_valid_event_status: returns

| return | type | always | condition |
@@ -828,6 +867,207 @@ local result = test_event:is_valid_service_severity()
]]
```
+
+## is_valid_acknowledgement_event method
+
+The **is_valid_acknowledgement_event** method checks if the acknowledgement event is accepted based on [**default_parameters**](sc_param.md#default-parameters) in the **acknowledgement** scope
+
+head over the following chapters for more information
+
+- [is_valid_host](#is_valid_host-method)
+- [is_valid_author](#is_valid_author-method)
+- [is_valid_poller](#is_valid_poller-method)
+- [is_valid_host_severity](#is_valid_host_severity-method)
+- [is_valid_event_status](#is_valid_event_status-method)
+- [is_valid_service](#is_valid_service-method)
+- [is_valid_service_severity](#is_valid_service_severity-method)
+- [is_valid_servicegroup](#is_valid_servicegroup-method)
+- [is_valid_hostgroup](#is_valid_hostgroup-method)
+
+### is_valid_acknowledgement_event: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_valid_acknowledgement_event: example
+
+```lua
+local result = test_event:is_valid_acknowledgement_event()
+--> result is true or false
+```
+
+## is_host_status_event_duplicated method
+
+The **is_host_status_event_duplicated** method checks if the event is a duplicate. For example, if a host DOWN event has already been received, the next DOWN event for that host is considered a duplicate. To enable this feature you must set the [**enable_host_status_dedup option to 1**](sc_param.md#default-parameters)
+
+### is_host_status_event_duplicated: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_host_status_event_duplicated: example
+
+```lua
+local result = test_event:is_host_status_event_duplicated()
+--> result is true or false
+```
+
+## is_service_status_event_duplicated method
+
+The **is_service_status_event_duplicated** method checks if the event is a duplicate. For example, if a service CRITICAL event has already been received, the next CRITICAL event for that service is considered a duplicate.
To enable this feature you must set the [**enable_service_status_dedup option to 1**](sc_param.md#default-parameters)
+
+### is_service_status_event_duplicated: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_service_status_event_duplicated: example
+
+```lua
+local result = test_event:is_service_status_event_duplicated()
+--> result is true or false
+```
+
+## is_valid_downtime_event method
+
+The **is_valid_downtime_event** method checks if the downtime event is valid based on [**default_parameters**](sc_param.md#default-parameters) in the **downtime** scope
+
+head over the following chapters for more information
+
+- [is_valid_host](#is_valid_host-method)
+- [is_valid_author](#is_valid_author-method)
+- [is_valid_poller](#is_valid_poller-method)
+- [is_valid_host_severity](#is_valid_host_severity-method)
+- [is_valid_event_status](#is_valid_event_status-method)
+- [is_valid_service](#is_valid_service-method)
+- [is_valid_service_severity](#is_valid_service_severity-method)
+- [is_valid_servicegroup](#is_valid_servicegroup-method)
+- [is_valid_hostgroup](#is_valid_hostgroup-method)
+- [get_downtime_host_status](#get_downtime_host_status-method)
+- [get_downtime_service_status](#get_downtime_service_status-method)
+
+### is_valid_downtime_event: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_valid_downtime_event: example
+
+```lua
+local result = test_event:is_valid_downtime_event()
+--> result is true or false
+```
+
+## get_downtime_host_status method
+
+The **get_downtime_host_status** method retrieves the status of the host in a host downtime event
+
+### get_downtime_host_status: returns
+
+| return            | type   | always | condition |
+| ----------------- | ------ | ------ | --------- |
+| event status code | number | yes    |           |
+
+### get_downtime_host_status: example
+
+```lua
+  local result = test_event:get_downtime_host_status()
+  --> result is 0 or 1 (UP or DOWN)
+```
+
+## get_downtime_service_status method
+
+The **get_downtime_service_status** method retrieves the status of the service in a service downtime event
+
+### get_downtime_service_status: returns
+
+| return            | type   | always | condition |
+| ----------------- | ------ | ------ | --------- |
+| event status code | number | yes    |           |
+
+### get_downtime_service_status: example
+
+```lua
+  local result = test_event:get_downtime_service_status()
+  --> result is 0 or 1 or 2 or 3 (OK, WARNING, CRITICAL, UNKNOWN)
+```
+
+## is_valid_author method
+
+The **is_valid_author** method checks if the author of a comment is valid according to the [**accepted_authors parameter**](sc_param.md#default-parameters).
+
+### is_valid_author: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_valid_author: example
+
+```lua
+local result = test_event:is_valid_author()
+--> result is true or false
+```
+
+## is_downtime_event_useless method
+
+The **is_downtime_event_useless** method checks if the downtime event is a true start or end of a downtime.
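+
+As a usage sketch (see also the cross-references and example below): assuming an `sc_event` object built as in the other examples, and assuming the method returns `true` for a useless event, a connector would typically drop events that carry no actual start or end information:
+
+```lua
+-- minimal sketch: skip downtime events that are neither a real start nor a real end
+if test_event:is_downtime_event_useless() then
+  -- nothing valuable in this event, acknowledge it and move on
+  return true
+end
+```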
+
+head over the following chapters for more information
+
+- [is_valid_downtime_event_start](#is_valid_downtime_event_start-method)
+- [is_valid_downtime_event_end](#is_valid_downtime_event_end-method)
+
+### is_downtime_event_useless: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_downtime_event_useless: example
+
+```lua
+local result = test_event:is_downtime_event_useless()
+--> result is true or false
+```
+
+## is_valid_downtime_event_start method
+
+The **is_valid_downtime_event_start** method checks if the downtime event is a true start-of-downtime event. It checks that there is no `actual_end_time` information in the downtime and that `actual_start_time` is set. Otherwise it is not a true start-of-downtime event.
+
+### is_valid_downtime_event_start: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_valid_downtime_event_start: example
+
+```lua
+local result = test_event:is_valid_downtime_event_start()
+--> result is true or false
+```
+
+## is_valid_downtime_event_end method
+
+The **is_valid_downtime_event_end** method checks if the downtime event is a true end-of-downtime event. It checks whether the `deletion_time` is set. Otherwise it is not a true end-of-downtime event.
+
+### is_valid_downtime_event_end: returns
+
+| return        | type    | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes    |           |
+
+### is_valid_downtime_event_end: example
+
+```lua
+local result = test_event:is_valid_downtime_event_end()
+--> result is true or false
+```
+
## is_valid_storage_event method

**DEPRECATED** does nothing
diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md
index cd535c732c6..d47fe75eaa4 100644
--- a/stream-connectors/modules/docs/sc_param.md
+++ b/stream-connectors/modules/docs/sc_param.md
@@ -11,6 +11,13 @@
  - [param_override: example](#param_override-example)
  - [check_params method](#check_params-method)
  - [check_params: example](#check_params-example)
+  - [get_kafka_parameters method](#get_kafka_parameters-method)
+    - [get_kafka_params: parameters](#get_kafka_params-parameters)
+    - [get_kafka_params: example](#get_kafka_params-example)
+  - [is_mandatory_config_set method](#is_mandatory_config_set-method)
+    - [is_mandatory_config_set: parameters](#is_mandatory_config_set-parameters)
+    - [is_mandatory_config_set: returns](#is_mandatory_config_set-returns)
+    - [is_mandatory_config_set: example](#is_mandatory_config_set-example)

## Introduction

@@ -18,28 +25,33 @@ The sc_param module provides methods to help you handle parameters for your stre

### Default parameters

-| Parameter name | type | default value | description | default scope | additionnal information |
-| -------------- | ---- | ------------- | ----------- | ------------- | ------------------------ |
-| accepted_categories | string |
neb,bam | each event is linked to a broker category that we can use to filter events | | it is a coma separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) | -| accepted_elements | string | host_status,service_status,ba_status | | each event is linked to a broker element that we can use to filter events | it is a coma separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore. "Host status" becomes "host_status") | -| host_status | string | 0,1,2 | coma separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | -| service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | -| ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | -| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | -| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | -| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | -| accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb) | | -| accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb) | | -| accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | -| accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb) | | -| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam) | | -| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam) | | -| max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). | | | -| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | -| service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. it must be used with service_severity_operator option | service_status(neb) | | -| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb) | | -| host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. 
it must be used with host_severity_operator option | host_status(neb), service_status(neb) | |
-| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb) | |
+| Parameter name | type | default value | description | default scope | additional information |
+| -------------- | ---- | ------------- | ----------- | ------------- | ----------------------- |
+| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a comma-separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) |
+| accepted_elements | string | host_status,service_status,ba_status | each event is linked to a broker element that we can use to filter events | | it is a comma-separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore.
"Host status" becomes "host_status") | +| host_status | string | 0,1,2 | coma separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | +| service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | +| ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | +| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | +| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | +| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | +| accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | +| accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | +| accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | +| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | +| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | +| max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). | | | +| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | +| service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. it must be used with service_severity_operator option | service_status(neb), acknowledgement(neb) | | +| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb), acknowledgement(neb) | | +| host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. it must be used with host_severity_operator option | host_status(neb), service_status(neb) , acknowledgement(neb) | | +| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| ack_host_status | string | | | coma separated list of accepted host status for an acknowledgement event. 
It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | acknowledgement(neb) | |
+| ack_service_status | string | | comma-separated list of accepted service status for an acknowledgement event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | acknowledgement(neb) | |
+| enable_host_status_dedup | number | 0 | enable the deduplication of host status events when set to 1 | host_status(neb) | |
+| enable_service_status_dedup | number | 0 | enable the deduplication of service status events when set to 1 | service_status(neb) | |
+| accepted_authors | string | | comma-separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | |

## Module initialization

@@ -127,3 +139,76 @@ test_param:check_params()
--> test_param.param.accepted_elements is: "ba_status"
--> test_param.param.in_downtime is: 0 (12 is not a valid value, it goes back to the default one)
```
+
+## get_kafka_parameters method
+
+The **get_kafka_parameters** method finds the configuration parameters that are related to a stream connector that sends data to **Kafka**.
+To do so, parameters must match the regular expression `^_sc_kafka_`. The `_sc_kafka_` prefix is then stripped from the parameter name and the parameter is added to the kafka_config object.
+
+A list of Kafka parameters is available [**here**](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). You must put **_sc_kafka_** as a prefix to use them.
+For example, the parameter `security.protocol` becomes `_sc_kafka_security.protocol`.
+
+### get_kafka_params: parameters
+
+| parameter    | type   | optional | default value |
+| ------------ | ------ | -------- | ------------- |
+| kafka_config | object | no       |               |
+| params       | table  | no       |               |
+
+### get_kafka_params: example
+
+```lua
+-- create the kafka_config object
+local test_kafka_config = kafka_config.create()
+
+-- set up a parameter list
+local params = {
+  broker = "localhost:9093",
+  ["_sc_kafka_sasl.username"] = "john",
+  topic = "pasta",
+  ["_sc_kafka_sasl.password"] = "doe"
+}
+
+test_param:get_kafka_params(test_kafka_config, params)
+
+--> test_kafka_config["sasl.username"] is "john"
+--> test_kafka_config["sasl.password"] is "doe"
+--> test_kafka_config["topic"] is nil
+```
+
+## is_mandatory_config_set method
+
+The **is_mandatory_config_set** method checks that all mandatory parameters for a stream connector are set up. If one is missing, it prints an error and returns false.
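+
+In a stream connector, this check is typically done once at initialization time. The sketch below assumes a `test_param` object and a `self.fail` flag like the ones used by the connectors shipped in `modules/tests`; it only illustrates the guard pattern, and the parameter names are placeholders:
+
+```lua
+-- minimal sketch: refuse to process events when a mandatory parameter is missing
+local mandatory_params = { [1] = "username", [2] = "password" }
+
+if not test_param:is_mandatory_config_set(mandatory_params, params) then
+  -- is_mandatory_config_set has already printed the missing parameter error
+  self.fail = true
+end
+```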
+ +### is_mandatory_config_set: parameters + +| parameter | type | optional | default value | +| ---------------- | ----- | -------- | ------------- | +| mandatory_params | table | no | | +| params | table | no | | + +### is_mandatory_config_set: returns + +| return | type | always | condition | +| ---------------- | ------- | ------ | -------------------------------------------------------- | +| true or false | boolean | yes | if a mandatory configuration parameter is missing or not | + +### is_mandatory_config_set: example + +```lua +-- create a list of mandatory parameters +local mandatory_parameters = { + [1] = "username", + [2] = "password" +} + +-- list of parameters configured by the user +local params = { + username = "John", + address = "localhost", +} + +local result = test_param:is_mandatory_config_set(mandatory_params, params) + +--> result is false because the "password" parameter is not in the list of parameters +``` diff --git a/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec b/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0-1.rockspec similarity index 100% rename from stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0.rockspec rename to stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0-1.rockspec diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0.rockspec b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0-1.rockspec similarity index 100% rename from stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0.rockspec rename to stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0-1.rockspec diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-1.rockspec b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-1.rockspec new file mode 100644 index 00000000000..e5b4af8aa13 --- /dev/null +++ b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-1.rockspec @@ -0,0 +1,33 @@ +package = "centreon-stream-connectors-lib" +version = "1.1.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.1.0" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer.lua"] = "modules/centreon-stream-connectors-lib/producer.lua", + 
["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/topic.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/tests/acknowledgement_stream_connector.lua b/stream-connectors/modules/tests/acknowledgement_stream_connector.lua new file mode 100644 index 00000000000..d965a0fca31 --- /dev/null +++ b/stream-connectors/modules/tests/acknowledgement_stream_connector.lua @@ -0,0 +1,208 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + -- initiate EventQueue variables + self.events = {} + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" + local log_level = params.log_level or 3 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- initiate parameters dedicated to this stream connector + self.sc_params.params.output_file = params.output_file + + -- overriding default parameters for this stream connector + params.accepted_categories = "neb" + params.accepted_elements = "acknowledgement" + + -- checking mandatory parameters and setting a fail flag + if not params.output_file then + self.sc_logger:error("output_file is a mandatory parameter.") + self.fail = true + end + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_event() + for i, v in pairs(self.sc_event.event) do + self.sc_logger:error("index: " .. tostring(i) .. " value: " .. tostring(v)) + end + -- starting to handle shared information between host and service + self.sc_event.event.formated_event = { + -- name of host has been stored in a cache table when calling is_valid_even() + my_host = self.sc_event.event.cache.host.name, + -- states (critical, ok...) 
are found and converted to human format thanks to the status_mapping table + my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + my_author = self.sc_event.event.author, + } + + self:add() + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add () + -- store event in self.events list + self.events[#self.events + 1] = self.sc_event.event.formated_event +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush () + self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string") + + -- send stored events + retval = self:send_data() + + -- reset stored events list + self.events = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external tool +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:send_data () + local data = "" + local counter = 0 + + -- concatenate all stored event in the data variable + for _, formated_event in ipairs(self.events) do + if counter == 0 then + data = broker.json_encode(formated_event) + counter = counter + 1 + else + data = data .. "," .. broker.json_encode(formated_event) + end + end + + self.sc_logger:debug("EventQueue:send_data: creating json: " .. tostring(data)) + + -- output data to the tool we want + if self:call(data) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue:call send the data where we want it to be +-- @param data (string) the data we want to send +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:call (data) + data = data or nil + + -- open a file + self.sc_logger:debug("EventQueue:call: opening file " .. self.sc_params.params.output_file) + local file = io.open(self.sc_params.params.output_file, "a") + io.output(file) + + -- write in the file + self.sc_logger:debug("EventQueue:call: writing message " .. tostring(data)) + io.write(data .. "\n") + + -- close the file + self.sc_logger:debug("EventQueue:call: closing file " .. 
self.sc_params.params.output_file) + io.close(file) + + return true +end + +local queue + +function init(params) + queue = EventQueue.new(params) +end + +function write(event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + return true + end + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then + queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") + os.execute("sleep " .. tonumber(1)) + queue:flush() + end + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_event() + else + -- for i, v in pairs(queue.sc_event.event) do + -- queue.sc_logger:error("index: " .. tostring(i) .. " value: " .. tostring(v)) + -- end + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. 
") is reached, flushing data") + queue:flush() + end + + return true +end diff --git a/stream-connectors/modules/tests/downtime_stream_connector.lua b/stream-connectors/modules/tests/downtime_stream_connector.lua new file mode 100644 index 00000000000..b81beb0fe30 --- /dev/null +++ b/stream-connectors/modules/tests/downtime_stream_connector.lua @@ -0,0 +1,210 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + -- initiate EventQueue variables + self.events = {} + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" + local log_level = params.log_level or 3 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- initiate parameters dedicated to this stream connector + self.sc_params.params.output_file = params.output_file + + -- overriding default parameters for this stream connector + params.accepted_categories = "neb" + params.accepted_elements = "downtime" + + -- checking mandatory parameters and setting a fail flag + if not params.output_file then + self.sc_logger:error("output_file is a mandatory parameter.") + self.fail = true + end + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_event() + -- for i, v in pairs(self.sc_event.event) do + -- self.sc_logger:error("index: " .. tostring(i) .. " value: " .. tostring(v)) + -- end + -- starting to handle shared information between host and service + self.sc_event.event.formated_event = { + -- name of host has been stored in a cache table when calling is_valid_even() + my_host = self.sc_event.event.cache.host.name, + -- states (critical, ok...) 
are found and converted to human format thanks to the status_mapping table + my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.type][self.sc_event.event.state], + my_author = self.sc_event.event.author, + my_start_time = self.sc_event.event.actual_start_time, + my_end_time = self.sc_event.event.actual_end_time, + } + + self:add() + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add () + -- store event in self.events list + self.events[#self.events + 1] = self.sc_event.event.formated_event +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush () + self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string") + + -- send stored events + retval = self:send_data() + + -- reset stored events list + self.events = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external tool +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:send_data () + local data = "" + local counter = 0 + + -- concatenate all stored event in the data variable + for _, formated_event in ipairs(self.events) do + if counter == 0 then + data = broker.json_encode(formated_event) + counter = counter + 1 + else + data = data .. "," .. broker.json_encode(formated_event) + end + end + + self.sc_logger:debug("EventQueue:send_data: creating json: " .. tostring(data)) + + -- output data to the tool we want + if self:call(data) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue:call send the data where we want it to be +-- @param data (string) the data we want to send +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:call (data) + data = data or nil + + -- open a file + self.sc_logger:debug("EventQueue:call: opening file " .. self.sc_params.params.output_file) + local file = io.open(self.sc_params.params.output_file, "a") + io.output(file) + + -- write in the file + self.sc_logger:debug("EventQueue:call: writing message " .. tostring(data)) + io.write(data .. "\n") + + -- close the file + self.sc_logger:debug("EventQueue:call: closing file " .. 
self.sc_params.params.output_file) + io.close(file) + + return true +end + +local queue + +function init(params) + queue = EventQueue.new(params) +end + +function write(event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + return true + end + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then + queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") + os.execute("sleep " .. tonumber(1)) + queue:flush() + end + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_event() + else + -- for i, v in pairs(queue.sc_event.event) do + -- queue.sc_logger:error("index: " .. tostring(i) .. " value: " .. tostring(v)) + -- end + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached, flushing data") + queue:flush() + end + + return true +end diff --git a/stream-connectors/modules/tests/kafka_test_connexion.lua b/stream-connectors/modules/tests/kafka_test_connexion.lua new file mode 100644 index 00000000000..9f0186916cb --- /dev/null +++ b/stream-connectors/modules/tests/kafka_test_connexion.lua @@ -0,0 +1,45 @@ +----- START OF PARAMETERS ------- + +-- put your kafka broker address {"host1:port", "host2:port"} +local BROKERS_ADDRESS = { "hhhhhhh:pppp" } +-- change topic depending on your needs +local TOPIC_NAME = "centreon" + +local config = require 'rdkafka.config'.create() + +-- set up your configuration. 
List of parameters there : https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md +config["security.protocol"] = "sasl_plaintext" +config["sasl.mechanisms"] = "PLAIN" +config["sasl.username"] = "xxxxx" +config["sasl.password"] = "yyyyyyyy" +config["statistics.interval.ms"] = "1000" + +-- this is the message you want to send to kafka +local message = "This is a test message" + +------ END OF PARAMETERS --------- + + + +config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) +config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) + +local producer = require 'rdkafka.producer'.create(config) + +for k, v in pairs(BROKERS_ADDRESS) do + producer:brokers_add(v) +end + +local topic_config = require 'rdkafka.topic_config'.create() +topic_config["auto.commit.enable"] = "true" + +local topic = require 'rdkafka.topic'.create(producer, TOPIC_NAME, topic_config) + +local KAFKA_PARTITION_UA = -1 + +producer:produce(topic, KAFKA_PARTITION_UA, message) + + +while producer:outq_len() ~= 0 do + producer:poll(10) +end \ No newline at end of file From 7ed5aac80902665685cadea0c57f9c00fd8549de Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 31 May 2021 09:55:02 +0200 Subject: [PATCH 059/219] fix missing standard param (#49) --- .../modules/centreon-stream-connectors-lib/sc_params.lua | 4 +++- stream-connectors/modules/docs/sc_param.md | 2 ++ stream-connectors/modules/tests/kafka_test_connexion.lua | 8 ++++---- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 6c93edc13d2..dc6463011cb 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -36,7 +36,9 @@ function sc_params.new(common, logger) service_status = "0,1,2,3", -- = ok, warning, critical, unknown, ba_status = "0,1,2", -- = ok, warning, critical ack_host_status = "", -- will use host_status if empty - ack_service_status = "", -- wil use service_status if empty + ack_service_status = "", -- will use service_status if empty + dt_host_status = "", -- will use host_status if empty + dt_service_status = "", -- will use service_status if empty -- filter state type hard_only = 1, diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index d47fe75eaa4..624d7337067 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -49,6 +49,8 @@ The sc_param module provides methods to help you handle parameters for your stre | host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb), acknowledgement(neb) | | | ack_host_status | string | | | coma separated list of accepted host status for an acknowledgement event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | acknowledgement(neb) | | | ack_service_status | string | | | coma separated list of accepted service status for an acknowledgement event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | acknowledgement(neb) | | +| dt_host_status | string | | | coma separated list of accepted host status for a downtime event. 
It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | downtime(neb) | | +| dt_service_status | string | | | coma separated list of accepted service status for a downtime event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | downtime(neb) | | | enable_host_status_dedup | number | 0 | | enable the deduplication of host status event when set to 1 | host_status(neb) | | | enable_service_status_dedup | number | 0 | | enable the deduplication of service status event when set to 1 | service_status(neb) | | | accepted_authors | string | | | coma separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | | diff --git a/stream-connectors/modules/tests/kafka_test_connexion.lua b/stream-connectors/modules/tests/kafka_test_connexion.lua index 9f0186916cb..e640f4dd681 100644 --- a/stream-connectors/modules/tests/kafka_test_connexion.lua +++ b/stream-connectors/modules/tests/kafka_test_connexion.lua @@ -5,7 +5,7 @@ local BROKERS_ADDRESS = { "hhhhhhh:pppp" } -- change topic depending on your needs local TOPIC_NAME = "centreon" -local config = require 'rdkafka.config'.create() +local config = require 'centreon-stream-connectors-lib.rdkafka.config'.create() -- set up your configuration. List of parameters there : https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md config["security.protocol"] = "sasl_plaintext" @@ -24,16 +24,16 @@ local message = "This is a test message" config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) -local producer = require 'rdkafka.producer'.create(config) +local producer = require 'centreon-stream-connectors-lib.rdkafka.producer'.create(config) for k, v in pairs(BROKERS_ADDRESS) do producer:brokers_add(v) end -local topic_config = require 'rdkafka.topic_config'.create() +local topic_config = require 'centreon-stream-connectors-lib.rdkafka.topic_config'.create() topic_config["auto.commit.enable"] = "true" -local topic = require 'rdkafka.topic'.create(producer, TOPIC_NAME, topic_config) +local topic = require 'centreon-stream-connectors-lib.rdkafka.topic'.create(producer, TOPIC_NAME, topic_config) local KAFKA_PARTITION_UA = -1 From 25ca8cb02f469926785f612bbae52b7aa4898c5d Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 31 May 2021 10:50:43 +0200 Subject: [PATCH 060/219] fix rockspec v1.1.0-2 (#50) --- ...eon-stream-connectors-lib-1.1.0-2.rockspec | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec new file mode 100644 index 00000000000..9f1ea5d95fe --- /dev/null +++ b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec @@ -0,0 +1,33 @@ +package = "centreon-stream-connectors-lib" +version = "1.1.0-2" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.1.0-2" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type 
= "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer.lua"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua" + } +} \ No newline at end of file From 0a43fe8b72e1cf8beb0b276b6855428dbf67327b Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 31 May 2021 13:29:00 +0200 Subject: [PATCH 061/219] fix requires in test_kafka script (#51) --- stream-connectors/modules/tests/kafka_test_connexion.lua | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stream-connectors/modules/tests/kafka_test_connexion.lua b/stream-connectors/modules/tests/kafka_test_connexion.lua index e640f4dd681..9bd12b6b9ed 100644 --- a/stream-connectors/modules/tests/kafka_test_connexion.lua +++ b/stream-connectors/modules/tests/kafka_test_connexion.lua @@ -5,7 +5,7 @@ local BROKERS_ADDRESS = { "hhhhhhh:pppp" } -- change topic depending on your needs local TOPIC_NAME = "centreon" -local config = require 'centreon-stream-connectors-lib.rdkafka.config'.create() +local config = require 'centreon-stream-connectors-lib.rdkafka.config'.new() -- set up your configuration. 
List of parameters there : https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md config["security.protocol"] = "sasl_plaintext" @@ -24,16 +24,16 @@ local message = "This is a test message" config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) -local producer = require 'centreon-stream-connectors-lib.rdkafka.producer'.create(config) +local producer = require 'centreon-stream-connectors-lib.rdkafka.producer'.new(config) for k, v in pairs(BROKERS_ADDRESS) do producer:brokers_add(v) end -local topic_config = require 'centreon-stream-connectors-lib.rdkafka.topic_config'.create() +local topic_config = require 'centreon-stream-connectors-lib.rdkafka.topic_config'.new() topic_config["auto.commit.enable"] = "true" -local topic = require 'centreon-stream-connectors-lib.rdkafka.topic'.create(producer, TOPIC_NAME, topic_config) +local topic = require 'centreon-stream-connectors-lib.rdkafka.topic'.new(producer, TOPIC_NAME, topic_config) local KAFKA_PARTITION_UA = -1 From ff5ff5ddef1d570ed1aa6cdca39cbc0ff7ad4736 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 31 May 2021 23:07:44 +0200 Subject: [PATCH 062/219] fix rockspec (#52) * fix requires in test_kafka script * fix rockspec again --- ...eon-stream-connectors-lib-1.1.0-2.rockspec | 2 +- ...eon-stream-connectors-lib-1.1.0-3.rockspec | 33 +++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-3.rockspec diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec index 9f1ea5d95fe..a6c4ed1296f 100644 --- a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec +++ b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec @@ -26,7 +26,7 @@ build = { ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", - ["centreon-stream-connectors-lib.rdkafka.producer.lua"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua" } diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-3.rockspec b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-3.rockspec new file mode 100644 index 00000000000..34cae90a9e7 --- /dev/null +++ b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-3.rockspec @@ -0,0 +1,33 @@ +package = "centreon-stream-connectors-lib" +version = "1.1.0-3" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.1.0-3" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream 
connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua" + } +} \ No newline at end of file From 165c43e73babd8134e44e70e98ded4674c6e1dbc Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 14 Jun 2021 22:42:22 +0200 Subject: [PATCH 063/219] prepare 1.2 release add new stream connectors * fix requires in test_kafka script * fix rockspec again * put mandatory param in std param list * organize spec files * new method for url param * fix spec file 1.1.0-2 * conflicting * solving conflict * add a new loglevel (info) * smarter url encoding * fix mandatory params * start bq_table module * start bigquery module * create google oauth module * start bigquery stream connector * create a macro module * add time related params * change severity in cache to match macro system * prepare spec file for 1.2 release * add time params in the doc * force broker api v2 * document google oauth module * improve bq_tables * minor refacto on new modules * improve google modules * better cache and skip_anon + small fixes * add bigquery module documentation * add logging in sc_macros module * remove unnecessary require in sc_macros module * enable event dedup by default * add sc_macros documentation * add new broker event data structures * better method naming in sc_macros module * index new documentations in readme * git conflicting * add service now api v2 * bigquery stream connector * remove sleep in stream connectors --- .../bsm/bsm_connector-apiv1.lua | 3 +- .../elasticsearch/elastic-metrics-apiv1.lua | 8 +- .../google/google-bigquery-apiv2.lua | 409 ++++++++++++++ .../influxdb/influxdb-metrics-apiv1.lua | 16 +- .../centreon-certified/kafka/kafka-apiv2.lua | 271 +++++++++ .../opsgenie/opsgenie-apiv1.lua | 6 +- .../pagerduty/pagerduty-apiv1.lua | 3 +- .../prometheus/prometheus-gateway-apiv1.lua | 3 +- .../servicenow/servicenow-apiv1.lua | 3 +- .../servicenow/servicenow-apiv2.lua | 427 ++++++++++++++ .../google/auth/oauth.lua | 290 ++++++++++ .../google/bigquery/bigquery.lua | 196 +++++++ .../sc_broker.lua | 2 + .../sc_common.lua | 26 + .../sc_event.lua | 58 +- .../sc_logger.lua | 6 + .../sc_macros.lua | 370 +++++++++++++ .../sc_params.lua | 13 +- stream-connectors/modules/docs/README.md | 61 +- .../modules/docs/broker_data_structure.md | 
97 ++++ .../modules/docs/google/auth/oauth.md | 217 ++++++++ .../modules/docs/google/bigquery/bigquery.md | 334 +++++++++++ stream-connectors/modules/docs/sc_broker.md | 2 +- stream-connectors/modules/docs/sc_common.md | 36 +- stream-connectors/modules/docs/sc_event.md | 8 +- stream-connectors/modules/docs/sc_logger.md | 18 + stream-connectors/modules/docs/sc_macros.md | 519 ++++++++++++++++++ stream-connectors/modules/docs/sc_param.md | 73 ++- ...eon-stream-connectors-lib-1.1.0-1.rockspec | 2 +- ...eon-stream-connectors-lib-1.1.0-2.rockspec | 33 ++ ...eon-stream-connectors-lib-1.1.0-3.rockspec | 33 ++ ...eon-stream-connectors-lib-1.2.0-1.rockspec | 38 ++ 32 files changed, 3502 insertions(+), 79 deletions(-) create mode 100644 stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua create mode 100644 stream-connectors/centreon-certified/kafka/kafka-apiv2.lua create mode 100644 stream-connectors/centreon-certified/servicenow/servicenow-apiv2.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/google/auth/oauth.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua create mode 100644 stream-connectors/modules/docs/google/auth/oauth.md create mode 100644 stream-connectors/modules/docs/google/bigquery/bigquery.md create mode 100644 stream-connectors/modules/docs/sc_macros.md rename stream-connectors/modules/specs/{1.0.x => 1.1.x}/centreon-stream-connectors-lib-1.1.0-1.rockspec (93%) create mode 100644 stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-2.rockspec create mode 100644 stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-3.rockspec create mode 100644 stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec diff --git a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua index fb16e0f36dc..1fe32338664 100644 --- a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua +++ b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua @@ -301,8 +301,7 @@ function write(e) -- Then we check whether the event queue is already full if (#queue.events >= queue.max_buffer_size) then - broker_log:warning(1, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, flushing data after a 1s sleep.") - os.execute("sleep " .. tonumber(1)) + broker_log:warning(1, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, flushing data.") return queue:flush() end diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua index 2cd3bec4b21..46ee6edeec5 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua @@ -11,8 +11,8 @@ local elastic = { local function check_index(socket) -- Ask for the index socket:write('GET /centreon/_mapping?pretty HTTP/1.1\r\nHost: ' - .. elastic.address .. ':' .. elastic.port - .. '\r\nAccept: */*\r\n\r\n') + .. elastic.address .. ':' .. elastic.port + .. 
'\r\nAccept: */*\r\n\r\n') local answer = socket:read() if string.match(answer, "HTTP/1.1 200 OK") then return true @@ -30,8 +30,8 @@ local function init_index(socket) broker_log:info(1, "init_index") -- Initialize the index local header = 'PUT /centreon?pretty HTTP/1.1\r\nHost: ' - .. elastic.address .. ':' .. elastic.port - .. '\r\nAccept: */*\r\nContent-Type: application/json\r\n' + .. elastic.address .. ':' .. elastic.port + .. '\r\nAccept: */*\r\nContent-Type: application/json\r\n' local content = [[{ "mappings": { "metrics": { diff --git a/stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua b/stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua new file mode 100644 index 00000000000..5436a5eb42a --- /dev/null +++ b/stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua @@ -0,0 +1,409 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_oauth = require("centreon-stream-connectors-lib.google.auth.oauth") +local sc_bq = require("centreon-stream-connectors-lib.google.bigquery.bigquery") +local curl = require("cURL") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + [1] = "dataset", + [2] = "key_file_path", + [3] = "api_key", + [4] = "scope_list" + } + + -- initiate EventQueue variables + self.events = { + [1] = {}, + [6] = {} + } + + self.events[1] = { + [1] = {}, + [6] = {}, + [14] = {}, + [24] = {} + } + + self.events[6] = { + [1] = {} + } + + self.flush = { + [1] = {}, + [6] = {} + } + + self.flush[1] = { + [1] = function () return self:flush_ack() end, + [6] = function () return self:flush_dt() end, + [14] = function () return self:flush_host() end, + [24] = function () return self:flush_service() end + } + + self.flush[6] = { + [1] = function () return self:flush_ba() end + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" + local log_level = params.log_level or 2 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values don't suit the basic needs + params.accepted_categories = "neb,bam" + params.accepted_elements = "host_status,service_status,downtime,acknowledgement,ba_status" + self.sc_params.params.proxy_address = params.proxy_address + self.sc_params.params.proxy_port = params.proxy_port + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_params.params.__internal_ts_host_last_flush = os.time() + self.sc_params.params.__internal_ts_service_last_flush = os.time() + self.sc_params.params.__internal_ts_ack_last_flush = os.time() + 
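-- A minimal parameter set for this connector, as a sketch; the four entries
-- are the mandatory ones declared in EventQueue.new above and every value is
-- a placeholder:
--
--   local params = {
--     dataset = "centreon",
--     key_file_path = "/etc/centreon-broker/keyfile.json",
--     api_key = "my-api-key",
--     scope_list = "https://www.googleapis.com/auth/bigquery"
--   }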
self.sc_params.params.__internal_ts_dt_last_flush = os.time() + self.sc_params.params.__internal_ts_ba_last_flush = os.time() + + self.sc_params.params.host_table = params.host_table or "hosts" + self.sc_params.params.service_table = params.service_table or "services" + self.sc_params.params.ack_table = params.ack_table or "acknowledgements" + self.sc_params.params.downtime_table = params.downtime_table or "downtimes" + self.sc_params.params.ba_table = params.ba_table or "bas" + self.sc_params.params._sc_gbq_use_default_schemas = 1 + + self.sc_params.params.google_bq_api_url = params.google_bq_api_url or "https://content-bigquery.googleapis.com/bigquery/v2" + + self.sc_macros = sc_macros.new(self.sc_common, self.sc_params.params, self.sc_logger) + self.sc_oauth = sc_oauth.new(self.sc_params.params, self.sc_common, self.sc_logger) -- , self.sc_common, self.sc_logger) + self.sc_bq = sc_bq.new(self.sc_common, self.sc_params.params, self.sc_logger) + self.sc_bq:get_tables_schema() + + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_event() + + self.sc_event.event.formated_event = {} + self.sc_event.event.formated_event.json = {} + + for column, value in pairs(self.sc_bq.schemas[self.sc_event.event.category][self.sc_event.event.element]) do + self.sc_event.event.formated_event.json[column] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) + end + + self:add() + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add () + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + self.events[category][element][#self.events[category][element] + 1] = self.sc_event.event.formated_event +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored host events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush_host () + self.sc_logger:debug("EventQueue:flush: Concatenating all the host events as one string") + + -- send stored events + retval = self:send_data(self.sc_params.params.host_table) + + -- reset stored events list + self.events[1][14] = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_host_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored service events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush_service () + self.sc_logger:debug("EventQueue:flush: Concatenating all the service events as one string") + + -- send stored events + retval = self:send_data(self.sc_params.params.service_table) + + -- reset stored events list + self.events[1][24] = {} + + -- and update the 
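-- For reference, the queue cells this connector populates, keyed by broker
-- category id then element id as wired in EventQueue.new above:
--
--   self.events[1][1]   -- neb / acknowledgement
--   self.events[1][6]   -- neb / downtime
--   self.events[1][14]  -- neb / host_status
--   self.events[1][24]  -- neb / service_status
--   self.events[6][1]   -- bam / ba_status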
timestamp + self.sc_params.params.__internal_ts_service_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored ack events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush_ack () + self.sc_logger:debug("EventQueue:flush: Concatenating all the ack events as one string") + + -- send stored events + retval = self:send_data(self.sc_params.params.ack_table) + + -- reset stored events list + self.events[1][1] = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_ack_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored downtime events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush_dt () + self.sc_logger:debug("EventQueue:flush: Concatenating all the downtime events as one string") + + -- send stored events + retval = self:send_data(self.sc_params.params.downtime_table) + + -- reset stored events list + self.events[1][6] = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_dt_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored BA events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush_ba () + self.sc_logger:debug("EventQueue:flush: Concatenating all the BA events as one string") + + -- send stored events + retval = self:send_data(self.sc_params.params.ba_table) + + -- reset stored events list + self.events[6][1] = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_ba_last_flush = os.time() + + return retval +end + +function EventQueue:flush_old_queues() + local current_time = os.time() + + -- flush old ack events + if #self.events[1][1] > 0 and os.time() - self.sc_params.params.__internal_ts_ack_last_flush > self.sc_params.params.max_buffer_age then + self:flush_ack() + self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_ack_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data") + end + + -- flush old downtime events + if #self.events[1][6] > 0 and os.time() - self.sc_params.params.__internal_ts_dt_last_flush > self.sc_params.params.max_buffer_age then + self:flush_dt() + self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_dt_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data") + end + + -- flush old host events + if #self.events[1][14] > 0 and os.time() - self.sc_params.params.__internal_ts_host_last_flush > self.sc_params.params.max_buffer_age then + self:flush_host() + self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_host_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. 
") is reached, flushing data") + end + + -- flush old service events + if #self.events[1][24] > 0 and os.time() - self.sc_params.params.__internal_ts_service_last_flush > self.sc_params.params.max_buffer_age then + self:flush_service() + self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_service_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data") + end + + -- flush old BA events + if #self.events[6][1] > 0 and os.time() - self.sc_params.params.__internal_ts_ba_last_flush > self.sc_params.params.max_buffer_age then + self:flush_ba() + self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_ba_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data") + end +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external tool +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:send_data (table_name) + local data = { + rows = {} + } + + -- concatenate all stored event in the data variable + for index, formated_event in ipairs(self.events[self.sc_event.event.category][self.sc_event.event.element]) do + data.rows[index] = formated_event + end + + self.sc_logger:info("EventQueue:send_data: creating json: " .. tostring(broker.json_encode(data))) + + -- output data to the tool we want + if self:call(broker.json_encode(data), table_name) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue:call send the data where we want it to be +-- @param data (string) the data we want to send +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:call (data, table_name) + local res = "" + local headers = { + "Authorization: Bearer " .. self.sc_oauth:get_access_token(), + "Content-Type: application/json" + } + local url = self.sc_params.params.google_bq_api_url .. "/projects/" .. self.sc_oauth.key_table.project_id .. "/datasets/" + .. self.sc_params.params.dataset .. "/tables/" .. table_name .. "/insertAll?alt=json&key=" .. self.sc_params.params.api_key + + -- initiate curl + local request = curl.easy() + :setopt_url(url) + :setopt_writefunction(function (response) + res = res .. response + end) + + -- add postfields url params + if data then + request:setopt_postfields(data) + end + + self.sc_logger:info("[EventQueue:call]: URL: " .. tostring(url)) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= "" and self.sc_params.params.proxy_address) then + if (self.sc_params.params.proxy_port ~= "" and self.sc_params.params.proxy_port) then + request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:call]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '' and self.sc_params.params.proxy_username) then + if (self.sc_params.params.proxy_password ~= '' and self.sc_params.params.proxy_username) then + request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. 
self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:call]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- set up headers + request:setopt(curl.OPT_HTTPHEADER, headers) + + -- run query + request:perform() + self.sc_logger:info("EventQueue:call: sending data: " .. tostring(data)) + + local code = request:getinfo(curl.INFO_RESPONSE_CODE) + + if code ~= 200 then + self.sc_logger:error("[EventQueue:call]: http code is: " .. tostring(code) .. ". Result is: " ..tostring(res)) + end + + return true +end + +local queue + +function init(params) + queue = EventQueue.new(params) +end + +function write(event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + return true + end + + -- First, are there some old events waiting in the flush queue ? + queue:flush_old_queues() + + -- Then we check that the event queue is not already full + if (#queue.events[queue.sc_event.event.category][queue.sc_event.event.element] >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events[queue.sc_event.event.category][queue.sc_event.event.element] .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") + queue.flush[queue.sc_event.event.category][queue.sc_event.event.element]() + end + + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_event() + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events[queue.sc_event.event.category][queue.sc_event.event.element] >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events[queue.sc_event.event.category][queue.sc_event.event.element] .. "/" .. queue.sc_params.params.max_buffer_size .. 
") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") + queue.flush[queue.sc_event.event.category][queue.sc_event.event.element]() + end + + return true +end diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua index ccc86c6dae2..d001c76e9a3 100644 --- a/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua +++ b/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua @@ -35,14 +35,14 @@ local ltn12 = require("ltn12") -------------------------------------------------------------------------------- local event_queue = { - __internal_ts_last_flush = nil, - http_server_address = "", - http_server_port = 8086, - http_server_protocol = "http", - events = {}, - influx_database = "mydb", - max_buffer_size = 5000, - max_buffer_age = 5 + __internal_ts_last_flush = nil, + http_server_address = "", + http_server_port = 8086, + http_server_protocol = "http", + events = {}, + influx_database = "mydb", + max_buffer_size = 5000, + max_buffer_age = 5 } -- Constructor: event_queue:new diff --git a/stream-connectors/centreon-certified/kafka/kafka-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-apiv2.lua new file mode 100644 index 00000000000..d1d4862e9d6 --- /dev/null +++ b/stream-connectors/centreon-certified/kafka/kafka-apiv2.lua @@ -0,0 +1,271 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local kafka_config = require("centreon-stream-connectors-lib.rdkafka.config") +local kafka_producer = require("centreon-stream-connectors-lib.rdkafka.producer") +local kafka_topic_config = require("centreon-stream-connectors-lib.rdkafka.topic_config") +local kafka_topic = require("centreon-stream-connectors-lib.rdkafka.topic") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + -- listing madantory parameters + local mandatory_parameters = { + [1] = "topic", + [2] = "brokers" + } + + -- initiate EventQueue variables + self.events = {} + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/kafka-stream-connector.log" + local log_level = params.log_level or 3 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + self.sc_kafka_config = kafka_config.new() + self.sc_kafka_topic_config = kafka_topic_config.new() + + -- initiate parameters dedicated to this stream connector + self.sc_params.params.kafka_partition_ua = -1 + self.sc_params.params.topic = params.topic + self.sc_params.params.brokers = params.brokers + self.sc_params.params.centreon_name = params.centreon_name + + -- overriding default parameters for this stream connector + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- handle kafka params + self.sc_params:get_kafka_params(self.sc_kafka_config, params) + + -- apply users 
params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_kafka_config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) + self.sc_kafka_config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) + -- initiate a kafka producer + self.sc_kafka_producer = kafka_producer.new(self.sc_kafka_config) + + -- add kafka brokers to the producer + local kafka_brokers = self.sc_common:split(self.sc_params.params.brokers, ',') + for index, broker in ipairs(kafka_brokers) do + self.sc_kafka_producer:brokers_add(broker) + end + + -- add kafka topic config + self.sc_kafka_topic_config["auto.commit.enable"] = "true" + self.sc_kafka_topic = kafka_topic.new(self.sc_kafka_producer, self.sc_params.params.topic, self.sc_kafka_topic_config) + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + if category == 1 and element == 14 then + self.sc_event.event.formated_event = self:format_host_status() + elseif category == 1 and element == 24 then + self.sc_event.event.formated_event = self:format_service_status() + elseif category == 1 and element == 5 then + self.sc_event.event.formated_event = self:format_downtime() + elseif category == 6 and element == 1 then + self.sc_event.event.formated_event = self:format_ba_status() + end + + self:add() + + return true +end + +function EventQueue:format_host_status() + local data = { + ["alerte.alerte_emetteur"] = tostring(self.sc_params.params.centreon_name) + .. ";" .. self.sc_event.event.cache.host.name .. ";-", + ["alerte.alerte_libelle"] = self.sc_event.event.cache.host.name, + ["alerte.alerte_statut"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["alerte.alerte_message"] = self.sc_common:ifnil_or_empty(string.match(string.gsub(self.sc_event.event.output, '\\', "_"), "^(.*)\n"), "no output"), + ["alerte.alerte_id"] = tostring(self.sc_params.params.centreon_name) + .. ";" .. self.sc_event.event.cache.host.name .. ";", + ["alerte.alerte_criticite"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["alerte.custom_data.ticket_description"] = "", + ["alerte.custom_data.ticket_note"] = "" + } + + return data +end + +function EventQueue:format_service_status() + local data = { + ["alerte.alerte_emetteur"] = tostring(self.sc_params.params.centreon_name) + .. ";" .. self.sc_event.event.cache.host.name .. ";" .. self.sc_event.event.cache.service.description, + ["alerte.alerte_libelle"] = self.sc_event.event.cache.host.name .. "_" .. self.sc_event.event.cache.service.description, + ["alerte.alerte_statut"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["alerte.alerte_message"] = self.sc_common:ifnil_or_empty(string.match(string.gsub(self.sc_event.event.output, '\\', "_"), "^(.*)\n"), "no output"), + ["alerte.alerte_id"] = tostring(self.sc_params.params.centreon_name) + .. 
";" .. self.sc_event.event.cache.host.name .. ";" .. self.sc_event.event.cache.service.description, + ["alerte.alerte_criticite"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["alerte.custom_data.ticket_description"] = "", + ["alerte.custom_data.ticket_note"] = "" + } + + return data +end + +function EventQueue:format_ba_status() + local data = { + ["alerte.alerte_emetteur"] = tostring(self.sc_params.params.centreon_name) .. ";Business Activity" .. self.sc_event.event.cache.ba.ba_name, + ["alerte.alerte_libelle"] = "Business_Activity_" .. self.sc_event.event.cache.ba.ba_name, + ["alerte.alerte_statut"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["alerte.alerte_message"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["alerte.alerte_id"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["alerte.alerte_criticite"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["alerte.custom_data.ticket_description"] = "", + ["alerte.custom_data.ticket_note"] = "" + } + + return data +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add () + -- store event in self.events list + self.events[#self.events + 1] = self.sc_event.event.formated_event +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush () + self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string") + + -- send stored events + retval = self:send_data() + + -- reset stored events list + self.events = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external tool +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:send_data () + local data = "" + local counter = 0 + + -- concatenate all stored event in the data variable + for _, formated_event in ipairs(self.events) do + if counter == 0 then + data = broker.json_encode(formated_event) + counter = counter + 1 + else + data = data .. "," .. broker.json_encode(formated_event) + end + end + + self.sc_logger:debug("EventQueue:send_data: creating json: " .. 
tostring(data)) + + -- output data to the tool we want + if self:call(data) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue:call send the data where we want it to be +-- @param data (string) the data we want to send +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:call (data) + self.sc_kafka_producer:produce(self.sc_kafka_topic, self.sc_params.params.kafka_partition_ua, data) + + return true +end + +local queue + +function init(params) + queue = EventQueue.new(params) +end + +function write(event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + return true + end + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then + queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") + queue:flush() + end + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_event() + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached, flushing data") + queue:flush() + end + + return true +end \ No newline at end of file diff --git a/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua index cb6655b58ad..3a360ee53dc 100644 --- a/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua +++ b/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua @@ -807,12 +807,13 @@ local queue -------------------------------------------------------------------------------- function init (parameters) logfile = parameters.logfile or "/var/log/centreon-broker/connector-opsgenie.log" + log_level = parameters.log_level or 1 if not parameters.app_api_token or not parameters.integration_api_token then broker_log:error(1,'Required parameters are: api_token. 
There type must be string') end - broker_log:set_parameters(1, logfile) + broker_log:set_parameters(log_level, logfile) broker_log:info(1, "Parameters") for i,v in pairs(parameters) do if i == 'app_api_token' or i == 'integration_api_token' then @@ -977,8 +978,7 @@ function write (event) -- Then we check that the event queue is not already full if (#queue.events >= queue.max_buffer_size) then - broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") - os.execute("sleep " .. tonumber(1)) + broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") queue:flush() end diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua index b546a53f3b9..546cfcb2882 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua @@ -391,8 +391,7 @@ function write(e) -- Then we check that the event queue is not already full if (#queue.events >= queue.max_buffer_size) then - broker_log:warning(1, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") - os.execute("sleep " .. tonumber(1)) + broker_log:warning(1, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") return queue:flush() end diff --git a/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua index 07f947705e1..3fe09e6c3fc 100644 --- a/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua +++ b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua @@ -918,8 +918,7 @@ function write (event) -- Then we check that the event queue is not already full if (#queue.events >= queue.max_buffer_size) then - broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") - os.execute("sleep " .. tonumber(1)) + broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") queue:flush() end diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua b/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua index b0b06fa3082..10cd3c44076 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua @@ -838,8 +838,7 @@ function write (event) -- Then we check that the event queue is not already full if (#queue.events >= queue.max_buffer_size) then - broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") - os.execute("sleep " .. 
tonumber(1)) + broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") queue:flush() end diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-apiv2.lua new file mode 100644 index 00000000000..5e70795384e --- /dev/null +++ b/stream-connectors/centreon-certified/servicenow/servicenow-apiv2.lua @@ -0,0 +1,427 @@ +#!/usr/bin/lua + +-------------------------------------------------------------------------------- +-- Centreon Broker Service Now connector +-- documentation: https://docs.centreon.com/current/en/integrations/stream-connectors/servicenow.html +-------------------------------------------------------------------------------- + + +-- libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param params The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue:new (params) + local self = {} + local mandatory_parameters = { + [1] = "instance", + [2] = "client_id", + [3] = "client_secret", + [4] = "username", + [5] = "password" + } + + self.tokens = {} + self.tokens.authToken = nil + self.tokens.refreshToken = nil + + + self.events = {} + self.fail = false + + local logfile = params.logfile or "/var/log/centreon-broker/servicenow-stream-connector.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + self.sc_params.params.instance = params.instance + self.sc_params.params.client_id = params.client_id + self.sc_params.params.client_secret = params.client_secret + self.sc_params.params.username = params.username + self.sc_params.params.password = params.password + self.sc_params.params.proxy_address = params.proxy_address or '' + self.sc_params.params.proxy_port = params.proxy_port or '' + self.sc_params.params.proxy_username = params.proxy_username or '' + self.sc_params.params.proxy_password = params.proxy_password or '' + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + setmetatable(self, { __index = EventQueue }) + + return self +end + +-------------------------------------------------------------------------------- +-- getAuthToken: obtain an auth token +-- 
@return {string} self.tokens.authToken.token, the auth token +-------------------------------------------------------------------------------- +function EventQueue:getAuthToken () + if not self:refreshTokenIsValid() then + self:authToken() + end + + if not self:accessTokenIsValid() then + self:refreshToken(self.tokens.refreshToken.token) + end + + return self.tokens.authToken.token +end + +-------------------------------------------------------------------------------- +-- authToken: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:authToken () + local data = "grant_type=password&client_id=" .. self.sc_params.params.client_id .. "&client_secret=" .. self.sc_params.params.client_secret .. "&username=" .. self.sc_params.params.username .. "&password=" .. self.sc_params.params.password + + local res = self:call( + "oauth_token.do", + "POST", + data + ) + + if not res.access_token then + broker_log:error(1, "EventQueue:authToken: Authentication failed, couldn't get tokens") + return false + end + + self.tokens.authToken = { + token = res.access_token, + expTime = os.time(os.date("!*t")) + 1700 + } + + self.tokens.refreshToken = { + token = res.refresh_token, + expTime = os.time(os.date("!*t")) + 360000 + } +end + +-------------------------------------------------------------------------------- +-- refreshToken: refresh auth token +-------------------------------------------------------------------------------- +function EventQueue:refreshToken (token) + local data = "grant_type=refresh_token&client_id=" .. self.sc_params.params.client_id .. "&client_secret=" .. self.sc_params.params.client_secret .. "&username=" .. self.sc_params.params.username .. "&password=" .. self.sc_params.params.password .. "&refresh_token=" .. 
token + + local res = self:call( + "oauth_token.do", + "POST", + data + ) + + if not res.access_token then + broker_log:error(1, 'EventQueue:refreshToken Bad access token') + return false + end + + self.tokens.authToken = { + token = res.access_token, + expTime = os.time(os.date("!*t")) + 1700 + } +end + +-------------------------------------------------------------------------------- +-- refreshTokenIsValid: check whether the refresh token is still valid +-------------------------------------------------------------------------------- +function EventQueue:refreshTokenIsValid () + if not self.tokens.refreshToken then + return false + end + + if os.time(os.date("!*t")) > self.tokens.refreshToken.expTime then + self.tokens.refreshToken = nil + return false + end + + return true +end + +-------------------------------------------------------------------------------- +-- accessTokenIsValid: check whether the access token is still valid +-------------------------------------------------------------------------------- +function EventQueue:accessTokenIsValid () + if not self.tokens.authToken then + return false + end + + if os.time(os.date("!*t")) > self.tokens.authToken.expTime then + self.tokens.authToken = nil + return false + end + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:call run api call +-- @param {string} url, the service now instance url +-- @param {string} method, the HTTP method that is used +-- @param {string} data, the data we want to send to service now +-- @param {string} authToken, the api auth token +-- @return {array} decoded output +-- @throw exception if http call fails or response is empty +-------------------------------------------------------------------------------- +function EventQueue:call (url, method, data, authToken) + method = method or "GET" + data = data or nil + authToken = authToken or nil + + local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. ".service-now.com/" .. tostring(url) + self.sc_logger:debug("EventQueue:call: Prepare url " .. endpoint) + + local res = "" + local request = curl.easy() + :setopt_url(endpoint) + :setopt_writefunction(function (response) + res = res .. tostring(response) + end) + + self.sc_logger:debug("EventQueue:call: Request initialize") + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("EventQueue:call: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("EventQueue:call: proxy_password parameter is not set but proxy_username is used") + end + end + + if not authToken then + if method ~= "GET" then + self.sc_logger:debug("EventQueue:call: Add form header") + request:setopt(curl.OPT_HTTPHEADER, { "Content-Type: application/x-www-form-urlencoded" }) + end + else + broker_log:info(3, "Add JSON header") + request:setopt( + curl.OPT_HTTPHEADER, + { + "Accept: application/json", + "Content-Type: application/json", + "Authorization: Bearer " .. 
authToken + } + ) + end + + if method ~= "GET" then + self.sc_logger:debug("EventQueue:call: Add post data") + request:setopt_postfields(data) + end + + self.sc_logger:debug("EventQueue:call: request body " .. tostring(data)) + self.sc_logger:debug("EventQueue:call: request header " .. tostring(authToken)) + self.sc_logger:warning("EventQueue:call: Call url " .. endpoint) + request:perform() + + respCode = request:getinfo(curl.INFO_RESPONSE_CODE) + self.sc_logger:debug("EventQueue:call: HTTP Code : " .. respCode) + self.sc_logger:debug("EventQueue:call: Response body : " .. tostring(res)) + + request:close() + + if respCode >= 300 then + self.sc_logger:error("EventQueue:call: HTTP Code : " .. respCode) + self.sc_logger:error("EventQueue:call: HTTP Error : " .. res) + return false + end + + if res == "" then + self.sc_logger:warning("EventQueue:call: HTTP Error : " .. res) + return false + end + + return broker.json_decode(res) +end + + +function EventQueue:format_event() + self.sc_event.event.formated_event = { + source = "centreon", + event_class = "centreon", + severity = 5, + node = self.sc_event.event.cache.host.name, + time_of_event = os.date("!%Y-%m-%d %H:%M:%S", self.sc_event.event.last_check), + description = self.sc_event.event.output + } + + if self.sc_event.event.element == 14 then + + self.sc_event.event.formated_event.resource = self.sc_event.event.cache.host.name + self.sc_event.event.formated_event.severity = self.sc_event.event.state + + elseif self.sc_event.event.element == 24 then + self.sc_event.event.formated_event.resource = self.sc_event.event.cache.service.description + if self.sc_event.event.state == 0 then + self.sc_event.event.formated_event.severity = 0 + elseif self.sc_event.event.state == 1 then + self.sc_event.event.formated_event.severity = 3 + elseif self.sc_event.event.state == 2 then + self.sc_event.event.formated_event.severity = 1 + elseif self.sc_event.event.state == 3 then + self.sc_event.event.formated_event.severity = 4 + end + end + + self:add() +end + + + +local queue + +-------------------------------------------------------------------------------- +-- init, initiate stream connector with parameters from the configuration file +-- @param {table} parameters, the table with all the configuration parameters +-------------------------------------------------------------------------------- +function init (parameters) + queue = EventQueue:new(parameters) +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the queue +-- @param {table} eventData, the data related to the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:add () + self.events[#self.events + 1] = self.sc_event.event.formated_event + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored events +-- Called when the max number of events or the max age are reached +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:flush () + self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string") + + self:send_data() + + self.events = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_last_flush = os.time() + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external 
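-- The payload that send_data below posts to "api/global/em/jsonv2" is shaped
-- as follows (a sketch with placeholder values; the keys come from
-- format_event above):
--
--   {"records":[
--     {"source":"centreon","event_class":"centreon","severity":1,
--      "node":"srv-01","resource":"disk-root",
--      "time_of_event":"2021-06-14 20:42:22",
--      "description":"DISK CRITICAL - free space 5%"}
--   ]}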
tool +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:send_data () + local data = '' + local authToken = self:getAuthToken() + local counter = 0 + + for _, raw_event in ipairs(self.events) do + if counter == 0 then + data = broker.json_encode(raw_event) + counter = counter + 1 + else + data = data .. ',' .. broker.json_encode(raw_event) + end + end + + data = '{"records":[' .. data .. ']}' + self.sc_logger:notice('EventQueue:send_data: creating json: ' .. data) + + if self:call( + "api/global/em/jsonv2", + "POST", + data, + authToken + ) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- write, +-- @param {array} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + return true + end + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then + queue.sc_logger:warning("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:warning("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") + queue:flush() + end + + -- adding event to the queue + if queue.sc_event:is_valid_event() then + queue:format_event() + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:warning( "write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. 
") is reached, flushing data") + return queue:flush() + end + + return true +end + diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/google/auth/oauth.lua b/stream-connectors/modules/centreon-stream-connectors-lib/google/auth/oauth.lua new file mode 100644 index 00000000000..af8ef79b3e0 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/google/auth/oauth.lua @@ -0,0 +1,290 @@ +#!/usr/bin/lua + +--- +-- oauth module for google oauth +-- @module oauth +-- @alias oauth +local oauth = {} + +local mime = require("mime") +local crypto = require("crypto") +local curl = require("cURL") +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") + + + +local OAuth = {} + +--- oauth constructor +-- @param params (table) the table of all the stream connector parameters +-- @param sc_common (object) a sc_common object instance +-- @param sc_logger (object) a sc_logger object instance +function oauth.new(params, sc_common, sc_logger) + local self = {} + + -- initiate stream connector logger + self.sc_logger = sc_logger + if not self.sc_logger then + self.sc_logger = sc_logger.new("/var/log/centreon-broker/gbq.log", 3) + end + self.sc_common = sc_common + + -- load stream connector parameters + self.params = params + + -- initiate standard params for google oauth + self.jwt_info = { + scope = params.scope_list, + api_key = params.api_key, + key_file = params.key_file_path, + hash_protocol = "sha256WithRSAEncryption", + jwt_header = {} + } + + -- put jwt header in params to be able to override them if needed + self.jwt_info.jwt_header = { + alg = "RS256", + typ = "JWT" + } + + setmetatable(self, { __index = OAuth }) + return self +end + +--- create_jwt_token: create a jwt token +-- @return false (boolean) if we can't open the key file nor create the claim nor the signature +-- @return true (boolean) if the jwt token has been successfully created +function OAuth:create_jwt_token() + + -- retrieve information that are in the key file + if not self:get_key_file() then + self.sc_logger:error("[google.auth.oauth:create_jwt]: an error occured while getting file: " + .. tostring(self.jwt_info.key_file)) + + return false + end + + -- b64 encoded json of the jwt_header + -- local jwt_header = mime.b64(broker.json_encode(self.jwt_info.jwt_header)) + local jwt_header = mime.b64(broker.json_encode(self.jwt_info.jwt_header)) + + -- build the claim part of the jwt + if not self:create_jwt_claim() then + self.sc_logger:error("[google.auth.oauth:create_jwt]: an error occured while creating the jwt claim") + + return false + end + + -- b64 encoded json of the jwt_claim + local jwt_claim = mime.b64(broker.json_encode(self.jwt_claim)) + + local string_to_sign = jwt_header .. "." .. jwt_claim + + -- sign our jwt_header and claim + if not self:create_signature(string_to_sign) then + self.sc_logger:error("[google.auth.oauth:create_jwt]: couldn't sign the concatenation of" + .. " the JWT header and the JWT claim.") + + return false + end + + -- create our jwt_token using the signature + self.jwt_token = string_to_sign .. "." .. 
mime.b64(self.signature) + + return true +end + +--- get_key_file: open the key file and store information in self.key_table +-- @return false (boolean) if the key file is not found or it is not a valid json file +-- @return true (boolean) if the information from the key file has been successfully loaded in self.key_table +function OAuth:get_key_file() + local file = io.open(self.jwt_info.key_file, "r") + + -- return false if we can't open the file + if not file then + self.sc_logger:error("[google.auth.oauth:get_key_file]: couldn't open file " + .. tostring(self.jwt_info.key_file) .. ". Make sure your key file is there.") + return false + end + + local file_content = file:read("*a") + io.close(file) + + local key_table = broker.json_decode(file_content) + + -- return false if json couldn't be parsed + if (type(key_table) ~= "table") then + self.sc_logger:error("[google.auth.oauth:get_key_file]: the key file " + .. tostring(self.jwt_info.key_file) .. ". Is not a valid json file.") + return false + end + + self.key_table = key_table + return true +end + +--- create_jwt_claim: create the claim for the jwt token using information from the key table +-- @return false (boolean) if a mandatory information is missing in the key file. +-- @return true (boolean) if the claim has been successfully created +function OAuth:create_jwt_claim() + -- return false if there is a missing parameter in the key table + if + not self.key_table.client_email or + not self.key_table.auth_uri or + not self.key_table.token_uri or + not self.key_table.private_key or + not self.key_table.project_id + then + self.sc_logger:error("[google.auth.oauth:create_jwt_claim]: one of the following information wasn't found in the key_file:" + .. " client_email, auth_uri, token_uri, private_key or project_id. Make sure that " + .. tostring(self.key_file) .. " is a valid key file.") + return false + end + + -- jwt claim time to live + local iat = os.time() + self.jwt_expiration_date = iat + 3600 + + -- create jwt_claim table + self.jwt_claim = { + iss = self.key_table.client_email, + aud = self.key_table.token_uri, + scope = self.jwt_info.scope, + iat = iat, + exp = self.jwt_expiration_date + } + + return true +end + +--- create_signature: sign a string using the hash protocol provided by the user in the hash_protocol parameter +-- @param string_to_sign (string) the string that must be signed +-- @return false (boolean) if the key object is not created using the private key from the key file or if the sign operation failed +-- @return true (boolean) if the string has been successfully signed +function OAuth:create_signature(string_to_sign) + -- create a pkey object + local private_key_object = crypto.pkey.from_pem(self.key_table.private_key, true) + + -- return if the pkey object is not valid + if not private_key_object then + self.sc_logger:error("[google.auth.oauth:create_signature]: couldn't create private key object using crypto lib and" + .. " private key from key file " .. tostring(self.jwt_info.key_file)) + + return false + end + + -- sign the string + local signature = crypto.sign(self.jwt_info.hash_protocol, string_to_sign, private_key_object) + + -- return if string is not signed + if not signature then + self.sc_logger:error("[google.auth.oauth:create_signature]: couldn't sign string using crypto lib and the hash protocol: " + .. 
tostring(self.jwt_info.hash_protocol)) + + return false + end + + self.signature = signature + return true +end + +--- get_access_token: get an access token using the jwt token +-- @return false (boolean) if a jwt token needs to be generated and the operation fails or if we can't get access token from google api +-- @return access_token (string) the access token from google api +function OAuth:get_access_token() + + -- check if it is really needed to generate a new access_token + if not self.access_token or os.time() > self.jwt_expiration_date - 60 then + self.sc_logger:info("[google.auth.oauth:get_access_token]: no jwt_token found or jwt token expiration date has been reached." + .. " Generating a new JWT token") + + -- generate a new jwt token before asking for an access token + if not self:create_jwt_token() then + self.sc_logger:error("[google.auth.oauth:get_access_token]: couldn't generate a new JWT token.") + return false + end + else + -- an already valid access_token exist, give this one instead of a new one + return self.access_token + end + + local headers = { + 'Content-Type: application/x-www-form-urlencoded' + } + + self.sc_logger:info("[google.auth.oauth:get_access_token]: sending jwt token " .. tostring(self.jwt_token)) + + local data = { + grant_type = "urn:ietf:params:oauth:grant-type:jwt-bearer", + assertion = self.jwt_token + } + + -- ask google api for an access token + local result = broker.json_decode(self:curl_google(self.key_table.token_uri, headers, self.sc_common:generate_postfield_param_string(data))) + + -- return false if we didn't get an access token + if not result or not result.access_token then + self.sc_logger:error("[google.auth.oauth:get_access_token]: couldn't get access token") + return false + end + + self.access_token = result.access_token + return self.access_token +end + +--- curl_google: query google using curl +-- @param url (string) the google api url +-- @param headers (table) the curl http headers +-- @param data (string) [opt] url encoded url parameters +function OAuth:curl_google(url, headers, data) + local res = "" + -- initiate curl + local request = curl.easy() + :setopt_url(url) + :setopt_writefunction(function (response) + res = res .. response + end) + + -- add postfields url params + if data then + request:setopt_postfields(data) + end + + self.sc_logger:info("[google.auth.oauth:curl_google]: URL: " .. tostring(url) .. ". data " .. data) + + -- set proxy address configuration + if (self.params.proxy_address ~= "" and self.params.proxy_address) then + if (self.params.proxy_port ~= "" and self.params.proxy_port) then + request:setopt(curl.OPT_PROXY, self.params.proxy_address .. ':' .. self.params.proxy_port) + else + self.sc_logger:error("[google.auth.oauth:curl_google]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.params.proxy_username ~= '' and self.params.proxy_username) then + if (self.params.proxy_password ~= '' and self.params.proxy_username) then + request:setopt(curl.OPT_PROXYUSERPWD, self.params.proxy_username .. ':' .. self.params.proxy_password) + else + self.sc_logger:error("[google.auth.oauth:curl_google]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- set up headers + request:setopt(curl.OPT_HTTPHEADER, headers) + + -- run query + request:perform() + + local code = request:getinfo(curl.INFO_RESPONSE_CODE) + + if code ~= 200 then + self.sc_logger:error("[google.auth.oauth:curl_google]: http code is: " .. 
tostring(code) .. ". Result is: " ..tostring(res)) + return false + end + + return res +end + +return oauth \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua b/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua new file mode 100644 index 00000000000..c7beb0edd97 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua @@ -0,0 +1,196 @@ +--- +-- bigquery module for google bigquery +-- @module bigquery +-- @alias bigquery +local bigquery = {} + +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_common = require("centreon-stream-connectors-lib.sc_common") + +local BigQuery = {} + +--- module constructor +-- @param params (table) table of all the stream connector parameters +-- @sc_logger (object) instance of the sc_logger module +function bigquery.new(params, sc_logger) + local self = {} + + -- initiate sc_logger + self.sc_logger = sc_logger + if not self.sc_logger then + self.sc_logger = sc_logger.new() + end + + -- initiate parameters + self.params = params + + -- initiate bigquery table schema mapping (1 = neb, 6 = bam) + self.schemas = { + [1] = {}, + [6] = {} + } + + setmetatable(self, { __index = BigQuery }) + return self +end + +--- get_tables_schema: load tables schemas according to the stream connector configuration +-- @return true (boolean) +function BigQuery:get_tables_schema() + -- use default schema + if self.params._sc_gbq_use_default_schemas == 1 then + self.schemas[1][14] = self:default_host_table_schema() + self.schemas[1][24] = self:default_service_table_schema() + self.schemas[1][1] = self:default_ack_table_schema() + self.schemas[1][6] = self:default_dt_table_schema() + self.schemas[6][1] = self:default_ba_table_schema() + return true + end + + -- use a configuration file for all the schema + if self.params._sc_gbq_schema_config_file_path == 1 then + if self:load_tables_schema_file() then + return true + end + end + + -- create tables schemas from stream connector configuration itself (not the best idea) + if self.params._sc_gbq_use_default_schemas == 0 and self.params._sc_gbq_use_schema_config_file == 0 then + -- build hosts table schema + self:build_table_schema("^_sc_gbq_host_column_", "_sc_gbq_host_column_", self.schemas[1][14]) + + -- build services table schema + self:build_table_schema("^_sc_gbq_service_column_", "_sc_gbq_service_column_", self.schemas[1][24]) + + -- build ba table schema + self:build_table_schema("^_sc_gbq_ba_column_", "_sc_gbq_ba_column_", self.schemas[6][1]) + + -- build ack table schema + self:build_table_schema("^_sc_gbq_ack_column_", "_sc_gbq_ack_column_", self.schemas[1][1]) + + -- build dowtime table schema + self:build_table_schema("^_sc_gbq_dt_column_", "_sc_gbq_dt_column_", self.schemas[1][6]) + end + + return true +end + +--- build_table_schema: create a table schema using the stream connector tables configuration +-- @param regex (string) the regex that the stream connector param must match in order to identify it as a column name in the table schema +-- @param substract (string) the string that is going to be removed from the parameter name to isolate the name of the column +-- @param structure (table) the schema table in which the column name and value are going to be stored +function BigQuery:build_table_schema(regex, substract, structure) + for param_name, param_value in pairs(self.params) do + if string.find(param_name, regex) ~= nil then + 
structure[string.gsub(param_name, substract, "")] = param_value + end + end +end + +--- default_host_table_schema: create a standard schema for a host event table +-- @return host_table (table) the table that is going to be used as a schema for bigquery host table +function BigQuery:default_host_table_schema() + return { + host_id = "{host_id}", + host_name = "{cache.host.name}", + status = "{state}", + last_check = "{last_check}", + output = "{output}", + instance_id = "{cache.host.instance_id}" + } +end + +--- default_service_table_schema: create a standard schema for a service event table +-- @return service_table (table) the table that is going to be used as a schema for bigquery service table +function BigQuery:default_service_table_schema() + return { + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + last_check = "{last_check}", + output = "{output}", + instance_id = "{cache.host.instance_id}" + } +end + +--- default_ack_table_schema: create a standard schema for an ack event table +-- @return ack_table (table) the table that is going to be used as a schema for bigquery ack table +function BigQuery:default_ack_table_schema() + return { + author = "{author}", + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + output = "{output}", + instance_id = "{cache.host.instance_id}", + entry_time = "{entry_time}" + } +end + +--- default_dt_table_schema: create a standard schema for a downtime event table +-- @return downtime_table (table) the table that is going to be used as a schema for bigquery downtime table +function BigQuery:default_dt_table_schema() + return { + author = "{author}", + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + output = "{output}", + instance_id = "{cache.host.instance_id}", + actual_start_time = "{actual_start_time}", + actual_end_time = "{deletion_time}" + } +end + +--- default_ba_table_schema: create a standard schema for a BA event table +-- @return ba_table (table) the table that is going to be used as a schema for bigquery BA table +function BigQuery:default_ba_table_schema() + return { + ba_id = "{ba_id}", + ba_name = "{cache.ba.ba_name}", + status = "{state}" + } +end + +--- load_tables_schema_file: load a table schema from a json configuration file +-- @return false (boolean) if we can't open the configuration file or it is not a valid json file +-- @return true (boolean) if everything went fine +function BigQuery:load_tables_schema_file() + local file = io.open(self.params._sc_gbq_schema_config_file_path, "r") + + -- return false if we can't open the file + if not file then + self.sc_logger:error("[google.bq.bq_tabmes:load_tables_schema_file]: couldn't open file " + .. tostring(self.params._sc_gbq_schema_config_file_path) .. ". Make sure your table schema file is there.") + return false + end + + local file_content = file:read("*a") + io.close(file) + + local schemas = broker.json_decode(file_content) + + -- return false if json couldn't be parsed + if (type(schemas) ~= "table") then + self.sc_logger:error("[google.bq.bq_tabmes:load_tables_schema_file]: the table schema file " + .. tostring(self.params._sc_gbq_schema_config_file_path) .. ". 
Is not a valid json file.") + return false + end + + -- use default schema if we don't find a schema for a dedicated type of event + self.schemas[1][14] = schemas.host or self:default_host_table_schema() + self.schemas[1][24] = schemas.service or self:default_service_table_schema() + self.schemas[1][1] = schemas.ack or self:default_ack_table_schema() + self.schemas[1][6] = schemas.dt or self:default_dt_table_schema() + self.schemas[6][1] = schemas.ba or self:default_ba_table_schema() + + return true +end + +return bigquery \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua index 3057d0d4748..0fff3b9bd4d 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua @@ -14,6 +14,8 @@ local ScBroker = {} function sc_broker.new(logger) local self = {} + broker_api_version = 2 + self.logger = logger if not self.logger then self.logger = sc_logger.new() diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index fe97eb934bb..ddec8981467 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -144,4 +144,30 @@ function ScCommon:compare_numbers(firstNumber, secondNumber, operator) return false end +--- generate_postfield_param_string: convert a table of parameters into a url-encoded parameter string +-- @param params (table) the table of all url string parameters to convert +-- @return false (boolean) if params variable is not a table +-- @return param_string (string) the url encoded parameters string +function ScCommon:generate_postfield_param_string(params) + -- return false because params type is wrong + if (type(params) ~= "table") then + self.logger:error("[sc_common:generate_postfield_param_string]: parameters to convert aren't in a table") + return false + end + + local param_string = "" + + -- concatenate data in params table into a string + for field, value in pairs(params) do + if param_string == "" then + param_string = field .. "=" .. broker.url_encode(value) + else + param_string = param_string .. "&" .. field .. "=" .. broker.url_encode(value) + end + end + + -- return url encoded string + return param_string +end + return sc_common \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index 545499dd750..ead7e1e1514 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -239,6 +239,8 @@ function ScEvent:is_valid_host() self.sc_logger:warning("[sc_event:is_valid_host]: No name for host with id: " .. tostring(self.event.host_id) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) return false + elseif (not self.event.cache.host and self.params.skip_anon_events == 0) then + self.event.cache.host = { name = self.event.host_id } end -- force host name to be its id if no name has been found @@ -272,6 +274,8 @@ function ScEvent:is_valid_service() self.sc_logger:warning("[sc_event:is_valid_service]: Invalid description for service with id: " .. tostring(self.event.service_id) .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) return false + elseif (not self.event.cache.service and self.params.skip_anon_events == 0) then + self.event.cache.service = { description = self.event.service_id } end -- force service description to its id if no description has been found @@ -515,11 +519,13 @@ function ScEvent:is_valid_ba() self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA with id: " .. tostring(self.event.ba_id) .. ". Found BA name is: " .. tostring(self.event.cache.ba.ba_name) .. ". And skip anon event param is set to: " .. tostring(self.params.skip_anon_events)) return false + elseif (not self.event.cache.ba.ba_name and self.params.skip_anon_events == 0) then + self.event.cache.ba.ba_name = self.event.ba_id end -- force ba name to be its id if no name has been found if not self.event.cache.ba.ba_name then - self.event.cache.ba.ba_name = self.event.cache.ba.ba_name or self.event.ba_id + self.event.cache.ba.ba_name = self.event.ba_id end return true @@ -664,14 +670,19 @@ function ScEvent:is_valid_host_severity() return true end + -- initiate the severity table in the cache if it doesn't exist + if not self.event.cache.severity then + self.event.cache.severity = {} + end + -- get severity of the host from broker cache - self.event.cache.host_severity = self.sc_broker:get_severity(self.event.host_id) + self.event.cache.severity.host = self.sc_broker:get_severity(self.event.host_id) -- return false if host severity doesn't match - if not self.sc_common:compare_numbers(self.params.host_severity_threshold, self.event.cache.host_severity, self.params.host_severity_operator) then + if not self.sc_common:compare_numbers(self.params.host_severity_threshold, self.event.cache.severity.host, self.params.host_severity_operator) then self.sc_logger:debug("[sc_event:is_valid_host_severity]: dropping event because host with id: " .. tostring(self.event.host_id) .. " has an invalid severity. Severity is: " - .. tostring(self.event.cache.host_severity) .. ". host_severity_threshold (" .. tostring(self.params.host_severity_threshold) .. ") is " .. self.params.host_severity_operator - .. " to the severity of the host (" .. tostring(self.event.cache.host_severity) .. ")") + .. tostring(self.event.cache.severity.host) .. ". host_severity_threshold (" .. tostring(self.params.host_severity_threshold) .. ") is " .. self.params.host_severity_operator + .. " to the severity of the host (" .. tostring(self.event.cache.severity.host) .. ")") return false end @@ -686,14 +697,19 @@ function ScEvent:is_valid_service_severity() return true end + -- initiate the severity table in the cache if it doesn't exist + if not self.event.cache.severity then + self.event.cache.severity = {} + end + -- get severity of the host from broker cache - self.event.cache.service_severity = self.sc_broker:get_severity(self.event.host_id, self.event.service_id) + self.event.cache.severity.service = self.sc_broker:get_severity(self.event.host_id, self.event.service_id) -- return false if service severity doesn't match - if not self.sc_common:compare_numbers(self.params.service_severity_threshold, self.event.cache.service_severity, self.params.service_severity_operator) then + if not self.sc_common:compare_numbers(self.params.service_severity_threshold, self.event.cache.severity.service, self.params.service_severity_operator) then self.sc_logger:debug("[sc_event:is_valid_service_severity]: dropping event because service with id: " .. tostring(self.event.service_id) .. " has an invalid severity. 
Severity is: " - .. tostring(self.event.cache.service_severity) .. ". service_severity_threshold (" .. tostring(self.params.service_severity_threshold) .. ") is " .. self.params.service_severity_operator - .. " to the severity of the host (" .. tostring(self.event.cache.service_severity) .. ")") + .. tostring(self.event.cache.severity.service) .. ". service_severity_threshold (" .. tostring(self.params.service_severity_threshold) .. ") is " .. self.params.service_severity_operator + .. " to the severity of the host (" .. tostring(self.event.cache.severity.service) .. ")") return false end @@ -899,6 +915,10 @@ end --- get_downtime_host_status: retrieve the status of a host based on last_time_up/down dates found in cache (self.event.cache.host must be set) -- return status (number) the status code of the host function ScEvent:get_downtime_host_status() + -- if cache is not filled we can't get the state of the host + if not self.event.cache.host.last_time_up or not self.event.cache.host.last_time_down then + return "N/A" + end -- affect the status known dates to their respective status code local timestamp = { @@ -912,6 +932,15 @@ end --- get_downtime_service_status: retrieve the status of a service based on last_time_ok/warning/critical/unknown dates found in cache (self.event.cache.host must be set) -- return status (number) the status code of the service function ScEvent:get_downtime_service_status() + -- if cache is not filled we can't get the state of the service + if + not self.event.cache.host.last_time_ok + or not self.event.cache.host.last_time_warning + or not self.event.cache.service.last_time_critical + or not self.event.cache.service.last_time_unknown + then + return "N/A" + end -- affect the status known dates to their respective status code local timestamp = { @@ -959,6 +988,10 @@ function ScEvent:is_service_status_event_duplicated() if self.event.last_hard_state_change == self.event.last_check then return false end + + return true + --[[ + IT LOOKS LIKE THIS PIECE OF CODE IS USELESS -- map the status known dates to their respective status code local timestamp = { @@ -978,7 +1011,7 @@ function ScEvent:is_service_status_event_duplicated() -- at the end, it only remains two cases, the first one is a duplicated event. The second one is when we have: -- OK(H) --> NOT-OK(S) --> OK(H) - return true + ]]-- end --- is_host_status_event_duplicated: check if the host event is the same than the last one (will not work for UP(H) -> DOWN(S) -> UP(H)) @@ -995,6 +1028,9 @@ function ScEvent:is_host_status_event_duplicated() return false end + return true + --[[ + IT LOOKS LIKE THIS PIECE OF CODE IS USELESS -- map the status known dates to their respective status code local timestamp = { [0] = tonumber(self.event.cache.service.last_time_up), @@ -1012,7 +1048,7 @@ function ScEvent:is_host_status_event_duplicated() -- at the end, it only remains two cases, the first one is a duplicated event. 
The second one is when we have: -- UP(H) --> NOT-UP(S) --> UP(H) - return true + ]]-- end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua index dc5af962607..f63e9623505 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua @@ -76,6 +76,12 @@ function ScLogger:notice(message) broker_log:info(1, message) end +-- info: write an informational message +-- @param message (string) the message that will be written +function ScLogger:info(message) + broker_log:info(2,message) +end + --- debug: write a debug message -- @param message (string) the message that will be written function ScLogger:debug(message) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua new file mode 100644 index 00000000000..cf5629116d4 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -0,0 +1,370 @@ +#!/usr/bin/lua + +--- +-- Module to handle centreon macros (e.g: $HOSTADDRESS$) and sc macros (e.g: {cache.host.address}) +-- @module sc_macros +-- @alias sc_macros +local sc_macros = {} + +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") + +local ScMacros = {} + +--- sc_macros.new: sc_macros constructor +-- @param params (table) the stream connector parameter table +-- @param sc_logger (object) object instance from sc_logger module +function sc_macros.new(params, sc_logger) + local self = {} + + -- initiate mandatory libs + self.sc_logger = sc_logger + if not self.sc_logger then + self.sc_logger = sc_logger.new() + end + + -- initiate params + self.params = params + + -- mapping of macro that we will convert if asked + self.transform_macro = { + date = function () return self:transform_date(macro_value) end, + type = function () return self:transform_type(macro_value) end, + short = function () return self:transform_short(macro_value) end, + state = function () return self:transform_state(macro_value, event) end + } + + -- mapping of centreon standard macros to their stream connectors counterparts + self.centreon_macros = { + HOSTNAME = "{cache.host.name}", + HOSTDISPLAYNAME = "{cache.host.name}", + HOSTALIAS = "{cache.host.alias}", + HOSTADDRESS = "{cache.host.address}", + HOSTSTATE = "{cache.host.state_scstate}", + HOSTSTATEID = "{cache.host.state}", + LASTHOSTSTATE = "{cache.host.state_scstate}", + LASTHOSTSTATEID = "{cache.host.state}", + HOSTSTATETYPE = "{cache.host.state_type}", + HOSTATTEMPTS = "{cache.host.check_attempt}", + MAXHOSTATTEMPTS = "{cache.host.max_check_attempts}", + -- HOSTEVENTID doesn't exist + -- LASTHOSTEVENTID doesn't exist + -- HOSTPROBLEMID doesn't exist + -- LASTHOSTPROBLEMID doesn't exist + HOSTLATENCY = "{cache.host.latency}", + HOSTEXECUTIONTIME = "{cache.host.execution_time}", + -- HOSTDURATION doesn't exist + -- HOSTDURATIONSEC doesn't exist + HOSTDOWNTIME = "{cache.host.scheduled_downtime_depth}", + HOSTPERCENTCHANGE = "{percent_state_change}" , -- will be replaced by the service percent_state_change if event is about a service + -- HOSTGROUPNAME doesn't exist + -- HOSTGROUPNAMES doesn't exist + LASTHOSTCHECK = "{cache.host.last_check_value}", + LASTHOSTSTATECHANGE = "{cache.host.last_state_change}", + LASTHOSTUP = "{cache.host.last_time_up}", + LASTHOSTDOWN = "{cache.host.last_time_down}", + LASTHOSTUNREACHABLE = 
"{cache.host.last_time_unreachable}", + HOSTOUTPUT = "{cache.host.output_scshort}", + HOSTLONGOUTPUT = "{cache.host.output}", + HOSTPERFDATA = "{cache.host.perfdata}", + -- HOSTCHECKCOMMAND doesn't really exist + -- HOSTACKAUTHORS doesn't exist + -- HOSTACKAUTHORNAMES doesn't exist + -- HOSTACKAUTHORALIAS doesn't exist + -- HOSTACKAUTHORCOMMENT doesn't exist + HOSTACTIONURL = "{cache.host.action_url}", + HOSTNOTESURL = "{cache.host.notes_url}", + HOSTNOTES = "{cache.host.notes}", + -- TOTALHOSTSERVICES doesn't exist + -- TOTALHOSTSERVICESOK doesn't exist + -- TOTALHOSTSERVICESWARNING doesn't exist + -- TOTALHOSTSERVICESCRITICAL doesn't exist + -- TOTALHOSTSERVICESUNKNOWN doesn't exist + -- HOSTGROUPALIAS doesn't exist + -- HOSTGROUPMEMBERS doesn't exist + -- HOSTGROUPNOTES  doesn't exist + -- HOSTGROUPNOTESURL doesn't exist + -- HOSTGROUPACTIONURL doesn't exist + SERVICEDESC = "{cache.service.description}", + SERVICEDISPLAYNAME = "{cache.service.display_name}", + SERVICESTATE = "{cache.service.state_scstate}", + SERVICESTATEID = "{cache.service.state}", + LASTSERVICESTATE = "{cache.service.state_state}", + LASTSERVICESTATEID = "{cache.service.state}", + SERVICESTATETYPE = "{cache.service.state_type}", + SERVICEATTEMPT = "{cache.service.check_attempt}", + MAXSERVICEATTEMPTS = "{cache.service.max_check_attempts}", + SERVICEISVOLATILE = "{cache.service.volatile}", + -- SERVICEEVENTID doesn't exist + -- LASTSERVICEEVENTID doesn't exist + -- SERVICEPROBLEMID doesn't exist + -- LASTSERVICEPROBLEMID doesn't exist + SERVICELATENCY = "{cache.service.latency}", + SERVICEEXECUTIONTIME = "{cache.service.execution_time}", + -- SERVICEDURATION doesn't exist + -- SERVICEDURATIONSEC doesn't exist + SERVICEDOWNTIME = "{cache.service.scheduled_downtime_depth}", + SERVICEPERCENTCHANGE = "{percent_state_change}", + -- SERVICEGROUPNAME doesn't exist + -- SERVICEGROUPNAMES doesn't exist + LASTSERVICECHECK = "{cache.service.last_check_value}", + LASTSERVICESTATECHANGE = "{cache.service.last_state_change}", + LASTSERVICEOK = "{cache.service.last_time_ok}", + LASTSERVICEWARNING = "{cache.service.last_time_warning}", + LASTSERVICEUNKNOWN = "{cache.service.last_time_unknown}", + LASTSERVICECRITICAL = "{cache.service.last_time_critical}", + SERVICEOUTPUT = "{cache.service.output_scshort}", + LONGSERVICEOUTPUT = "{cache.service.output}", + SERVICEPERFDATA = "{cache.service.perfdata}", + -- SERVICECHECKCOMMAND doesn't exist + -- SERVICEACKAUTHOR doesn't exist + -- SERVICEACKAUTHORNAME  doesn't exist + -- SERVICEACKAUTHORALIAS doesn't exist + -- SERVICEACKCOMMENT doesn't exist + SERVICEACTIONURL = "{cache.service.action_url}", + SERVICENOTESURL = "{cache.service.notes_url}", + SERVICENOTES = "{cache.service.notes}" + -- SERVICEGROUPALIAS  doesn't exist + -- SERVICEGROUPMEMBERS  doesn't exist + -- SERVICEGROUPNOTES  doesn't exist + -- SERVICEGROUPNOTESURL doesn't exist + -- SERVICEGROUPACTIONURL doesn't exist + -- CONTACTNAME doesn't exist + -- CONTACTALIAS doesn't exist + -- CONTACTEMAIL doesn't exist + -- CONTACTPAGER doesn't exist + -- CONTACTADDRESS doesn't exist + -- CONTACTGROUPALIAS  doesn't exist + -- CONTACTGROUPMEMBERS  doesn't exist + -- TOTALHOSTSUP  doesn't exist + -- TOTALHOSTSDOWN  doesn't exist + -- TOTALHOSTSUNREACHABLE  doesn't exist + -- TOTALHOSTSDOWNUNHANDLED  doesn't exist + -- TOTALHOSTSUNREACHABLEUNHANDLED  doesn't exist + -- TOTALHOSTPROBLEMS  doesn't exist + -- TOTALHOSTPROBLEMSUNHANDLED  doesn't exist + -- TOTALSERVICESOK  doesn't exist + -- TOTALSERVICESWARNING  doesn't exist + -- 
TOTALSERVICESCRITICAL doesn't exist + -- TOTALSERVICESUNKNOWN doesn't exist + -- TOTALSERVICESWARNINGUNHANDLED doesn't exist + -- TOTALSERVICESCRITICALUNHANDLED doesn't exist + -- TOTALSERVICESUNKNOWNUNHANDLED doesn't exist + -- TOTALSERVICEPROBLEMS doesn't exist + -- TOTALSERVICEPROBLEMSUNHANDLED doesn't exist + -- NOTIFICATIONTYPE doesn't exist + -- NOTIFICATIONRECIPIENTS doesn't exist + -- NOTIFICATIONISESCALATED doesn't exist + -- NOTIFICATIONAUTHOR doesn't exist + -- NOTIFICATIONAUTHORNAME doesn't exist + -- NOTIFICATIONAUTHORALIAS doesn't exist + -- NOTIFICATIONCOMMENT doesn't exist + -- HOSTNOTIFICATIONNUMBER doesn't exist + -- HOSTNOTIFICATIONID doesn't exist + -- SERVICENOTIFICATIONNUMBER doesn't exist + -- SERVICENOTIFICATIONID doesn't exist + } + + setmetatable(self, { __index = ScMacros }) + return self +end + +--- replace_sc_macro: replace any stream connector macro with its value +-- @param string (string) the string in which there might be some stream connector macros to replace +-- @param event (table) the current event table +-- @return converted_string (string) the input string but with the macros replaced by their values +function ScMacros:replace_sc_macro(string, event) + local cache_macro_value = false + local event_macro_value = false + local converted_string = string + + -- find all macros, for example the string: + -- {cache.host.name} is the name of host with id: {host_id} + -- will generate two macros: {cache.host.name} and {host_id} + for macro in string.gmatch(string, "{.-}") do -- non-greedy pattern so each {...} macro is captured separately + self.sc_logger:debug("[sc_macros:replace_sc_macro]: found a macro, name is: " .. tostring(macro)) + + -- check if macro is in the cache + cache_macro_value = self:get_cache_macro(macro, event) + + -- replace all cache macros such as {cache.host.name} with their values + if cache_macro_value then + self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is a cache macro. Macro name: " + .. tostring(macro) .. ", value is: " .. tostring(cache_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) + converted_string = string.gsub(converted_string, macro, cache_macro_value) + else + -- if not in cache, try to find a matching value in the event itself + event_macro_value = self:get_event_macro(macro, event) + + -- replace all event macros such as {host_id} with their values + if event_macro_value then + self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is an event macro. Macro name: " + .. tostring(macro) .. ", value is: " .. tostring(event_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) + converted_string = string.gsub(converted_string, macro, event_macro_value) + else + self.sc_logger:error("[sc_macros:replace_sc_macro]: macro: " .. tostring(macro) .. ", is not a valid stream connector macro") + end + end + end + + return converted_string +end + +--- get_cache_macro: check if the macro is a macro whose value must be found in the cache +-- @param macro (string) the macro we want to check (for example: {cache.host.name}) +-- @param event (table) the event table (obviously, cache must be in the event table if we want to find something in it) +-- @return false (boolean) if the macro is not a cache macro ({host_id} instead of {cache.xxxx.yyy} for example) or we can't find the cache type or the macro in the cache +-- @return macro_value (string|boolean|number) the value of the macro +function ScMacros:get_cache_macro(macro, event) + + -- try to cut the macro in three parts (a separate local is used so the original macro stays available for logging) + local cache, cache_type, macro_name = string.match(macro, "^{(cache)%.(%w+)%.(.*)}") + + -- if cache is not set, it means that the macro wasn't a cache macro + if not cache then + self.sc_logger:info("[sc_macros:get_cache_macro]: macro: " .. tostring(macro) .. " is not a cache macro") + return false + end + + -- make sure that the type of cache is in the event table (for example event.cache.host must exist if the macro is {cache.host.name}) + if event.cache[cache_type] then + -- check if it is asked to transform the macro and if so, separate the real macro from the transformation flag + local macro_value, flag = self:get_transform_flag(macro_name) + + -- check if the macro is in the cache + if event.cache[cache_type][macro_value] then + if flag then + self.sc_logger:info("[sc_macros:get_cache_macro]: macro has a flag associated. Flag is: " .. tostring(flag) + .. ", a macro value conversion will be done.") + -- convert the found value according to the flag that has been sent + return self.transform_macro[flag](event.cache[cache_type][macro_value], event) + else + -- just return the value if there is no conversion required + return event.cache[cache_type][macro_value] + end + end + end + + return false +end + +--- get_event_macro: check if the macro is a macro whose value must be found in the event table (meaning not in the cache) +-- @param macro (string) the macro we want to check (for example: {host_id}) +-- @param event (table) the event table +-- @return false (boolean) if the macro is not found in the event +-- @return macro_value (string|boolean|number) the value of the macro +function ScMacros:get_event_macro(macro, event) + -- isolate the name of the macro + macro = string.match(macro, "{(.*)}") + + -- check if it is asked to transform the macro and if so, separate the real macro from the transformation flag + local macro_value, flag = self:get_transform_flag(macro) + + -- check if the macro is in the event + if event[macro_value] then + if flag then + self.sc_logger:info("[sc_macros:get_event_macro]: macro has a flag associated. Flag is: " .. tostring(flag) + .. 
", a macro value conversion will be done.") + -- convert the found value according to the flag that has been sent + return self.transform_macro[flag](event[macro_value], event) + else + -- just return the value if there is no conversion required + return event[macro_value] + end + end + + return false +end + +--- convert_centreon_macro: replace a centreon macro with its value +-- @param string (string) the string that may contain centreon macros +-- @param event (table) the event table +-- @return converted_string (string) the input string with its macros replaced with their values +function ScMacros:convert_centreon_macro(string, event) + local centreon_macro = false + local sc_macro_value = false + local converted_string = string + + -- get all standard macros + for macro in string.gmatch(string, "$%w$") do + self.sc_logger:debug("[sc_macros:convert_centreon_macro]: found a macro, name is: " .. tostring(macro)) + -- try to find the macro in the mapping table table self.centreon_macro + centreon_macro = self:get_centreon_macro(macro) + + -- if the macro has been found, try to get its value + if centreon_macro then + sc_macro_value = self:replace_sc_macro(centreon_macro, event) + + -- if a value has been found, replace the macro with the value + if sc_macro_value then + self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is a centreon macro. Macro name: " + .. tostring(macro) .. ", value is: " .. tostring(sc_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) + converted_string = string.gsub(converted_string, centreon_macro, sc_macro_value) + end + else + self.sc_logger:error("[sc_macros:convert_centreon_macro]: macro: " .. tostring(macro) .. " is not a valid centreon macro") + end + end + + return converted_string +end + +--- get_centreon_macro: try to find the macro in the centreon_macro mapping table +-- @param macro_name (string) the name of the macro ($HOSTNAME$ for example) +-- @return string (string) the value of the macro +-- @return false (boolean) if the macro is not in the mapping table +function ScMacros:get_centreon_macro(macro_name) + return self.centreon_macro[string.gsub(macro_name, "%$", "")] or false +end + +--- get_transform_flag: check if there is a tranformation flag linked to the macro and separate them +-- @param macro (string) the macro that needs to be checked +-- @return macro_value (string) the macro name ONLY if there is a flag +-- @return flag (string) the flag name if there is one +-- @return macro (string) the original macro if no flag were found +function ScMacros:get_transform_flag(macro) + -- separate macro and flag + local macro_value, flag = string.match(macro, "(.*)_sc(%w+)$") + + -- if there was a flag in the macro name, return the real macro name and its flag + if macro_value then + return macro_value, flag + end + + -- if no flag were found, just return the original macro + return macro +end + +--- transform_date: convert a timestamp macro into a human readable date using the format set in the timestamp_conversion_format parameter +-- @param macro_value (number) the timestamp that needs to be converted +-- @return date (string) the converted timestamp +function ScMacros:transform_date(macro_value) + return os.date(self.params.timestamp_conversion_format, os.time(os.date("!*t", macro_value) + self.params.local_time_diff_from_utc)) +end + +--- transform_short: mostly used to convert the event output into a short output by keeping only the data before the new line +-- @param macro_value (string) the string that 
needs to be shortened +-- @return string (string) the input string with only the first lne +function ScMacros:transform_short(macro_value) + return string.match(macro_value, "^(.*)\n") +end + +--- transform_type: convert a 0, 1 value into SOFT or HARD +-- @param macro_value (number) the number that indicates a SOFT or HARD state +-- @return string (string) HARD or SOFT +function ScMacros:transform_type(macro_value) + if macro_value == 0 then + return "SOFT" + else + return "HARD" + end +end + +--- transform_state: convert the number that represent the event status with its human readable counterpart +-- @param macro_value (number) the number that represents the status of the event +-- @param event (table) the event table +-- @return string (string) the status of the event in a human readable format (e.g: OK, WARNING) +function ScMacros:transform_state(macro_value, event) + return self.params.status_mapping[event.category][event.element][macro_value] +end + +return sc_macros \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index dc6463011cb..fec7ef2e816 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -61,13 +61,17 @@ function sc_params.new(common, logger) skip_nil_id = 1, -- enable or disable dedup - enable_host_status_dedup = 0, - enable_service_status_dedup = 0, + enable_host_status_dedup = 1, + enable_service_status_dedup = 1, -- communication parameters max_buffer_size = 1, max_buffer_age = 5, + -- time parameters + local_time_diff_from_utc = os.difftime(os.time(), os.time(os.date("!*t", os.time()))), + timestamp_conversion_format = "%Y-%m-%d %X", -- will print 2021-06-11 10:43:38 + -- internal parameters __internal_ts_last_flush = os.time(), @@ -223,7 +227,7 @@ function ScParams:param_override(user_params) end for param_name, param_value in pairs(user_params) do - if self.params[param_name] or string.find(param_name, "^_sc_kafka_") ~= nil then + if self.params[param_name] or string.find(param_name, "^_sc") ~= nil then self.params[param_name] = param_value self.logger:notice("[sc_params:param_override]: overriding parameter: " .. tostring(param_name) .. " with value: " .. tostring(param_value)) else @@ -282,6 +286,9 @@ function ScParams:is_mandatory_config_set(mandatory_params, params) .. 
" parameter is not set in the stream connector web configuration") return false end + + -- add the mandatory param name in the list of the standard params and set its value to the user provided param value + self.params[mandatory_param] = params[mandatory_param] end return true diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md index 85cce972989..4e6b949f80b 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -7,16 +7,22 @@ - [sc_broker methods](#sc_broker-methods) - [sc_param methods](#sc_param-methods) - [sc_event methods](#sc_event-methods) + - [sc_macros methods](#sc_macros-methods) + - [google.bigquery.bigquery methods](#googlebigquerybigquery-methods) + - [google.auth.oauth methods](#googleauthoauth-methods) ## Libraries list -| Lib name | Content | Usage | Documentation | -| --------- | ------------------------------------------------ | ------------------------------------------------------------------------- | ----------------------------- | -| sc_common | basic methods for lua | you can use it when you want to simplify your code | [Documentation](sc_common.md) | -| sc_logger | methods that handle logging with centreon broker | When you want to log a message from your stream connector | [Documentation](sc_logger.md) | -| sc_broker | wrapper methods for broker cache | when you need something from the broker cache | [Documentation](sc_broker.md) | -| sc_param | handles parameters for stream connectors | when you want to initiate a stream connector with all standard parameters | [Documentation](sc_param.md) | -| sc_event | methods to help you interact with a broker event | when you to perform a specific action on an event | [Documentation](sc_event.md) | +| Lib name | Content | Usage | Documentation | +| ------------------------ | ------------------------------------------------ | ------------------------------------------------------------------------- | -------------------------------------------- | +| sc_common | basic methods for lua | you can use it when you want to simplify your code | [Documentation](sc_common.md) | +| sc_logger | methods that handle logging with centreon broker | When you want to log a message from your stream connector | [Documentation](sc_logger.md) | +| sc_broker | wrapper methods for broker cache | when you need something from the broker cache | [Documentation](sc_broker.md) | +| sc_param | handles parameters for stream connectors | when you want to initiate a stream connector with all standard parameters | [Documentation](sc_param.md) | +| sc_event | methods to help you interact with a broker event | when you want to check event data | [Documentation](sc_event.md) | +| sc_macros | methods to help you convert macros | when you want to use macros in your stream connector | [Documentation](sc_macros.md) | +| google.bigquery.bigquery | methods to help you handle bigquery data | when you want to generate tables schema for bigquery | [Documentation](google/bigquery/bigquery.md) | +| google.auth.oauth | methods to help you authenticate to google api | when you want to authenticate yourself on the google api | [Documentation](google/auth/oauth.md) | ## sc_common methods @@ -28,6 +34,7 @@ | check_boolean_number_option_syntax | make sure that a boolean is 0 or 1, if that's not the case, replace it with a default value | [Documentation](sc_common.md#check_boolean_number_option_syntax-method) | | split | split a string using a separator (default is ",") and store each 
part in a table | [Documentation](sc_common.md#split-method) | | compare_numbers | compare two numbers using the given mathematical operator and return true or false | [Documentation](sc_common.md#compare_numbers-method) | +| generate_postfield_param_string | convert a table of parameters into an url encoded parameters string | [Documentation](sc_common.md#generate_postfield_param_string-method) | ## sc_logger methods @@ -36,6 +43,7 @@ | error | write an error message in the log file | [Documentation](sc_logger.md#error-method) | | warning | write a warning message in the log file | [Documentation](sc_logger.md#warning-method) | | notice | write a notice/info message in the log file | [Documentation](sc_logger.md#notice-method) | +| info | write an info message in the log file | [Documentation](sc_logger.md#info-method) | | debug | write a debug message in the log file | [Documentation](sc_logger.md#debug-method) | ## sc_broker methods @@ -103,3 +111,42 @@ | is_valid_downtime_event_start | checks that the downtime event is about the actual start of the downtime | [Documentation](sc_event.md#is_valid_downtime_event_start-method) | | is_valid_downtime_event_end | checks that the downtime event is about the actual end of the downtime | [Documentation](sc_event.md#is_valid_downtime_event_end-method) | | is_valid_storage_event | DO NOTHING (deprecated, you should use neb event to send metrics) | [Documentation](sc_event.md#is_valid_storage_event-method) | + +## sc_macros methods + +| Method name | Method description | Link | +| ---------------------- | ------------------------------------------------------------------------------ | ----------------------------------------------------------- | +| replace_sc_macro | replace a stream connector macro with its value | [Documentation](sc_macros.md#replace_sc_macro-method) | +| get_cache_macro | retrieve a macro value in the cache | [Documentation](sc_macros.md#get_cache_macro-method) | +| get_event_macro | retrieve a macro value in the event | [Documentation](sc_macros.md#get_event_macro-method) | +| convert_centreon_macro | replace a Centreon macro with its value | [Documentation](sc_macros.md#convert_centreon_macro-method) | +| get_centreon_macro | transform a Centreon macro into a stream connector macro | [Documentation](sc_macros.md#get_centreon_macro-method) | +| get_transform_flag | try to find a transformation flag in the macro name | [Documentation](sc_macros.md#get_transform_flag-method) | +| transform_date | transform a timestamp into a human readable format | [Documentation](sc_macros.md#transform_date-method) | +| transform_short | keep the first line of a string | [Documentation](sc_macros.md#transform_short-method) | +| transform_type | convert 0 or 1 into SOFT or HARD | [Documentation](sc_macros.md#transform_type-method) | +| transform_state | convert a status code into its matching human readable status (OK, WARNING...) 
| [Documentation](sc_macros.md#transform_state-method) | + +## google.bigquery.bigquery methods + +| Method name | Method description | Link | +| ---------------------------- | ---------------------------------------------------------- | -------------------------------------------------------------------------------- | +| get_tables_schema | create all tables schema depending on the configuration | [Documentation](google/bigquery/bigquery.md#get_tables_schema-method) | +| default_host_table_schema | create the default table schema for host_status events | [Documentation](google/bigquery/bigquery.md#default_host_table_schema-method) | +| default_service_table_schema | create the default table schema for service_status events | [Documentation](google/bigquery/bigquery.md#default_service_table_schema-method) | +| default_ack_table_schema | create the default table schema for acknowledgement events | [Documentation](google/bigquery/bigquery.md#default_ack_table_schema-method) | +| default_dt_table_schema | create the default table schema for downtime events | [Documentation](google/bigquery/bigquery.md#default_dt_table_schema-method) | +| default_ba_table_schema | create the default table schema for ba_status events | [Documentation](google/bigquery/bigquery.md#default_ba_table_schema-method) | +| load_tables_schema_file | create tables schema based on a json file | [Documentation](google/bigquery/bigquery.md#load_tables_schema_file-method) | +| build_table_schema | create tables schema based on stream connector parameters | [Documentation](google/bigquery/bigquery.md#build_table_schema-method) | + +## google.auth.oauth methods + +| Method name | Method description | Link | +| ---------------- | ------------------------------------------- | ------------------------------------------------------------- | +| create_jwt_token | create a jwt token | [Documentation](google/auth/oauth.md#create_jwt_token-method) | +| get_key_file | retrieve information from a key file | [Documentation](google/auth/oauth.md#get_key_file-method) | +| create_jwt_claim | create the claim for the jwt token | [Documentation](google/auth/oauth.md#create_jwt_claim-method) | +| create_signature | create the signature for the jwt token | [Documentation](google/auth/oauth.md#create_signature-method) | +| get_access_token | get a google access token using a jwt token | [Documentation](google/auth/oauth.md#get_access_token-method) | +| curl_google | use curl to get an access token | [Documentation](google/auth/oauth.md#curl_google-method) | diff --git a/stream-connectors/modules/docs/broker_data_structure.md b/stream-connectors/modules/docs/broker_data_structure.md index 1c71eaf02b3..c0864a3db4e 100644 --- a/stream-connectors/modules/docs/broker_data_structure.md +++ b/stream-connectors/modules/docs/broker_data_structure.md @@ -3,6 +3,8 @@ - [Broker data structure documentation](#broker-data-structure-documentation) - [Introduction](#introduction) - [NEB Category](#neb-category) + - [Service_status](#service_status) + - [Host_status](#host_status) - [Downtime](#downtime) - [Downtime actual start](#downtime-actual-start) - [Downtime actual end](#downtime-actual-end) @@ -17,6 +19,101 @@ This documentation will not explain the meaning of the structures. 
It is mostly ## NEB Category + +### Service_status + +[BBDO documentation](https://docs.centreon.com/current/en/developer/developer-broker-mapping.html#service-status) + +| index | type | +| ------------------------ | ------- | +| acknowledged | boolean | +| acknowledgement_type | number | +| active_checks | boolean | +| category | number | +| check_attempt | number | +| check_command | string | +| check_interval | number | +| check_period | string | +| check_type | number | +| checked | boolean | +| element | number | +| enabled | boolean | +| event_handler | string | +| event_handler_enabled | boolean | +| execution_time | number | +| flap_detection | boolean | +| flapping | boolean | +| host_id | number | +| last_check | number | +| last_hard_state | number | +| last_hard_state_change | number | +| last_state_change | number | +| last_time_ok | number | +| last_update | number | +| latency | number | +| max_check_attempts | number | +| next_check | number | +| no_more_notifications | boolean | +| notification_number | number | +| notify | boolean | +| obsess_over_service | boolean | +| output | string | +| passive_checks | boolean | +| percent_state_change | number | +| perfdata | string | +| retry_interval | number | +| scheduled_downtime_depth | number | +| service_id | number | +| should_be_scheduled | boolean | +| state | number | +| state_type | number | + +### Host_status + +[BBDO documentation](https://docs.centreon.com/current/en/developer/developer-broker-mapping.html#host-status) + +| index | type | +| ------------------------ | ------- | +| acknowledged | boolean | +| acknowledgement_type | number | +| active_checks | boolean | +| category | number | +| check_attempt | number | +| check_command | string | +| check_interval | number | +| check_period | string | +| check_type | number | +| checked | boolean | +| element | number | +| enabled | boolean | +| event_handler | string | +| event_handler_enabled | boolean | +| execution_time | number | +| flap_detection | boolean | +| flapping | boolean | +| host_id | number | +| last_check | number | +| last_hard_state | number | +| last_hard_state_change | number | +| last_state_change | number | +| last_time_up | number | +| last_update | number | +| latency | number | +| max_check_attempts | number | +| next_check | number | +| no_more_notifications | boolean | +| notification_number | number | +| notify | boolean | +| obsess_over_host | boolean | +| output | string | +| passive_checks | boolean | +| percent_state_change | number | +| perfdata | string | +| retry_interval | number | +| scheduled_downtime_depth | number | +| should_be_scheduled | boolean | +| state | number | +| state_type | number | + +### Downtime + +[BBDO documentation](https://docs.centreon.com/current/en/developer/developer-broker-mapping.html#downtime) diff --git a/stream-connectors/modules/docs/google/auth/oauth.md b/stream-connectors/modules/docs/google/auth/oauth.md new file mode 100644 index 00000000000..fae2476084a --- /dev/null +++ b/stream-connectors/modules/docs/google/auth/oauth.md @@ -0,0 +1,217 @@ +# Documentation of the google oauth module + +- [Documentation of the google oauth module](#documentation-of-the-google-oauth-module) + - [Introduction](#introduction) + - [Module initialization](#module-initialization) + - [Module constructor](#module-constructor) + - [constructor: Example](#constructor-example) + - [create_jwt_token method](#create_jwt_token-method) + - [create_jwt_token: returns](#create_jwt_token-returns) + - [create_jwt_token: 
example](#create_jwt_token-example) + - [get_key_file method](#get_key_file-method) + - [get_key_file: returns](#get_key_file-returns) + - [get_key_file: example](#get_key_file-example) + - [create_jwt_claim method](#create_jwt_claim-method) + - [create_jwt_claim: returns](#create_jwt_claim-returns) + - [create_jwt_claim: example](#create_jwt_claim-example) + - [create_signature method](#create_signature-method) + - [create_signature: returns](#create_signature-returns) + - [create_signature: example](#create_signature-example) + - [get_access_token method](#get_access_token-method) + - [get_access_token: returns](#get_access_token-returns) + - [get_access_token: example](#get_access_token-example) + - [curl_google method](#curl_google-method) + - [curl_google: parameters](#curl_google-parameters) + - [curl_google: returns](#curl_google-returns) + - [curl_google: example](#curl_google-example) + +## Introduction + +The google oauth module provides methods to help with google api authentication. It has been made in OOP (object oriented programming). + +## Module initialization + +Since this is OOP, it is required to instantiate the module before using its methods. + +### Module constructor + +The constructor accepts three parameters; if the third one is not provided, a default value is used: + +- params. This is a table of all the stream connectors parameters +- sc_common. This is an instance of the sc_common module +- sc_logger. This is an instance of the sc_logger module + +If you don't provide the sc_logger parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization)) + +### constructor: Example + +```lua +-- load modules +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local oauth = require("centreon-stream-connectors-lib.google.auth.oauth") + +-- initiate "mandatory" information for the logger module +local logfile = "/var/log/test_logger.log" +local severity = 1 + +-- create a new instance of the sc_common and sc_logger module +local test_logger = sc_logger.new(logfile, severity) +local test_common = sc_common.new(test_logger) + +-- some stream connector params +local params = { + my_param = "my_value" +} + +-- create a new instance of the google oauth module +local test_oauth = oauth.new(params, test_common, test_logger) +``` + +## create_jwt_token method + +The **create_jwt_token** method creates a jwt token. More information about the google JWT token [here](https://developers.google.com/identity/protocols/oauth2/service-account#authorizingrequests) + +Head over the following chapters for more information: + +- [get_key_file](#get_key_file-method) +- [create_jwt_claim](#create_jwt_claim-method) +- [create_signature](#create_signature-method) + +### create_jwt_token: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------------------------------------------- | +| true or false | boolean | yes | true if jwt token is created, false otherwise | + +### create_jwt_token: example + +```lua + +local result = test_oauth:create_jwt_token() +--> result is true or false +--> jwt token is stored in test_oauth.jwt_token if result is true +``` + +## get_key_file method + +The **get_key_file** method gets the information set in the key file. To do so, the **key_file_path** parameter must be set. 
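+The key file is expected to be a google service account JSON key. As a minimal sketch (the values below are hypothetical), the decoded key table must at least contain the fields checked by **create_jwt_claim**: + +```lua +-- hypothetical content of test_oauth.key_table after a successful get_key_file() +-- (broker.json_decode turns the json key file into a lua table) +local key_table = { + client_email = "my-sa@my-project.iam.gserviceaccount.com", -- hypothetical value + auth_uri = "https://accounts.google.com/o/oauth2/auth", + token_uri = "https://oauth2.googleapis.com/token", + private_key = "-----BEGIN PRIVATE KEY-----...", + project_id = "my-project" -- hypothetical value +} +``` + 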
+
+### get_key_file: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | ---------------------------------------------------------- |
+| true or false | boolean | yes | true if the key file information is retrieved, false otherwise |
+
+### get_key_file: example
+
+```lua
+local result = test_oauth:get_key_file()
+--> result is true or false
+--> the key file data is stored in test_oauth.key_table if result is true
+```
+
+## create_jwt_claim method
+
+The **create_jwt_claim** method creates the claim for a JWT token. To do so, the **scope_list** and **project_id** parameters must be set. More information about the google JWT token and claim [here](https://developers.google.com/identity/protocols/oauth2/service-account#authorizingrequests)
+
+### create_jwt_claim: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------------------------------------------- |
+| true or false | boolean | yes | true if the JWT claim is created, false otherwise |
+
+### create_jwt_claim: example
+
+```lua
+local result = test_oauth:create_jwt_claim()
+--> result is true or false
+--> the JWT claim is stored in test_oauth.jwt_claim if result is true
+```
+
+## create_signature method
+
+The **create_signature** method creates the signature from the JWT claim and JWT header. To meet Google's requirements, the hash algorithm used is **sha256WithRSAEncryption**. More information about the google JWT token [here](https://developers.google.com/identity/protocols/oauth2/service-account#authorizingrequests)
+
+### create_signature: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | ---------------------------------------------------- |
+| true or false | boolean | yes | true if the signature has been created, false otherwise |
+
+### create_signature: example
+
+```lua
+local result = test_oauth:create_signature()
+--> result is true or false
+--> the signature is stored in test_oauth.signature if result is true
+```
+
+## get_access_token method
+
+The **get_access_token** method gets an access token from the google api using a JWT token. It will reuse an existing access token if it finds one. An access token's life span is one hour; this method generates a new one when the stored token is at least 59 minutes old. To generate a new access token, this method needs to create a new JWT token.
+
+Head over to the following chapters for more information
+
+- [create_jwt_token](#create_jwt_token-method)
+
+### get_access_token: returns
+
+| return | type | always | condition |
+| ------------ | ------- | ------ | ------------------------------- |
+| false | boolean | no | if it can't get an access token |
+| access_token | string | no | if it can get an access token |
+
+### get_access_token: example
+
+```lua
+local result = test_oauth:get_access_token()
+--> result is "dzadz93213daznc321OGRK" or false if the access token is not retrieved
+```
+
+## curl_google method
+
+The **curl_google** method sends data to the google api for authentication.
+
+### curl_google: parameters
+
+| parameter | type | optional | default value |
+| ---------------------------- | ------ | -------- | ------------- |
+| the url of the google api | string | no | |
+| the curl headers | table | no | |
+| data that needs to be posted | string | yes | |
+
+### curl_google: returns
+
+| return | type | always | condition |
+| --------------- | ------- | ------ | ------------------------------------------------------------------------ |
+| false | boolean | no | if the google api can't be reached or the HTTP return code is not 200 |
+| result from api | string | no | if the query went well |
+
+### curl_google: example
+
+```lua
+-- set up headers
+local headers = {
+  'Content-Type: application/x-www-form-urlencoded'
+}
+
+-- set up data
+local data = {
+  grant_type = "urn:ietf:params:oauth:grant-type:jwt-bearer",
+  assertion = test_oauth.jwt_token
+}
+
+-- set up url
+local url = test_oauth.key_table.uri
+
+-- convert data so it can be sent as url parameters
+local url_encoded_data = test_common:generate_postfield_param_string(data)
+
+local result = test_oauth:curl_google(url, headers, url_encoded_data)
+--> result is false or the api answer (should be json most of the time)
+```
diff --git a/stream-connectors/modules/docs/google/bigquery/bigquery.md b/stream-connectors/modules/docs/google/bigquery/bigquery.md
new file mode 100644
index 00000000000..c1d1f956cdb
--- /dev/null
+++ b/stream-connectors/modules/docs/google/bigquery/bigquery.md
@@ -0,0 +1,334 @@
+# Documentation of the google bigquery module
+
+- [Documentation of the google bigquery module](#documentation-of-the-google-bigquery-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [Module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [get_tables_schema method](#get_tables_schema-method)
+    - [get_tables_schema: returns](#get_tables_schema-returns)
+    - [get_tables_schema: example](#get_tables_schema-example)
+  - [default_host_table_schema method](#default_host_table_schema-method)
+    - [default_host_table_schema: returns](#default_host_table_schema-returns)
+    - [default_host_table_schema: example](#default_host_table_schema-example)
+  - [default_service_table_schema method](#default_service_table_schema-method)
+    - [default_service_table_schema: returns](#default_service_table_schema-returns)
+    - [default_service_table_schema: example](#default_service_table_schema-example)
+  - [default_ack_table_schema method](#default_ack_table_schema-method)
+    - [default_ack_table_schema: returns](#default_ack_table_schema-returns)
+    - [default_ack_table_schema: example](#default_ack_table_schema-example)
+  - [default_dt_table_schema method](#default_dt_table_schema-method)
+    - [default_dt_table_schema: returns](#default_dt_table_schema-returns)
+    - [default_dt_table_schema: example](#default_dt_table_schema-example)
+  - [default_ba_table_schema method](#default_ba_table_schema-method)
+    - [default_ba_table_schema: returns](#default_ba_table_schema-returns)
+    - [default_ba_table_schema: example](#default_ba_table_schema-example)
+  - [load_tables_schema_file method](#load_tables_schema_file-method)
+    - [load_tables_schema_file: returns](#load_tables_schema_file-returns)
+    - [load_tables_schema_file: example](#load_tables_schema_file-example)
+  - [build_table_schema method](#build_table_schema-method)
+    - [build_table_schema: parameters](#build_table_schema-parameters)
+    - [build_table_schema: example](#build_table_schema-example)
+
+## Introduction
+
+The google bigquery module provides methods to handle table schemas. It is written in OOP (object-oriented programming) style.
+
+## Module initialization
+
+Since this is OOP, you must instantiate the module first.
+
+### Module constructor
+
+The constructor takes two parameters; if the second one is not provided, a default value is used.
+
+- params. This is a table of all the stream connector parameters
+- sc_logger. This is an instance of the sc_logger module
+
+If you don't provide the sc_logger parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
+
+### constructor: Example
+
+```lua
+-- load modules
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_bq = require("centreon-stream-connectors-lib.google.bigquery.bigquery")
+
+-- set the "mandatory" information for the logger module
+local logfile = "/var/log/test_logger.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- some stream connector params
+local params = {
+  my_param = "my_value"
+}
+
+-- create a new instance of the google bigquery module
+local test_bq = sc_bq.new(params, test_logger)
+```
+
+## get_tables_schema method
+
+The **get_tables_schema** method retrieves the schemas for host_status, service_status, downtime, acknowledgement and BA events. Depending on the configuration, it creates them from a default configuration, a JSON configuration file or straight from the stream connector parameters.
+
+Head over to the following chapters for more information
+
+For the default table schemas that are provided:
+
+- [default_host_table_schema](#default_host_table_schema-method)
+- [default_service_table_schema](#default_service_table_schema-method)
+- [default_ack_table_schema](#default_ack_table_schema-method)
+- [default_dt_table_schema](#default_dt_table_schema-method)
+- [default_ba_table_schema](#default_ba_table_schema-method)
+
+For the other methods:
+
+- [load_tables_schema_file](#load_tables_schema_file-method)
+- [build_table_schema](#build_table_schema-method)
+
+### get_tables_schema: returns
+
+| return | type | always | condition |
+| ------ | ------- | ------ | --------- |
+| true | boolean | yes | |
+
+### get_tables_schema: example
+
+```lua
+local result = test_bq:get_tables_schema()
+--> result is true
+--> schemas are stored in test_bq.schemas[][]
+```
+
+## default_host_table_schema method
+
+The **default_host_table_schema** method returns the default schema for host_status events
+
+### default_host_table_schema: returns
+
+| return | type | always | condition |
+| ----------------- | ----- | ------ | --------- |
+| host schema table | table | yes | |
+
+### default_host_table_schema: example
+
+```lua
+local result = test_bq:default_host_table_schema()
+--> result is :
+--[[
+  {
+    host_id = "{host_id}",
+    host_name = "{cache.host.name}",
+    status = "{state}",
+    last_check = "{last_check}",
+    output = "{output}",
+    instance_id = "{cache.host.instance_id}"
+  }
+]]--
+```
+
+## default_service_table_schema method
+
+The **default_service_table_schema** method returns the default schema for service_status events
+
+### default_service_table_schema: returns
+
+| return | type | always | condition |
+| -------------------- | ----- | ------ | --------- |
+| service schema table | table | yes | |
+
+### default_service_table_schema: example
+
+```lua
+local result = test_bq:default_service_table_schema()
+-->
result is : +--[[ + { + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + last_check = "{last_check}", + output = "{output}", + instance_id = "{cache.host.instance_id}" + } +]]-- +``` + +## default_ack_table_schema method + +The **default_ack_table_schema** method retrieves the schemas for acknowledgement events + +### default_ack_table_schema: returns + +| return | type | always | condition | +| ---------------- | ----- | ------ | --------- | +| ack schema table | table | yes | | + +### default_ack_table_schema: example + +```lua +local result = test_bq:default_ack_table_schema() +--> result is : +--[[ + { + author = "{author}", + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + output = "{output}", + instance_id = "{cache.host.instance_id}", + entry_time = "{entry_time}" + } +]]-- +``` + +## default_dt_table_schema method + +The **default_dt_table_schema** method retrieves the schemas for downtime events + +### default_dt_table_schema: returns + +| return | type | always | condition | +| --------------------- | ----- | ------ | --------- | +| downtime schema table | table | yes | | + +### default_dt_table_schema: example + +```lua +local result = test_bq:default_dt_table_schema() +--> result is : +--[[ + { + author = "{author}", + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + output = "{output}", + instance_id = "{cache.host.instance_id}", + actual_start_time = "{actual_start_time}", + actual_end_time = "{deletion_time}" + } +]]-- +``` + +## default_ba_table_schema method + +The **default_ba_table_schema** method retrieves the schemas for ba_status events + +### default_ba_table_schema: returns + +| return | type | always | condition | +| --------------- | ----- | ------ | --------- | +| BA schema table | table | yes | | + +### default_ba_table_schema: example + +```lua +local result = test_bq:default_ba_table_schema() +--> result is : +--[[ + { + ba_id = "{ba_id}", + ba_name = "{cache.ba.ba_name}", + status = "{state}" + } +]]-- +``` + +## load_tables_schema_file method + +The **load_tables_schema_file** method retrieves the schemas from a json file. The json file must have the following structure + +```json +{ + "host": { + "column_1": "value_1", + "column_2": "value_2" + }, + "service": { + "column_1": "value_1", + "column_2": "value_2" + }, + "ack": { + "column_1": "value_1", + "column_2": "value_2" + }, + "dt": { + "column_1": "value_1", + "column_2": "value_2" + }, + "ba": { + "column_1": "value_1", + "column_2": "value_2" + } +} +``` + +If you only want to send service_status events, you can just put the service part of the json. 
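+
+For instance, a file that only defines the service_status schema could look like the sketch below; the column names and macro values are only placeholders taken from the default service schema shown above:
+
+```json
+{
+  "service": {
+    "host_name": "{cache.host.name}",
+    "service_description": "{cache.service.description}",
+    "status": "{state}"
+  }
+}
```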
+
+### load_tables_schema_file: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | -------------------------------------------------------------------------------------------- |
+| true or false | boolean | yes | false if we can't open the configuration file or it is not a valid json file, true otherwise |
+
+### load_tables_schema_file: example
+
+```lua
+local result = test_bq:load_tables_schema_file()
+--> result is true or false
+--> if true, schemas are stored in test_bq.schemas[][]
+```
+
+## build_table_schema method
+
+The **build_table_schema** method creates table schemas using stream connector parameters.
+Parameters must have the following syntax to be interpreted (the part after the prefix becomes the column name, as shown in the example below)
+
+For host_status events:
+`_sc_gbq_host_column_<column name>`
+For service_status events:
+`_sc_gbq_service_column_<column name>`
+For acknowledgement events:
+`_sc_gbq_ack_column_<column name>`
+For downtime events:
+`_sc_gbq_dt_column_<column name>`
+For ba_status events:
+`_sc_gbq_ba_column_<column name>`
+
+### build_table_schema: parameters
+
+| parameter | type | optional | default value |
+| ----------------------------------------------------------------------------------------------- | ------ | -------- | ------------- |
+| regex, the regex to identify the stream connector parameter that is about a column | string | no | |
+| substract, the prefix that must be excluded from the parameter name to only get the column name | string | no | |
+| structure, the table in which the retrieved column and value are going to be stored | table | yes | |
+
+### build_table_schema: example
+
+```lua
+self.params._sc_gbq_host_column_MYNAME = "MYVALUE"
+self.params._sc_gbq_host_column_OTHERNAME = "OTHERVALUE"
+self.params.something = "hello"
+
+-- any parameter starting with _sc_gbq_host_column is going to be computed
+-- any matching parameter is going to have _sc_gbq_host_column removed from its name
+-- the created table schema will be stored in self.schemas[1][14] (1 because host_status is a neb event, and 14 because host_status is element 14 of the neb events table)
+test_bq:build_table_schema("^_sc_gbq_host_column", "_sc_gbq_host_column", self.schemas[1][14])
+--> self.schemas[1][14] is
+--[[
+  {
+    MYNAME = "MYVALUE",
+    OTHERNAME = "OTHERVALUE"
+  }
+]]--
+```
diff --git a/stream-connectors/modules/docs/sc_broker.md b/stream-connectors/modules/docs/sc_broker.md
index d873d87469c..d28faabe95c 100644
--- a/stream-connectors/modules/docs/sc_broker.md
+++ b/stream-connectors/modules/docs/sc_broker.md
@@ -60,7 +60,7 @@
 Constructor can be initialized with one parameter or it will use a default value
 
 - sc_logger. This is an instance of the sc_logger module
 
-If you don't provider this parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
+If you don't provide this parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
 
 ### constructor: Example
 
diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md
index db460c35cdd..a12ba1e7a0b 100644
--- a/stream-connectors/modules/docs/sc_common.md
+++ b/stream-connectors/modules/docs/sc_common.md
@@ -29,6 +29,10 @@
   - [compare_numbers: parameters](#compare_numbers-parameters)
   - [compare_numbers: returns](#compare_numbers-returns)
   - [compare_numbers: example](#compare_numbers-example)
+  - [generate_postfield_param_string method](#generate_postfield_param_string-method)
+    - [generate_postfield_param_string: parameters](#generate_postfield_param_string-parameters)
+    - [generate_postfield_param_string: returns](#generate_postfield_param_string-returns)
+    - [generate_postfield_param_string: example](#generate_postfield_param_string-example)
 
 ## Introduction
 
@@ -44,7 +48,7 @@
 Constructor can be initialized with one parameter or it will use a default value
 
 - sc_logger. This is an instance of the sc_logger module
 
-If you don't provider this parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
+If you don't provide this parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
 
 ### constructor: Example
 
@@ -271,3 +275,33 @@
 first_number = "hello my friend"
 
 result = test_common:compare_numbers(first_number, second_number, operator)
 --> result is nil ("hello my friend" is not a valid number)
 ```
+
+## generate_postfield_param_string method
+
+The **generate_postfield_param_string** method generates a URL-encoded parameter string from a table of parameters.
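+
+As a side note, Lua's table iteration order is unspecified, so if the method presumably walks the table with pairs, the order of the parameters in the resulting string may vary between runs; URL query parameters are order-insensitive, so this should not matter. For illustration only, the percent-encoding involved boils down to something like this hand-rolled sketch (not the module's actual code):
+
+```lua
+-- illustration: percent-encode everything outside the RFC 3986 unreserved set
+local function url_encode(value)
+  return (tostring(value):gsub("[^%w%-%._~]", function(c)
+    return string.format("%%%02X", string.byte(c))
+  end))
+end
+
+print(url_encode("John Doe")) --> John%20Doe
+```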
+
+### generate_postfield_param_string: parameters
+
+| parameter | type | optional | default value |
+| ------------------------------------------------------------------- | ----- | -------- | ------------- |
+| the table with all the parameters to convert in a parameters string | table | no | |
+
+### generate_postfield_param_string: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | ---------------------------------------------------------------------------------- |
+| false | boolean | no | if the method parameter is not a table |
+| string_params | string | no | if the method parameter is a table, it will return a URL-encoded string of parameters |
+
+### generate_postfield_param_string: example
+
+```lua
+local param_table = {
+  key = "321Xzd",
+  option = "full",
+  name = "John Doe"
+}
+
+local result = test_common:generate_postfield_param_string(param_table)
+--> result is "key=321Xzd&option=full&name=John%20Doe"
+```
diff --git a/stream-connectors/modules/docs/sc_event.md b/stream-connectors/modules/docs/sc_event.md
index 48797e70d8c..7a520a53ec3 100644
--- a/stream-connectors/modules/docs/sc_event.md
+++ b/stream-connectors/modules/docs/sc_event.md
@@ -831,7 +831,9 @@ local result = test_event:is_valid_host_severity()
 
 {
   --- event data ---
   cache = {
-    host_severity = 2712
+    severity = {
+      host = 2712
+    }
   --- other cache data type ---
   }
 }
@@ -860,7 +862,9 @@ local result = test_event:is_valid_service_severity()
 
 {
   --- event data ---
   cache = {
-    service_severity = 2712
+    severity = {
+      service = 2712
+    }
   --- other cache data type ---
   }
 }
diff --git a/stream-connectors/modules/docs/sc_logger.md b/stream-connectors/modules/docs/sc_logger.md
index 7e396d7ec21..d6d0dc0ad95 100644
--- a/stream-connectors/modules/docs/sc_logger.md
+++ b/stream-connectors/modules/docs/sc_logger.md
@@ -15,6 +15,9 @@
   - [debug method](#debug-method)
     - [debug: parameters](#debug-parameters)
     - [debug: example](#debug-example)
+  - [info method](#info-method)
+    - [info: parameters](#info-parameters)
+    - [info: example](#info-example)
   - [notice method](#notice-method)
     - [notice: parameters](#notice-parameters)
     - [notice: example](#notice-example)
@@ -113,6 +116,21 @@
 The **debug** method will print a debug message in the logfile if **severity is equal or superior to 3**.
 
 ### debug: parameters
 
 - message. A string that is the debug message you want to display in your logfile
 
 ### debug: example
 
 ```lua
 -- call debug method
 test_logger:debug("[module_name:method_name]: This is a debug message.")
 ```
+
+## info method
+
+The **info** method will print an info message in the logfile if **severity is equal or superior to 2**.
+
+### info: parameters
+
+- message. A string that is the info message you want to display in your logfile
+
+### info: example
+
+```lua
+-- call info method
+test_logger:info("[module_name:method_name]: This is an info message.")
+```
+
 ## notice method
 
 The **notice** method will print a notice message in the logfile if **severity is equal or superior to 1**.
diff --git a/stream-connectors/modules/docs/sc_macros.md b/stream-connectors/modules/docs/sc_macros.md
new file mode 100644
index 00000000000..8d655572c3c
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_macros.md
@@ -0,0 +1,519 @@
+# Documentation of the sc_macros module
+
+- [Documentation of the sc_macros module](#documentation-of-the-sc_macros-module)
+  - [Introduction](#introduction)
+  - [Stream connectors macro explanation](#stream-connectors-macro-explanation)
+    - [Event macros](#event-macros)
+    - [Cache macros](#cache-macros)
+    - [Transformation flags](#transformation-flags)
+  - [Module initialization](#module-initialization)
+    - [Module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [replace_sc_macro method](#replace_sc_macro-method)
+    - [replace_sc_macro: parameters](#replace_sc_macro-parameters)
+    - [replace_sc_macro: returns](#replace_sc_macro-returns)
+    - [replace_sc_macro: example](#replace_sc_macro-example)
+  - [get_cache_macro method](#get_cache_macro-method)
+    - [get_cache_macro: parameters](#get_cache_macro-parameters)
+    - [get_cache_macro: returns](#get_cache_macro-returns)
+    - [get_cache_macro: example](#get_cache_macro-example)
+  - [get_event_macro method](#get_event_macro-method)
+    - [get_event_macro: parameters](#get_event_macro-parameters)
+    - [get_event_macro: returns](#get_event_macro-returns)
+    - [get_event_macro: example](#get_event_macro-example)
+  - [convert_centreon_macro method](#convert_centreon_macro-method)
+    - [convert_centreon_macro: parameters](#convert_centreon_macro-parameters)
+    - [convert_centreon_macro: returns](#convert_centreon_macro-returns)
+    - [convert_centreon_macro: example](#convert_centreon_macro-example)
+  - [get_centreon_macro method](#get_centreon_macro-method)
+    - [get_centreon_macro: parameters](#get_centreon_macro-parameters)
+    - [get_centreon_macro: returns](#get_centreon_macro-returns)
+    - [get_centreon_macro: example](#get_centreon_macro-example)
+  - [get_transform_flag method](#get_transform_flag-method)
+    - [get_transform_flag: parameters](#get_transform_flag-parameters)
+    - [get_transform_flag: returns](#get_transform_flag-returns)
+    - [get_transform_flag: example](#get_transform_flag-example)
+  - [transform_date method](#transform_date-method)
+    - [transform_date: parameters](#transform_date-parameters)
+    - [transform_date: returns](#transform_date-returns)
+    - [transform_date: example](#transform_date-example)
+  - [transform_short method](#transform_short-method)
+    - [transform_short: parameters](#transform_short-parameters)
+    - [transform_short: returns](#transform_short-returns)
+    - [transform_short: example](#transform_short-example)
+  - [transform_type method](#transform_type-method)
+    - [transform_type: parameters](#transform_type-parameters)
+    - [transform_type: returns](#transform_type-returns)
+    - [transform_type: example](#transform_type-example)
+  - [transform_state method](#transform_state-method)
+    - [transform_state: parameters](#transform_state-parameters)
+    - [transform_state: returns](#transform_state-returns)
+    - [transform_state: example](#transform_state-example)
+
+## Introduction
+
+The sc_macros module provides methods to handle a stream connector oriented macro system, such as {cache.host.name}, as well as standard Centreon macros, such as $HOSTALIAS$. It is written in OOP (object-oriented programming) style.
+
+## Stream connectors macro explanation
+
+There are two kinds of stream connector macros: **event macros** and **cache macros**. The first type refers to data that is accessible directly from the event. The second type refers to data that needs to be retrieved from the broker cache.
+
+### Event macros
+
+This one is quite easy to understand. The macro syntax is `{macro_name}`, where *macro_name* is a property of an event. For example, for a service_status neb event, all macro names are available [there](broker_data_structure.md#Service_status).
+
+This means that it is possible to use the following macros
+
+```lua
+"{service_id}" -- will be replaced by the service_id
+"{output}" -- will be replaced by the service output
+"{last_check}" -- will be replaced by the last_check timestamp
+"{state_type}" -- will be replaced by the state type value (0 or 1 for SOFT or HARD)
+"{state}" -- will be replaced by the state of the service (0, 1, 2, 3 for OK, WARNING, CRITICAL, UNKNOWN)
+```
+
+### Cache macros
+
+This one is a bit more complicated. The purpose is to retrieve information from the event cache using a macro. If you rely on the centreon-stream-connectors-lib to fill the cache, here is what you need to know.
+
+There are eight kinds of cache
+
+- host cache (for any event that is linked to a host, which means any event but BA events)
+- service cache (for any event that is linked to a service)
+- poller cache (only generated if you filter your events on a poller)
+- severity cache (only generated if you filter your events on a severity)
+- hostgroups cache (only generated if you filter your events on a hostgroup)
+- servicegroups cache (only generated if you filter your events on a servicegroup)
+- ba cache (only for a ba_status event)
+- bvs cache (only generated if you filter your BA events on a BV)
+
+For example, if we want to retrieve the description of a service from the cache (because the description is not provided in the event data), we will use `{cache.service.description}`.
+
+For example, for a service_status neb event, all cache macros are available [there](sc_broker.md#get_service_all_infos-example)
+
+This means that it is possible to use the following macros
+
+```lua
+"{cache.service.description}" -- will be replaced by the service description
+"{cache.service.notes}" -- will be replaced by the service notes
+"{cache.service.last_time_critical}" -- will be replaced by the service last_time_critical timestamp
+```
+
+### Transformation flags
+
+You can use transformation flags on stream connector macros. The purpose of these flags is to convert the given value into something more appropriate. For example, you can convert a timestamp into a human-readable date.
+
+Here is the list of all available flags
+
+| flag name | purpose | without flag | with flag |
+| --------- | ------- | ------------ | --------- |
+| _scdate | converts a timestamp to a date | 1623691758 | 2021-06-14 19:29:18 |
+| _sctype | converts a state type number to its human value | 0 | SOFT |
+| _scstate | converts a state to its human value | 2 | WARNING (for a service) |
+| _scshort | only retrieves the first line of a string (mostly used to get the output instead of the long output of a service, for example) | "my output\n this is part of the longoutput" | "my output" |
+
+The **_scdate** flag is a bit specific because you can change the date format using the [**timestamp_conversion_format parameter**](sc_param.md#default-parameters)
+
+With all that information in mind, we can use the following macros
+
+```lua
+"{cache.service.last_time_critical}" -- will be replaced by the service last_time_critical timestamp
+"{cache.service.last_time_critical_scdate}" -- will be replaced by the service last_time_critical converted into a human readable date format
+"{state_type_sctype}" -- will be replaced by the service state_type in a human readable format (SOFT or HARD)
+"{state_scstate}" -- will be replaced by the service state in a human readable format (OK, WARNING, CRITICAL or UNKNOWN)
+"{output_scshort}" -- will be replaced by the first line of the service output
+```
+
+## Module initialization
+
+Since this is OOP, you must instantiate the module first.
+
+### Module constructor
+
+The constructor takes two parameters; if the second one is not provided, a default value is used.
+
+- params. This is a table of all the stream connector parameters
+- sc_logger. This is an instance of the sc_logger module
+
+If you don't provide the sc_logger parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
+
+### constructor: Example
+
+```lua
+-- load modules
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+
+-- set the "mandatory" information for the logger module
+local logfile = "/var/log/test_logger.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- some stream connector params
+local params = {
+  my_param = "my_value"
+}
+
+-- create a new instance of the sc_macros module
+local test_macros = sc_macros.new(params, test_logger)
+```
+
+## replace_sc_macro method
+
+The **replace_sc_macro** method replaces all stream connector macros in a string with their values.
+
+Head over to the following chapters for more information
+
+- [Stream connectors macro explanation](#stream-connectors-macro-explanation)
+- [get_cache_macro](#get_cache_macro-method)
+- [get_event_macro](#get_event_macro-method)
+
+### replace_sc_macro: parameters
+
+| parameter | type | optional | default value |
+| ---------------------- | ------ | -------- | ------------- |
+| the string with macros | string | no | |
+| the event | table | no | |
+
+### replace_sc_macro: returns
+
+| return | type | always | condition |
+| ---------------- | ------ | ------ | --------- |
+| converted_string | string | yes | |
+
+### replace_sc_macro: example
+
+```lua
+local string = "my host id is {host_id}, name is {cache.host.name}, its status is {state_scstate} and its state type is {state_type_sctype}"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    host = {
+      name = "Tatooine"
+    }
+  }
+}
+
+local result = test_macros:replace_sc_macro(string, event)
+--> result is "my host id is 2712, name is Tatooine, its status is UP and its state type is HARD"
+```
+
+## get_cache_macro method
+
+The **get_cache_macro** method replaces a stream connector cache macro with its value.
+
+Head over to the following chapters for more information
+
+- [Transformation flags](#transformation-flags)
+- [Cache macros](#cache-macros)
+- [get_transform_flag](#get_transform_flag-method)
+
+### get_cache_macro: parameters
+
+| parameter | type | optional | default value |
+| -------------- | ------ | -------- | ------------- |
+| the macro name | string | no | |
+| the event | table | no | |
+
+### get_cache_macro: returns
+
+| return | type | always | condition |
+| ------------------ | --------------------------- | ------ | ----------------------------------------------------------------- |
+| false | boolean | no | if the macro is not a cache macro or the value can't be found in the cache |
+| value of the macro | boolean or string or number | no | the value that has been found in the cache |
+
+### get_cache_macro: example
+
+```lua
+local macro = "{cache.host.name}"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    host = {
+      name = "Tatooine"
+    }
+  }
+}
+
+local result = test_macros:get_cache_macro(macro, event)
+--> result is "Tatooine"
+
+macro = "{host_id}"
+result = test_macros:get_cache_macro(macro, event)
+--> result is false, host_id is in the event table, not in a table inside the cache table of the event
+```
+
+## get_event_macro method
+
+The **get_event_macro** method replaces a stream connector event macro with its value.
+
+Head over to the following chapters for more information
+
+- [Transformation flags](#transformation-flags)
+- [Event macros](#event-macros)
+- [get_transform_flag](#get_transform_flag-method)
+
+### get_event_macro: parameters
+
+| parameter | type | optional | default value |
+| -------------- | ------ | -------- | ------------- |
+| the macro name | string | no | |
+| the event | table | no | |
+
+### get_event_macro: returns
+
+| return | type | always | condition |
+| ------------------ | --------------------------- | ------ | ------------------------------------------ |
+| false | boolean | no | if the macro is not an event macro |
+| value of the macro | boolean or string or number | no | the value that has been found in the event |
+
+### get_event_macro: example
+
+```lua
+local macro = "{host_id}"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    host = {
+      name = "Tatooine"
+    }
+  }
+}
+
+local result = test_macros:get_event_macro(macro, event)
+--> result is "2712"
+
+macro = "{cache.host.name}"
+result = test_macros:get_event_macro(macro, event)
+--> result is false, cache.host.name is in the cache table, not directly in the event table
+```
+
+## convert_centreon_macro method
+
+The **convert_centreon_macro** method replaces all Centreon macros in a string (such as $HOSTALIAS$) with their values. It first converts each macro to its stream connector macro counterpart and then converts that macro to its value.
+
+### convert_centreon_macro: parameters
+
+| parameter | type | optional | default value |
+| ---------------------- | ------ | -------- | ------------- |
+| the string with macros | string | no | |
+| the event | table | no | |
+
+### convert_centreon_macro: returns
+
+| return | type | always | condition |
+| ---------------- | ------ | ------ | ------------------------------------------ |
+| converted string | string | yes | the value that has been found in the event |
+
+### convert_centreon_macro: example
+
+```lua
+local string = "We should go to $HOSTNAME$ but address $HOSTADDRESS$ is not on open street map and by the way there is $HOSTALIAS$"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    host = {
+      name = "Tatooine",
+      address = "27.12.19.91",
+      alias = "Too much sand"
+    }
+  }
+}
+
+local result = test_macros:convert_centreon_macro(string, event)
+--> result is "We should go to Tatooine but address 27.12.19.91 is not on open street map and by the way there is Too much sand"
+```
+
+## get_centreon_macro method
+
+The **get_centreon_macro** method retrieves the given macro in a Centreon macro list set up in the sc_macros module constructor and returns its associated stream connector macro.
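+
+For illustration, based on the conversions shown in this document ($HOSTNAME$, $HOSTADDRESS$ and $HOSTALIAS$ in the convert_centreon_macro example above), entries of that list presumably look like the sketch below; the exact structure of the real list may differ:
+
+```lua
+-- illustrative excerpt of a Centreon macro to stream connector macro mapping
+local centreon_macro_list = {
+  ["$HOSTNAME$"] = "{cache.host.name}",
+  ["$HOSTADDRESS$"] = "{cache.host.address}",
+  ["$HOSTALIAS$"] = "{cache.host.alias}"
+}
+```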
+ +### get_centreon_macro: parameters + +| parameter | type | optional | default value | +| --------------------- | ------ | -------- | ------------- | +| the name of the macro | string | no | | + +### get_centreon_macro: returns + +| return | type | always | condition | +| -------------------------------------- | ------- | ------ | ------------------------------------------------------ | +| false | boolean | no | if the macro is not found in the predefined macro list | +| the appropriate stream connector macro | string | no | the value that has been found in the event | + +### get_centreon_macro: example + +```lua +local macro = "$HOSTALIAS$" + +local result = test_macros:get_centreon_macro(macro) +--> result is "{cache.host.alias}" + +macro = "$ENDOR$" + +result = test_macros:get_centreon_macro(macro) +--> result is false +``` + +## get_transform_flag method + +The **get_transform_flag** method gets the flag from a macro if there is one + +head over the following chapters for more information + +- [Transformation flags](#transformation-flags) + +### get_transform_flag: parameters + +| parameter | type | optional | default value | +| --------------------- | ------ | -------- | ------------- | +| the name of the macro | string | no | | + +### get_transform_flag: returns + +| return | type | always | condition | +| ------ | ------------- | ------ | --------------------------------------------- | +| macro | string | yes | the name of the macro | +| flag | string or nil | yes | the macro transformation flag if there is one | + +### get_transform_flag: example + +```lua +local macro = "{state_scstate}" + +local result, flag = test_macros:get_transform_flag(macro) +--> result is "state" flag is "state" (_sc prefix is removed) + +macro = "{last_check}" + +result, flag = test_macros:get_transform_flag(macro) +--> result is "last_check" flag is nil +``` + +## transform_date method + +The **transform_date** method converts a timestamp into a human readable date. It is possible to chose the date format using the [**timestamp_conversion_format parameter**](sc_param.md#default-parameters) and get help from the [**lua documentation**](https://www.lua.org/pil/22.1.html) for the option syntax. + +### transform_date: parameters + +| parameter | type | optional | default value | +| ----------------- | ------ | -------- | ------------- | +| a timestamp value | number | no | | + +### transform_date: returns + +| return | type | always | condition | +| ------ | ------ | ------ | --------------------------- | +| date | string | yes | timestamp converted to date | + +### transform_date: example + +```lua +local timestamp = 1623691758 + +local result = test_macros:transform_date(timestamp) +--> result is "2021-06-14 19:29:18" +``` + +## transform_short method + +The **transform_short** method keeps the first line of a string. 
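+
+Functionally, this amounts to cutting the string at the first line break. A minimal Lua equivalent (an illustration, not necessarily the module's exact code):
+
+```lua
+-- keep everything up to, but excluding, the first newline
+local function first_line(text)
+  return string.match(text, "^([^\n]*)")
+end
+
+print(first_line("Paris is a nice city\nsecond line")) --> Paris is a nice city
+```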
+
+### transform_short: parameters
+
+| parameter | type | optional | default value |
+| --------- | ------ | -------- | ------------- |
+| a string | string | no | |
+
+### transform_short: returns
+
+| return | type | always | condition |
+| -------------------------- | ------ | ------ | --------- |
+| the first line of a string | string | yes | |
+
+### transform_short: example
+
+```lua
+local string = "Paris is a nice city\n Mont de Marsan is way better"
+
+local result = test_macros:transform_short(string)
+--> result is "Paris is a nice city"
+```
+
+## transform_type method
+
+The **transform_type** method transforms a 0 or 1 value into SOFT or HARD.
+
+### transform_type: parameters
+
+| parameter | type | optional | default value |
+| --------- | ------ | -------- | ------------- |
+| 0 or 1 | number | no | |
+
+### transform_type: returns
+
+| return | type | always | condition |
+| ------------ | ------ | ------ | --------- |
+| SOFT or HARD | string | yes | |
+
+### transform_type: example
+
+```lua
+local state_type = 0
+
+local result = test_macros:transform_type(state_type)
+--> result is "SOFT"
+```
+
+## transform_state method
+
+The **transform_state** method transforms a status code into its human readable status (e.g. UP, DOWN, WARNING, CRITICAL...)
+
+### transform_state: parameters
+
+| parameter | type | optional | default value |
+| ------------ | ------ | -------- | ------------- |
+| 0, 1, 2 or 3 | number | no | |
+| the event | table | no | |
+
+### transform_state: returns
+
+| return | type | always | condition |
+| ----------------- | ------ | ------ | --------- |
+| the status string | string | yes | |
+
+### transform_state: example
+
+```lua
+local event = {
+  service_id = 2712,
+  element = 24,
+  category = 1,
+  host_id = 1991
+}
+
+local state = 1
+
+local result = test_macros:transform_state(state, event)
+--> result is "WARNING" because it is a service (category 1 = neb, element 24 = service_status event)
+
+
+event = {
+  element = 14,
+  category = 1,
+  host_id = 1991
+}
+
+result = test_macros:transform_state(state, event)
+--> result is "DOWN" because it is a host (category 1 = neb, element 14 = host_status event)
+```
diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md
index 624d7337067..2f11310d0f8 100644
--- a/stream-connectors/modules/docs/sc_param.md
+++ b/stream-connectors/modules/docs/sc_param.md
@@ -25,35 +25,37 @@ The sc_param module provides methods to help you handle parameters for your stre
 
 ### Default parameters
 
-| Parameter name | type | default value | description | default scope | additionnal information |
-| --------------------------- | ------ | ------------------------------------ | ------------- | ------------- | ----------------------- |
-| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a coma separated list, can use "neb", "bam", "storage".
Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) | -| accepted_elements | string | host_status,service_status,ba_status | | each event is linked to a broker element that we can use to filter events | it is a coma separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore. "Host status" becomes "host_status") | -| host_status | string | 0,1,2 | coma separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | -| service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | -| ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | -| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | -| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | -| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | -| accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | -| accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | -| accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | -| accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | -| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | -| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | -| max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). | | | -| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | -| service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. it must be used with service_severity_operator option | service_status(neb), acknowledgement(neb) | | -| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb), acknowledgement(neb) | | -| host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. 
it must be used with host_severity_operator option | host_status(neb), service_status(neb) , acknowledgement(neb) | | -| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb), acknowledgement(neb) | | -| ack_host_status | string | | | coma separated list of accepted host status for an acknowledgement event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | acknowledgement(neb) | | -| ack_service_status | string | | | coma separated list of accepted service status for an acknowledgement event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | acknowledgement(neb) | | -| dt_host_status | string | | | coma separated list of accepted host status for a downtime event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | downtime(neb) | | -| dt_service_status | string | | | coma separated list of accepted service status for a downtime event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | downtime(neb) | | -| enable_host_status_dedup | number | 0 | | enable the deduplication of host status event when set to 1 | host_status(neb) | | -| enable_service_status_dedup | number | 0 | | enable the deduplication of service status event when set to 1 | service_status(neb) | | -| accepted_authors | string | | | coma separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | | +| Parameter name | type | default value | description | default scope | additionnal information | +| --------------------------- | ------ | ----------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a coma separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) | +| accepted_elements | string | host_status,service_status,ba_status | | each event is linked to a broker element that we can use to filter events | it is a coma separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore. 
"Host status" becomes "host_status") | +| host_status | string | 0,1,2 | coma separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | +| service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | +| ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | +| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | +| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | +| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | +| accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | +| accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | +| accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | +| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | +| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | +| max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). | | | +| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | +| service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. it must be used with service_severity_operator option | service_status(neb), acknowledgement(neb) | | +| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb), acknowledgement(neb) | | +| host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. it must be used with host_severity_operator option | host_status(neb), service_status(neb) , acknowledgement(neb) | | +| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| ack_host_status | string | | | coma separated list of accepted host status for an acknowledgement event. 
It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | acknowledgement(neb) | | +| ack_service_status | string | | | coma separated list of accepted service status for an acknowledgement event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | acknowledgement(neb) | | +| dt_host_status | string | | | coma separated list of accepted host status for a downtime event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | downtime(neb) | | +| dt_service_status | string | | | coma separated list of accepted service status for a downtime event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | downtime(neb) | | +| enable_host_status_dedup | number | 1 | | enable the deduplication of host status event when set to 1 | host_status(neb) | | +| enable_service_status_dedup | number | 1 | | enable the deduplication of service status event when set to 1 | service_status(neb) | | +| accepted_authors | string | | | coma separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | | +| local_time_diff_from_utc | number | default value is the time difference the centreon central server has from UTC | | the time difference from UTC in seconds | all | | +| timestamp_conversion_format | string | %Y-%m-%d %X | | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | (date format information)[https://www.lua.org/pil/22.1.html] | ## Module initialization @@ -180,7 +182,7 @@ test_param:get_kafka_params(test_kafka_config, params) ## is_mandatory_config_set method -The **is_mandatory_config_set** method checks if all mandatory parameters for a stream connector are set up. If one is missing, it will print an error and return false. +The **is_mandatory_config_set** method checks if all mandatory parameters for a stream connector are set up. If one is missing, it will print an error and return false. Each mandatory parameter that is found is going to be stored in the standard parameters list. 
 
 ### is_mandatory_config_set: parameters
 
@@ -213,4 +215,15 @@
 local params = {
 
 local result = test_param:is_mandatory_config_set(mandatory_params, params)
 --> result is false because the "password" parameter is not in the list of parameters
+--[[
+  since username index (1) is lower than password index (2), the username property will still be available in the test_param.param table
+  --> test_param.param.username is "John"
+]]
+
+params.password = "hello"
+
+result = test_param:is_mandatory_config_set(mandatory_params, params)
+--> result is true because password and username are in the params table
+--> test_param.param.username is "John"
+--> test_param.param.password is "hello"
 ```
diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-1.rockspec b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-1.rockspec
similarity index 93%
rename from stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-1.rockspec
rename to stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-1.rockspec
index e5b4af8aa13..c52cac7ab1f 100644
--- a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-1.rockspec
+++ b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-1.rockspec
@@ -26,7 +26,7 @@ build = {
     ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua",
     ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/config.lua",
     ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/librdkafka.lua",
-    ["centreon-stream-connectors-lib.rdkafka.producer.lua"] = "modules/centreon-stream-connectors-lib/producer.lua",
+    ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/producer.lua",
     ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/topic_config.lua",
     ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/topic.lua"
   }
diff --git a/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-2.rockspec b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-2.rockspec
new file mode 100644
index 00000000000..a6c4ed1296f
--- /dev/null
+++ b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-2.rockspec
@@ -0,0 +1,33 @@
+package = "centreon-stream-connectors-lib"
+version = "1.1.0-2"
+source = {
+  url = "git+https://github.com/centreon/centreon-stream-connector-scripts",
+  tag = "1.1.0-2"
+}
+description = {
+  summary = "Centreon stream connectors lua modules",
+  detailed = [[
+    Those modules provides helpful methods to create
+    stream connectors for Centreon
+  ]],
+  license = ""
+}
+dependencies = {
+  "lua >= 5.1, < 5.4"
+}
+build = {
+  type = "builtin",
+  modules = {
+    ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua",
+    ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua",
+    ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua",
+    ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua",
+    ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua",
+    ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua",
+
["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-3.rockspec b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-3.rockspec new file mode 100644 index 00000000000..34cae90a9e7 --- /dev/null +++ b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-3.rockspec @@ -0,0 +1,33 @@ +package = "centreon-stream-connectors-lib" +version = "1.1.0-3" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.1.0-3" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec new file mode 100644 index 00000000000..fa778169151 --- /dev/null +++ b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec @@ -0,0 +1,38 @@ +package = "centreon-stream-connectors-lib" +version = "1.2.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.2.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "LuaSocket >= 3.0rc1-2", + "LuaCrypto >= 0.3.2-2" +} +build = { + type = 
"builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} \ No newline at end of file From d184e899b777e24fd9347220a48b433336936352 Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 15 Jun 2021 08:41:35 +0200 Subject: [PATCH 064/219] Update centreon-stream-connectors-lib-1.2.0-1.rockspec --- .../1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec index fa778169151..0dd857fed3b 100644 --- a/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec +++ b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec @@ -26,7 +26,7 @@ build = { ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", - ["centreon-stream-connectors-lib.sc_macros] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", @@ -35,4 +35,4 @@ build = { ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" } -} \ No 
newline at end of file +} From 84a8b8d6043104b6a8f365e856b1801b31e2857f Mon Sep 17 00:00:00 2001 From: tcharles Date: Fri, 18 Jun 2021 11:02:59 +0200 Subject: [PATCH 065/219] patch for el8 compatibility (#55) * fix requires in test_kafka script * fix rockspec again * prepare el8 compatibility * el8 compat patch --- .../google/google-bigquery-apiv2.lua | 2 + .../rdkafka/config.lua | 2 +- .../rdkafka/librdkafka.lua | 2 +- .../rdkafka/producer.lua | 2 +- .../rdkafka/topic.lua | 2 +- .../rdkafka/topic_config.lua | 2 +- ...ream-connectors-lib-1.2.0-1 copy.rockspec} | 0 ...tream-connectors-lib-1.2.0-2 copy.rockspec | 38 +++++++++++++++++++ ...eon-stream-connectors-lib-1.2.1-1.rockspec | 37 ++++++++++++++++++ .../modules/tests/kafka_test_connexion.lua | 6 +-- 10 files changed, 85 insertions(+), 8 deletions(-) rename stream-connectors/modules/specs/1.2.x/{centreon-stream-connectors-lib-1.2.0-1.rockspec => centreon-stream-connectors-lib-1.2.0-1 copy.rockspec} (100%) create mode 100644 stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-2 copy.rockspec create mode 100644 stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.1-1.rockspec diff --git a/stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua b/stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua index 5436a5eb42a..7ba8158c208 100644 --- a/stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua +++ b/stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua @@ -77,6 +77,8 @@ function EventQueue.new(params) params.accepted_elements = "host_status,service_status,downtime,acknowledgement,ba_status" self.sc_params.params.proxy_address = params.proxy_address self.sc_params.params.proxy_port = params.proxy_port + self.sc_params.params.proxy_username = params.proxy_username + self.sc_params.params.proxy_password = params.proxy_password -- apply users params and check syntax of standard ones self.sc_params:param_override(params) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua index 96e3d36c6e5..368b094a9fa 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua @@ -1,7 +1,7 @@ #!/usr/bin/lua local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") -local ffi = require 'ffi' +local ffi = require 'ffi' or 'cffi' local KafkaConfig = {} KafkaConfig.__index = KafkaConfig diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua index df0a75de0ea..5e97f3bc72f 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua @@ -1,6 +1,6 @@ #!/usr/bin/lua -local ffi = require 'ffi' +local ffi = require 'ffi' or 'cffi' ffi.cdef[[ typedef struct rd_kafka_s rd_kafka_t; diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua index e6805971ad1..28070a7a1f6 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua @@ -3,7 +3,7 @@ local librdkafka = 
require("centreon-stream-connectors-lib.rdkafka.librdkafka") local KafkaConfig = require("centreon-stream-connectors-lib.rdkafka.config") local KafkaTopic = require("centreon-stream-connectors-lib.rdkafka.topic") -local ffi = require 'ffi' +local ffi = require 'ffi' or 'cffi' local DEFAULT_DESTROY_TIMEOUT_MS = 3000 diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua index 7a55fffa40b..b5f97a2e884 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua @@ -2,7 +2,7 @@ local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") local KafkaTopicConfig = require("centreon-stream-connectors-lib.rdkafka.topic_config") -local ffi = require 'ffi' +local ffi = require 'ffi' or 'cffi' local KafkaTopic = { kafka_topic_map_ = {} } -- KafkaProducer will delete all topics on destroy diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua index a9e0e0f368a..a6f08fc4977 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua @@ -1,7 +1,7 @@ #!/usr/bin/lua local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") -local ffi = require 'ffi' +local ffi = require 'ffi' or 'cffi' local KafkaTopicConfig = {} KafkaTopicConfig.__index = KafkaTopicConfig diff --git a/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1 copy.rockspec similarity index 100% rename from stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1.rockspec rename to stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1 copy.rockspec diff --git a/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-2 copy.rockspec b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-2 copy.rockspec new file mode 100644 index 00000000000..ae732fb6a62 --- /dev/null +++ b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-2 copy.rockspec @@ -0,0 +1,38 @@ +package = "centreon-stream-connectors-lib" +version = "1.2.0-2" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.2.0-2" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2", + "luacrypto >= 0.3.2-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = 
"modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.1-1.rockspec b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.1-1.rockspec new file mode 100644 index 00000000000..351a34a6533 --- /dev/null +++ b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.1-1.rockspec @@ -0,0 +1,37 @@ +package = "centreon-stream-connectors-lib" +version = "1.2.1-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.2.1-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/tests/kafka_test_connexion.lua 
b/stream-connectors/modules/tests/kafka_test_connexion.lua index 9bd12b6b9ed..2f313b86964 100644 --- a/stream-connectors/modules/tests/kafka_test_connexion.lua +++ b/stream-connectors/modules/tests/kafka_test_connexion.lua @@ -20,9 +20,9 @@ local message = "This is a test message" ------ END OF PARAMETERS --------- - -config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) -config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) +-- you can uncomment this if you are on el7 +-- config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) +-- config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) local producer = require 'centreon-stream-connectors-lib.rdkafka.producer'.new(config) From 20e56041e942b3e41600c74055577358b1396465 Mon Sep 17 00:00:00 2001 From: UrBnW <40244829+UrBnW@users.noreply.github.com> Date: Fri, 18 Jun 2021 11:48:31 +0200 Subject: [PATCH 066/219] fix(influxdb) Properly replace backslashes (#54) --- .../centreon-certified/influxdb/influxdb-neb-apiv1.lua | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua index 78a68dea66a..5d9d7b449ef 100644 --- a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua +++ b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua @@ -152,14 +152,16 @@ function EventQueue:add(e) end -- message format : [,=...] -- =[,=...] [unix-nano-timestamp] - -- some characters [ ,=] must be escaped, let's replace them by _ for better handling + -- some characters [ ,=] must be escaped, let's replace them by the replacement_character for better handling + -- backslash at the end of a tag value is not supported, let's also replace it -- consider last space in service_description as a separator for an item tag local item = "" if string.find(service_description, " [^ ]+$") then - item = ",item=" .. string.gsub(string.gsub(service_description, ".* ", "", 1), "[ ,=]+", self.replacement_character) + item = string.gsub(service_description, ".* ", "", 1) + item = ",item=" .. string.gsub(string.gsub(item, "[ ,=]", self.replacement_character), "\\$", self.replacement_character) service_description = string.gsub(service_description, " +[^ ]+$", "", 1) end - service_description = string.gsub(service_description, "[ ,=]+", self.replacement_character) + service_description = string.gsub(string.gsub(service_description, "[ ,=]", self.replacement_character), "\\$", self.replacement_character) -- define messages from perfata, transforming instance names to inst tags, which leads to one message per instance -- consider new perfdata (dot-separated metric names) only (of course except for host-latency) local instances = {} @@ -170,7 +172,7 @@ function EventQueue:add(e) inst = "" metric = m else - inst = ",inst=" .. string.gsub(inst, "[ ,=]+", self.replacement_character) + inst = ",inst=" .. 
string.gsub(string.gsub(inst, "[ ,=]", self.replacement_character), "\\$", self.replacement_character)
       end
       if (not e.service_id and metric ~= "time") or string.match(metric, ".+[.].+") then
         if not instances[inst] then

From 7197cd62418331912d14b8e8fa5c00bfcc34d238 Mon Sep 17 00:00:00 2001
From: UrBnW <40244829+UrBnW@users.noreply.github.com>
Date: Fri, 25 Jun 2021 11:08:00 +0200
Subject: [PATCH 067/219] fix(influxdb) events may not have last_check value (#56)

---
 .../centreon-certified/influxdb/influxdb-neb-apiv1.lua | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua
index 5d9d7b449ef..bae83be3969 100644
--- a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua
+++ b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua
@@ -119,7 +119,7 @@ function EventQueue:add(e)
     perfdata = {}
   end
   -- retrieve and store state for further processing
-  if self.skip_events_state == 0 then
+  if self.skip_events_state == 0 and e.last_check ~= nil then
     perfdata["centreon.state"] = e.state
     perfdata["centreon.state_type"] = e.state_type
   elseif perfdata_err then

From 99341814cce5a0dc2b4b21d785567cbde8bd6cf7 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Thu, 15 Jul 2021 19:18:32 +0200
Subject: [PATCH 068/219] Splunk rework + prepare new release (#57)

* add splunk event apiv2 SC
* better cat and el handling
* add new flush module
* add template handling
* add load json file method
* add template file param + fix new elems
* fix wrong log message
* fix wrong send_data method naming
* add new sc_metrics module
* add default params value
* use is_valid_perfdata method where needed
* add new splunk metrics stream connector
* load needed modules
* improve logging and comments in metrics module
* add logging and comments for flush module
* add a test option
* comment code in sc_common module
* document new common method
* refacto flush module and cat and el mapping
* refacto cat + el mapping
* add new methods in sc_param documentation
* fix cat and el in modules
* add documentation of the sc_flush module
* copy pasta errors in sc_event doc
* add documentation for sc_metrics module
* update readme index
* add a documentation about mappings
* add templating documentation
* fix param doc link
* better stream connector naming
* fix thousand bugs
* fix macro bugs
---
 ...ry-apiv2.lua => bigquery-events-apiv2.lua} |  10 +-
 ...kafka-apiv2.lua => kafka-events-apiv2.lua} |   0
 ...-apiv2.lua => servicenow-events-apiv2.lua} |   0
 .../splunk/splunk-events-apiv2.lua            | 301 +++++++++
 .../splunk/splunk-metrics-apiv2.lua           | 294 ++++++++
 .../google/bigquery/bigquery.lua              |  41 +-
 .../sc_common.lua                             |  31 +
 .../sc_event.lua                              |  28 +-
 .../sc_flush.lua                              | 103 +++
 .../sc_macros.lua                             |   4 +-
 .../sc_metrics.lua                            | 250 +++++++
 .../sc_params.lua                             | 628 +++++++++++++++---
 stream-connectors/modules/docs/README.md      |  27 +-
 stream-connectors/modules/docs/mappings.md    | 164 +++++
 stream-connectors/modules/docs/sc_common.md   |  34 +
 stream-connectors/modules/docs/sc_event.md    |   4 +-
 stream-connectors/modules/docs/sc_flush.md    | 168 +++++
 stream-connectors/modules/docs/sc_metrics.md  | 238 +++++++
 stream-connectors/modules/docs/sc_param.md    |  80 ++-
 stream-connectors/modules/docs/templating.md  | 121 ++++
 20 files changed, 2375 insertions(+), 151 deletions(-)
 rename stream-connectors/centreon-certified/google/{google-bigquery-apiv2.lua => bigquery-events-apiv2.lua} (98%)
 rename
stream-connectors/centreon-certified/kafka/{kafka-apiv2.lua => kafka-events-apiv2.lua} (100%) rename stream-connectors/centreon-certified/servicenow/{servicenow-apiv2.lua => servicenow-events-apiv2.lua} (100%) create mode 100755 stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua create mode 100644 stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua create mode 100644 stream-connectors/modules/docs/mappings.md create mode 100644 stream-connectors/modules/docs/sc_flush.md create mode 100644 stream-connectors/modules/docs/sc_metrics.md create mode 100644 stream-connectors/modules/docs/templating.md diff --git a/stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua similarity index 98% rename from stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua rename to stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua index 7ba8158c208..f54867c8093 100644 --- a/stream-connectors/centreon-certified/google/google-bigquery-apiv2.lua +++ b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua @@ -30,7 +30,7 @@ function EventQueue.new(params) self.events[1] = { [1] = {}, - [6] = {}, + [5] = {}, [14] = {}, [24] = {} } @@ -46,7 +46,7 @@ function EventQueue.new(params) self.flush[1] = { [1] = function () return self:flush_ack() end, - [6] = function () return self:flush_dt() end, + [5] = function () return self:flush_dt() end, [14] = function () return self:flush_host() end, [24] = function () return self:flush_service() end } @@ -101,7 +101,7 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_common, self.sc_params.params, self.sc_logger) self.sc_oauth = sc_oauth.new(self.sc_params.params, self.sc_common, self.sc_logger) -- , self.sc_common, self.sc_logger) - self.sc_bq = sc_bq.new(self.sc_common, self.sc_params.params, self.sc_logger) + self.sc_bq = sc_bq.new(self.sc_params.params, self.sc_logger) self.sc_bq:get_tables_schema() @@ -210,7 +210,7 @@ function EventQueue:flush_dt () retval = self:send_data(self.sc_params.params.downtime_table) -- reset stored events list - self.events[1][6] = {} + self.events[1][5] = {} -- and update the timestamp self.sc_params.params.__internal_ts_dt_last_flush = os.time() @@ -248,7 +248,7 @@ function EventQueue:flush_old_queues() end -- flush old downtime events - if #self.events[1][6] > 0 and os.time() - self.sc_params.params.__internal_ts_dt_last_flush > self.sc_params.params.max_buffer_age then + if #self.events[1][5] > 0 and os.time() - self.sc_params.params.__internal_ts_dt_last_flush > self.sc_params.params.max_buffer_age then self:flush_dt() self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_dt_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. 
") is reached, flushing data") end diff --git a/stream-connectors/centreon-certified/kafka/kafka-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua similarity index 100% rename from stream-connectors/centreon-certified/kafka/kafka-apiv2.lua rename to stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua similarity index 100% rename from stream-connectors/centreon-certified/servicenow/servicenow-apiv2.lua rename to stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua new file mode 100755 index 00000000000..3498c563a96 --- /dev/null +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -0,0 +1,301 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Splunk Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "http_server_url", + "splunk_token", + "splunk_index" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" + local log_level = params.log_level or 2 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.proxy_address = params.proxy_address + self.sc_params.params.proxy_port = params.proxy_port + self.sc_params.params.proxy_username = params.proxy_username + self.sc_params.params.proxy_password = params.proxy_password + self.sc_params.params.splunk_source = params.splunk_source + self.sc_params.params.splunk_sourcetype = params.splunk_sourcetype or 
"_json" + self.sc_params.params.splunk_host = params.splunk_host or "Central" + self.sc_params.params.accetepd_categories = params.acceptd_categories or "neb" + self.sc_params.params.accetepd_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.logfile = params.logfile or "/var/log/centreon-broker/splunk-events-apiv2.log" + self.sc_params.params.log_level = params.log_level or 1 + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file() + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (data, element) return self:send_data(data, element) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + for index, value in pairs(template) do + self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) + end + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
+function EventQueue:format_event_host()
+  self.sc_event.event.formated_event = {
+    event_type = "host",
+    state = self.sc_event.event.state,
+    state_type = self.sc_event.event.state_type,
+    hostname = self.sc_event.event.cache.host.name,
+    output = string.gsub(self.sc_event.event.output, "\n", ""),
+  }
+end
+
+function EventQueue:format_event_service()
+  self.sc_event.event.formated_event = {
+    event_type = "service",
+    state = self.sc_event.event.state,
+    state_type = self.sc_event.event.state_type,
+    hostname = self.sc_event.event.cache.host.name,
+    service_description = self.sc_event.event.cache.service.description,
+    output = string.gsub(self.sc_event.event.output, "\n", ""),
+  }
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = {
+    sourcetype = self.sc_params.params.splunk_sourcetype,
+    source = self.sc_params.params.splunk_source,
+    index = self.sc_params.params.splunk_index,
+    host = self.sc_params.params.splunk_host,
+    time = self.sc_event.event.last_check,
+    event = self.sc_event.event.formated_event
+  }
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. " max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
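Each entry queued by `add()` is therefore one self-contained Splunk HEC event; `send_data()` later concatenates their JSON encodings into a single POST body. An illustrative entry (values invented, field names taken from the method above):

```lua
-- one queued entry as built by add(); broker.json_encode() turns it into
-- the {"sourcetype":...,"event":{...}} object sent to the HEC endpoint
local entry = {
  sourcetype = "_json",
  source = "centreon",
  index = "centreon_events",
  host = "Central",
  time = 1626336000,
  event = {
    event_type = "service",
    state = 2,
    state_type = 1,
    hostname = "srv-db-01",
    service_description = "disk-var",
    output = "DISK CRITICAL - free space: /var 5%"
  }
}
```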
+
+function EventQueue:send_data(data, element)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+
+  -- write payload in the logfile for test purpose
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(broker.json_encode(data)))
+    return true
+  end
+
+  local http_post_data = ""
+
+
+  for _, raw_event in ipairs(data) do
+    http_post_data = http_post_data .. broker.json_encode(raw_event)
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data))
+  self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. tostring(self.sc_params.params.http_server_url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(self.sc_params.params.http_server_url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.http_timeout)
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "content-type: application/json",
+        "content-length:" .. string.len(http_post_data),
+        "authorization: Splunk " .. self.sc_params.params.splunk_token,
+      }
+    )
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(http_post_data)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  if http_response_code == 200 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  end
+
+  return retval
+end
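Only the HTTP status code is tested above; the body accumulated by the write function is logged on failure, which is where Splunk's own diagnostic shows up. Typical collector answers look like the following (standard Splunk HEC responses, listed here for reference, not produced by this script):

```lua
-- 200 {"text":"Success","code":0}         -- event accepted
-- 403 {"text":"Invalid token","code":4}   -- bad splunk_token
-- 400 {"text":"Incorrect index","code":7} -- bad splunk_index
```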
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+-- Function write()
+function write(event)
+  -- First, flush all queues if needed (too old or size too big)
+  queue.sc_flush:flush_all_queues(queue.send_data_method[1])
+
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return true
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  -- drop event if wrong category
+  if not queue.sc_event:is_valid_category() then
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+    return true
+  end
+
+  -- drop event if wrong element
+  if not queue.sc_event:is_valid_element() then
+    queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+      .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    return true
+  end
+
+  -- drop event if it is not validated
+  if queue.sc_event:is_valid_event() then
+    queue:format_accepted_event()
+  else
+    return true
+  end
+
+  -- Since we've added an event to a specific queue, flush it if queue is full
+  queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element)
+  return true
+end
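The `write()` flow above leans on `sc_flush` keeping one queue per (category, element) pair: `add()` appends to it, while `flush_queue()` and `flush_all_queues()` drain it through `send_data_method[1]`. A sketch of the assumed structure, using the neb ids referenced elsewhere in this patch (host_status = 14, service_status = 24):

```lua
-- assumed sc_flush queue layout, as indexed in EventQueue:add()
local queues = {
  [1] = {                    -- neb category
    [14] = { events = {} },  -- host_status
    [24] = { events = {} }   -- service_status
  }
}
```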
diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
new file mode 100644
index 00000000000..17453f73fd8
--- /dev/null
+++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
@@ -0,0 +1,294 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Splunk Connector Metrics
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require "cURL"
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+
+--------------------------------------------------------------------------------
+-- event_queue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    "http_server_url",
+    "splunk_token",
+    "splunk_index"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log"
+  local log_level = params.log_level or 2
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  -- overriding default parameters for this stream connector if the default values don't suit the basic needs
+  self.sc_params.params.proxy_address = params.proxy_address
+  self.sc_params.params.proxy_port = params.proxy_port
+  self.sc_params.params.proxy_username = params.proxy_username
+  self.sc_params.params.proxy_password = params.proxy_password
+  self.sc_params.params.splunk_source = params.splunk_source
+  self.sc_params.params.splunk_sourcetype = params.splunk_sourcetype or "_json"
+  self.sc_params.params.splunk_host = params.splunk_host or "Central"
+  self.sc_params.params.accepted_categories = params.accepted_categories or "neb"
+  self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status"
+  self.sc_params.params.hard_only = params.hard_only or 0
+  self.sc_params.params.logfile = params.logfile or "/var/log/centreon-broker/splunk-metrics-apiv2.log"
+  self.sc_params.params.log_level = params.log_level or 1
+  self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0
+  self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0
+
+  -- apply users params and check syntax of standard ones
+  self.sc_params:param_override(params)
+  self.sc_params:check_params()
+  self.sc_params:build_accepted_elements_info()
+
+  self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger)
+
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.format_event = {
+    [categories.neb.id] = {
+      [elements.host_status.id] = function () return self:format_metrics_host() end,
+      [elements.service_status.id] = function () return self:format_metrics_service() end
+    },
+    [categories.bam.id] = {}
+  }
+
+  self.send_data_method = {
+    [1] = function (data, element) return self:send_data(data, element) end
+  }
+
+  -- return EventQueue object
+  setmetatable(self, { __index = EventQueue })
+  return self
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event method
+----------------------------------------------------------------------------------
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+  self.sc_event.event.formated_event = {}
+
+  -- can't format the event if the stream connector doesn't handle this kind of event
+  if not self.format_event[category][element] then
+    self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+      .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+      .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+      .. ". If it is not a misconfiguration, you can open an issue at https://github.com/centreon/centreon-stream-connector-scripts/issues")
+  else
+    self.format_event[category][element]()
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+function EventQueue:format_metrics_host()
+  self.sc_event.event.formated_event = {
+    event_type = "host",
+    state = self.sc_event.event.state,
+    state_type = self.sc_event.event.state_type,
+    hostname = self.sc_event.event.cache.host.name,
+    ctime = self.sc_event.event.last_check
+  }
+end
+
+function EventQueue:format_metrics_service()
+  self.sc_event.event.formated_event = {
+    event_type = "service",
+    state = self.sc_event.event.state,
+    state_type = self.sc_event.event.state_type,
+    hostname = self.sc_event.event.cache.host.name,
+    service_description = self.sc_event.event.cache.service.description,
+    ctime = self.sc_event.event.last_check
+  }
+
+  for metric_name, metric_data in pairs(self.sc_metrics.metrics) do
+    self.sc_event.event.formated_event["metric_name:" .. metric_name] = metric_data.value
+  end
+end
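Unlike the events connector, the metrics variant fills a `fields` table (Splunk's HEC metrics convention) with one `metric_name:<name>` entry per perfdata metric collected by `sc_metrics`. An illustrative result for a Ping service (values invented):

```lua
-- formated_event for one service_status after the loop above; add() sends
-- it as the "fields" member of the HEC payload
local fields = {
  event_type = "service",
  state = 0,
  state_type = 1,
  hostname = "srv-db-01",
  service_description = "Ping",
  ctime = 1626336000,
  ["metric_name:rta"] = 0.8,
  ["metric_name:pl"] = 0
}
```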
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = {
+    sourcetype = self.sc_params.params.splunk_sourcetype,
+    source = self.sc_params.params.splunk_source,
+    index = self.sc_params.params.splunk_index,
+    host = self.sc_params.params.splunk_host,
+    time = self.sc_event.event.last_check,
+    fields = self.sc_event.event.formated_event
+  }
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. " max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+function EventQueue:send_data(data, element)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+
+  -- write payload in the logfile for test purpose
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(broker.json_encode(data)))
+    return true
+  end
+
+  local http_post_data = ""
+
+
+  for _, raw_event in ipairs(data) do
+    http_post_data = http_post_data .. broker.json_encode(raw_event)
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data))
+  self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. tostring(self.sc_params.params.http_server_url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(self.sc_params.params.http_server_url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.http_timeout)
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "content-type: application/json",
+        "content-length:" .. string.len(http_post_data),
+        "authorization: Splunk " .. self.sc_params.params.splunk_token,
+      }
+    )
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(http_post_data)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  if http_response_code == 200 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+-- Function write()
+function write(event)
+  -- First, flush all queues if needed (too old or size too big)
+  queue.sc_flush:flush_all_queues(queue.send_data_method[1])
+
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return true
+  end
+
+  -- initiate event object
+  queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger)
+  queue.sc_event = queue.sc_metrics.sc_event
+
+  -- drop event if wrong category or element
+  if not queue.sc_metrics:is_valid_bbdo_element() then
+    queue.sc_logger:debug("dropping event because category or element is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])
+      .. ". Event element is: " .. queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])
+    return true
+  end
+
+  -- drop event if its perfdata isn't valid
+  if queue.sc_metrics:is_valid_metric_event() then
+    queue.sc_logger:debug("valid Perfdata?: " .. tostring(queue.sc_event.event.perfdata))
+    queue:format_accepted_event()
+  else
+    queue.sc_logger:debug("dropping event because metric event wasn't valid. Perfdata: " ..
tostring(queue.sc_event.event.perf_data)) + return true + end + + -- Since we've added an event to a specific queue, flush it if queue is full + queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) + return true +end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua b/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua index c7beb0edd97..5de22f98bb8 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua @@ -23,11 +23,10 @@ function bigquery.new(params, sc_logger) -- initiate parameters self.params = params - -- initiate bigquery table schema mapping (1 = neb, 6 = bam) self.schemas = { - [1] = {}, - [6] = {} + [self.params.bbdo.categories.neb.id] = {}, + [self.params.bbdo.categories.bam.id] = {} } setmetatable(self, { __index = BigQuery }) @@ -37,13 +36,16 @@ end --- get_tables_schema: load tables schemas according to the stream connector configuration -- @return true (boolean) function BigQuery:get_tables_schema() + local categories = self.params.bbdo.categories + local elements = self.params.bbdo.elements + -- use default schema if self.params._sc_gbq_use_default_schemas == 1 then - self.schemas[1][14] = self:default_host_table_schema() - self.schemas[1][24] = self:default_service_table_schema() - self.schemas[1][1] = self:default_ack_table_schema() - self.schemas[1][6] = self:default_dt_table_schema() - self.schemas[6][1] = self:default_ba_table_schema() + self.schemas[categories.neb.id][elements.host_status.id] = self:default_host_table_schema() + self.schemas[categories.neb.id][elements.service_status.id] = self:default_service_table_schema() + self.schemas[categories.neb.id][elements.acknowledgement.id] = self:default_ack_table_schema() + self.schemas[categories.neb.id][elements.downtime.id] = self:default_dt_table_schema() + self.schemas[categories.bam.id][elements.ba_status.id] = self:default_ba_table_schema() return true end @@ -57,19 +59,19 @@ function BigQuery:get_tables_schema() -- create tables schemas from stream connector configuration itself (not the best idea) if self.params._sc_gbq_use_default_schemas == 0 and self.params._sc_gbq_use_schema_config_file == 0 then -- build hosts table schema - self:build_table_schema("^_sc_gbq_host_column_", "_sc_gbq_host_column_", self.schemas[1][14]) + self:build_table_schema("^_sc_gbq_host_column_", "_sc_gbq_host_column_", self.schemas[categories.neb.id][elements.host_status.id]) -- build services table schema - self:build_table_schema("^_sc_gbq_service_column_", "_sc_gbq_service_column_", self.schemas[1][24]) + self:build_table_schema("^_sc_gbq_service_column_", "_sc_gbq_service_column_", self.schemas[categories.neb.id][elements.service_status.id]) -- build ba table schema - self:build_table_schema("^_sc_gbq_ba_column_", "_sc_gbq_ba_column_", self.schemas[6][1]) + self:build_table_schema("^_sc_gbq_ba_column_", "_sc_gbq_ba_column_", self.schemas[categories.bam.id][elements.ba_status.id]) -- build ack table schema - self:build_table_schema("^_sc_gbq_ack_column_", "_sc_gbq_ack_column_", self.schemas[1][1]) + self:build_table_schema("^_sc_gbq_ack_column_", "_sc_gbq_ack_column_", self.schemas[categories.neb.id][elements.acknowledgement.id]) -- build dowtime table schema - self:build_table_schema("^_sc_gbq_dt_column_", "_sc_gbq_dt_column_", self.schemas[1][6]) + 
self:build_table_schema("^_sc_gbq_dt_column_", "_sc_gbq_dt_column_", self.schemas[categories.neb.id][elements.downtime.id]) end return true @@ -183,12 +185,15 @@ function BigQuery:load_tables_schema_file() return false end + local categories = self.params.bbdo.categories + local elements = self.params.bbdo.elements + -- use default schema if we don't find a schema for a dedicated type of event - self.schemas[1][14] = schemas.host or self:default_host_table_schema() - self.schemas[1][24] = schemas.service or self:default_service_table_schema() - self.schemas[1][1] = schemas.ack or self:default_ack_table_schema() - self.schemas[1][6] = schemas.dt or self:default_dt_table_schema() - self.schemas[6][1] = schemas.ba or self:default_ba_table_schema() + self.schemas[categories.neb.id][elements.host_status.id] = schemas.host or self:default_host_table_schema() + self.schemas[categories.neb.id][elements.service_status.id] = schemas.service or self:default_service_table_schema() + self.schemas[categories.neb.id][elements.acknowledgement.id] = schemas.ack or self:default_ack_table_schema() + self.schemas[categories.neb.id][elements.downtime.id] = schemas.dt or self:default_dt_table_schema() + self.schemas[categories.bam.id][elements.ba_status.id] = schemas.ba or self:default_ba_table_schema() return true end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index ddec8981467..496e7916d0a 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -170,4 +170,35 @@ function ScCommon:generate_postfield_param_string(params) return param_string end +--- load_json_file: load a json file +-- @param json_file (string) path to the json file +-- @return true|false (boolean) if json file is valid or not +-- @return content (table) the parsed json +function ScCommon:load_json_file(json_file) + local file = io.open(json_file, "r") + + -- return false if we can't open the file + if not file then + self.sc_logger:error("[sc_common:load_json_file]: couldn't open file " + .. tostring(json_file) .. ". Make sure your file is there and that it is readable by centreon-broker") + return false + end + + -- get content of the file + local file_content = file:read("*a") + io.close(file) + + -- parse it + local content = broker.json_decode(file_content) + + -- return false if json couldn't be parsed + if (type(content) ~= "table") then + self.sc_logger:error("[sc_common:load_json_file]: file " + .. tostring(json_file) .. ". 
Is not a valid json file.") + return false + end + + return true, content +end + return sc_common \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index ead7e1e1514..259c44321b1 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -29,7 +29,6 @@ function sc_event.new(event, params, common, logger, broker) self.event.cache = {} setmetatable(self, { __index = ScEvent }) - return self end @@ -68,11 +67,11 @@ function ScEvent:is_valid_event() local is_valid_event = false -- run validation tests depending on the category of the event - if self.event.category == 1 then + if self.event.category == self.params.bbdo.categories.neb.id then is_valid_event = self:is_valid_neb_event() - elseif self.event.category == 3 then + elseif self.event.category == self.params.bbdo.categories.storage.id then is_valid_event = self:is_valid_storage_event() - elseif self.event.category == 6 then + elseif self.event.category == self.params.bbdo.categories.bam.id then is_valid_event = self:is_valid_bam_event() end @@ -85,13 +84,13 @@ function ScEvent:is_valid_neb_event() local is_valid_event = false -- run validation tests depending on the element type of the neb event - if self.event.element == 14 then + if self.event.element == self.params.bbdo.elements.host_status.id then is_valid_event = self:is_valid_host_status_event() - elseif self.event.element == 24 then + elseif self.event.element == self.params.bbdo.elements.service_status.id then is_valid_event = self:is_valid_service_status_event() - elseif self.event.element == 1 then + elseif self.event.element == self.params.bbdo.elements.acknowledgement.id then is_valid_event = self:is_valid_acknowledgement_event() - elseif self.event.element == 5 then + elseif self.event.element == self.params.bbdo.elements.downtime.id then is_valid_event = self:is_valid_downtime_event() end @@ -135,8 +134,7 @@ function ScEvent:is_valid_host_status_event() -- return false if host has not an accepted severity if not self:is_valid_host_severity() then - self.sc_logger:warning("[sc_event:is_valid_host_status_event]: service id: " .. tostring(self.event.service_id) - .. ". host_id: " .. tostring(self.event.host_id) .. ". Host has not an accepted severity") + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " has not an accepted severity") return false end @@ -325,7 +323,7 @@ function ScEvent:is_valid_event_status(accepted_status_list) end -- handle downtime event specific case for logging - if (self.event.category == 1 and self.event.element == 5) then + if (self.event.category == self.params.bbdo.categories.neb.id and self.event.element == self.params.bbdo.elements.downtime.id) then self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) .. ". Accepted states are: " .. tostring(accepted_status_list)) return false @@ -755,7 +753,7 @@ function ScEvent:is_valid_acknowledgement_event() -- return false if event status is not accepted if not self:is_valid_event_status(event_status) then self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. tostring(self.event.host_id) - .. " do not have a validated status. 
Status: " .. tostring(self.params.status_mapping[self.event.category][14][self.event.state])) + .. " do not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.params.bbdo.elements.host_status.id][self.event.state])) return false end -- service_id != 0 means ack is on a service @@ -772,7 +770,7 @@ function ScEvent:is_valid_acknowledgement_event() -- return false if event status is not accepted if not self:is_valid_event_status(event_status) then self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service with id: " .. tostring(self.event.service_id) - .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][24][self.event.state])) + .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.params.bbdo.elements.service_status.id][self.event.state])) return false end @@ -836,7 +834,7 @@ function ScEvent:is_valid_downtime_event() -- checks if the current host downtime state is an accpeted status if not self:is_valid_event_status(self.params.dt_host_status) then self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) - .. " do not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][14][self.event.state]) + .. " do not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) .. " Accepted states are: " .. tostring(self.params.dt_host_status)) return false end @@ -853,7 +851,7 @@ function ScEvent:is_valid_downtime_event() -- return false if event status is not accepted if not self:is_valid_event_status(self.params.dt_service_status) then self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service with id: " .. tostring(self.event.service_id) - .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][24][self.event.state]) + .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) .. " Accepted states are: " .. 
@@ -853,7 +851,7 @@
   -- return false if event status is not accepted
   if not self:is_valid_event_status(self.params.dt_service_status) then
     self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service with id: " .. tostring(self.event.service_id)
-      .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][24][self.event.state])
+      .. " does not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state])
       .. " Accepted states are: " .. tostring(self.params.dt_service_status))
     return false
   end
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua
new file mode 100644
index 00000000000..df09cdef527
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua
@@ -0,0 +1,103 @@
+#!/usr/bin/lua
+
+---
+-- Module that handles data queues for stream connectors
+-- @module sc_flush
+-- @alias sc_flush
+local sc_flush = {}
+
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+
+local ScFlush = {}
+
+--- sc_flush.new: sc_flush constructor
+-- @param params (table) the params table of the stream connector
+-- @param [opt] sc_logger (object) a sc_logger object
+function sc_flush.new(params, logger)
+  local self = {}
+
+  -- create a default logger if it is not provided
+  self.sc_logger = logger
+  if not self.sc_logger then
+    self.sc_logger = sc_logger.new()
+  end
+
+  self.params = params
+
+  local os_time = os.time()
+  local categories = self.params.bbdo.categories
+  local elements = self.params.bbdo.elements
+
+  self.queues = {
+    [categories.neb.id] = {},
+    [categories.storage.id] = {},
+    [categories.bam.id] = {}
+  }
+
+  -- link queue flush info to their respective categories and elements
+  for element_name, element_info in pairs(self.params.accepted_elements_info) do
+    self.queues[element_info.category_id][element_info.element_id] = {
+      flush_date = os_time,
+      events = {}
+    }
+  end
+
+  setmetatable(self, { __index = ScFlush })
+  return self
+end
+
+--- flush_all_queues: tries to flush all queues according to accepted elements
+-- @param send_method (function) the function from the stream connector that will send the data to the wanted tool
+function ScFlush:flush_all_queues(send_method)
+  self.sc_logger:debug("[sc_flush:flush_all_queues]: Starting to flush all queues")
+
+  -- flush and reset queues of accepted elements
+  for element_name, element_info in pairs(self.params.accepted_elements_info) do
+    self:flush_queue(send_method, element_info.category_id, element_info.element_id)
+  end
+
+  self.sc_logger:debug("[sc_flush:flush_all_queues]: All queues have been flushed")
+end
+
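+-- usage sketch (assumed wiring in a stream connector's flush() entry point):
+-- local queue = ... -- object holding a sc_flush instance and a send_data function
+-- function flush()
+--   queue.sc_flush:flush_all_queues(queue.send_data_method[1])
+--   return true
+-- end
+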
" events") + local retval = send_method(self.queues[category][element].events, rem[category][element]) + + if retval then + self:reset_queue(category, element) + end + else + return true + end + + return retval +end + +--- reset_queue: put a queue back to its initial state after flushing its events +-- @param category (number) the category related to the queue +-- @param element (number) the element related to the queue +function ScFlush:reset_queue(category, element) + self.queues[category][element].flush_date = os.time() + self.queues[category][element].events = {} +end + +return sc_flush \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua index cf5629116d4..506fa64c589 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -13,11 +13,11 @@ local ScMacros = {} --- sc_macros.new: sc_macros constructor -- @param params (table) the stream connector parameter table -- @param sc_logger (object) object instance from sc_logger module -function sc_macros.new(params, sc_logger) +function sc_macros.new(params, logger) local self = {} -- initiate mandatory libs - self.sc_logger = sc_logger + self.sc_logger = logger if not self.sc_logger then self.sc_logger = sc_logger.new() end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua new file mode 100644 index 00000000000..e6574c47367 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua @@ -0,0 +1,250 @@ +#!/usr/bin/lua + +--- +-- Module that handles event metrics for stream connectors +-- @module sc_metrics +-- @alias sc_metrics +local sc_metrics = {} + +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") + +local ScMetrics = {} + +--- sc_metrics.new: sc_metrics constructor +-- @param event (table) the current event +-- @param params (table) the params table of the stream connector +-- @param common (object) a sc_common instance +-- @param broker (object) a sc_broker instance +-- @param [opt] sc_logger (object) a sc_logger instance +function sc_metrics.new(event, params, common, broker, logger) + self = {} + + -- create a default logger if it is not provided + self.sc_logger = logger + if not self.sc_logger then + self.sc_logger = sc_logger.new() + end + + self.sc_common = common + self.params = params + self.sc_broker = broker + + local categories = self.params.bbdo.categories + local elements = self.params.bbdo.elements + + -- store metric validation functions inside a table linked to category/element + self.metric_validation = { + [categories.neb.id] = { + [elements.host.id] = function () return self:is_valid_host_metric_event() end, + [elements.host_status.id] = function() return self:is_valid_host_metric_event() end, + [elements.service.id] = function () return self:is_valid_service_metric_event() end, + [elements.service_status.id] = function () return self:is_valid_service_metric_event() end + }, + [categories.bam.id] = { + [elements.kpi_event.id] = function () return 
self:is_valid_kpi_metric_event() end + } + } + + -- initiate metrics table + self.metrics = {} + -- initiate sc_event object + self.sc_event = sc_event.new(event, self.params, self.sc_common, self.sc_logger, self.sc_broker) + + setmetatable(self, { __index = ScMetrics }) + return self +end + +--- is_valid_bbdo_element: checks if the event category and element are valid according to parameters and bbdo protocol +-- @return true|false (boolean) depending on the validity of the event category and element +function ScMetrics:is_valid_bbdo_element() + -- initiate variables with shorter name + local categories = self.params.bbdo.categories + local elements = self.params.bbdo.elements + local event_category = self.sc_event.event.category + local event_element = self.sc_event.event.element + -- self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element]: event cat: " .. tostring(event_category) .. ". Event element: " .. tostring(event_element)) + + -- drop event if event category is not accepted + if not self.sc_event:find_in_mapping(self.params.category_mapping, self.params.accepted_categories, event_category) then + return false + else + -- drop event if accepted category is not supposed to be used for a metric stream connector + if event_category ~= categories.neb.id and event_category ~= categories.bam.id then + self.sc_logger:warning("[sc_metrics:is_valid_bbdo_element] Configuration error. accepted categories from paramters are: " + .. tostring(self.params.accepted_categories) .. ". Only bam and neb can be used for metrics") + return false + else + -- drop event if element is not accepted + if not self.sc_event:find_in_mapping(self.params.element_mapping[event_category], self.params.accepted_elements, event_element) then + return false + else + -- drop event if element is not an element that carries perfdata + if event_element ~= elements.host.id + and event_element ~= elements.host_status.id + and event_element ~= elements.service.id + and event_element ~= elements.service_status.id + and event_element ~= elements.kpi_event.id + then + self.sc_logger:warning("[sc_metrics:is_valid_bbdo_element] Configuration error. accepted elements from paramters are: " + .. tostring(self.params.accepted_elements) .. ". Only host, host_status, service, service_status and kpi_event can be used for metrics") + return false + end + end + end + + return true + end +end + +--- is_valid_metric_event: makes sure that the event is a valid event for metric usage +-- @return true|false (boolean) depending on the validity of the metric event +function ScMetrics:is_valid_metric_event() + category = self.sc_event.event.category + element = self.sc_event.event.element + + self.sc_logger:debug("[sc_metrics:is_valid_metric_event]: starting validation for event with category: " + .. tostring(category) .. ". And element: " .. tostring(element)) + return self.metric_validation[category][element]() +end + +--- is_valid_host_metric_event: makes sure that the metric and the event from the host are valid according to the stream connector parameters +-- @return true|false (boolean) depening on the validity of the event +function ScMetrics:is_valid_host_metric_event() + -- return false if we can't get hostname or host id is nil + if not self.sc_event:is_valid_host() then + self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. 
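+-- usage sketch (assumed stream connector wiring): build the object in write()
+-- and only format/store the event when the whole validation chain passes
+-- function write(event)
+--   local metrics = sc_metrics.new(event, params, common, broker, logger)
+--   if metrics:is_valid_bbdo_element() and metrics:is_valid_metric_event() then
+--     -- metrics.metrics now contains the parsed perfdata, indexed by metric name
+--   end
+--   return true
+-- end
+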
" hasn't been validated") + return false + end + + -- return false if host is not monitored from an accepted poller + if not self.sc_event:is_valid_poller() then + self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " is not monitored from an accepted poller") + return false + end + + -- return false if host has not an accepted severity + if not self.sc_event:is_valid_host_severity() then + self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " has not an accepted severity") + return false + end + + -- return false if host is not in an accepted hostgroup + if not self.sc_event:is_valid_hostgroup() then + self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " is not in an accepted hostgroup") + return false + end + + -- return false if there is no perfdata or they it can't be parsed + if not self:is_valid_perfdata(self.sc_event.event.perfdata) then + self.sc_logger:warning("[sc_metrics:is_vaild_host_metric_event]: host_id: " + .. tostring(self.sc_event.event.host_id) .. " is not sending valid perfdata. Received perfdata: " .. tostring(self.sc_event.event.perf_data)) + return false + end + + return true +end + +--- is_valid_host_metric_event: makes sure that the metric and the event from the service are valid according to the stream connector parameters +-- @return true|false (boolean) depening on the validity of the event +function ScMetrics:is_valid_service_metric_event() + -- return false if we can't get hostname or host id is nil + if not self.sc_event:is_valid_host() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " hasn't been validated") + return false + end + + -- return false if we can't get service description of service id is nil + if not self.sc_event:is_valid_service() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service with id: " .. tostring(self.sc_event.event.service_id) .. " hasn't been validated") + return false + end + + -- return false if host is not monitored from an accepted poller + if not self.sc_event:is_valid_poller() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) + .. ". host_id: " .. tostring(self.sc_event.event.host_id) .. " is not monitored from an accepted poller") + return false + end + + -- return false if host has not an accepted severity + if not self.sc_event:is_valid_host_severity() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) + .. ". host_id: " .. tostring(self.sc_event.event.host_id) .. ". Host has not an accepted severity") + return false + end + + -- return false if service has not an accepted severity + if not self.sc_event:is_valid_service_severity() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) + .. ". host_id: " .. tostring(self.sc_event.event.host_id) .. ". Service has not an accepted severity") + return false + end + + -- return false if host is not in an accepted hostgroup + if not self.sc_event:is_valid_hostgroup() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " .. tostring(self.sc_event.event.service_id) + .. 
" is not in an accepted hostgroup. Host ID is: " .. tostring(self.sc_event.event.host_id)) + return false + end + + -- return false if service is not in an accepted servicegroup + if not self.sc_event:is_valid_servicegroup() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " .. tostring(self.sc_event.event.service_id) .. " is not in an accepted servicegroup") + return false + end + + -- return false if there is no perfdata or they it can't be parsed + if not self:is_valid_perfdata(self.sc_event.event.perfdata) then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " + .. tostring(self.sc_event.event.service_id) .. " is not sending valid perfdata. Received perfdata: " .. tostring(self.sc_event.event.perfdata)) + return false + end + + return true +end + +--- is_valid_host_metric_event: makes sure that the metric and the event from the KPI are valid according to the stream connector parameters +-- @return true|false (boolean) depening on the validity of the event +function ScMetrics:is_valid_kpi_metric_event() + if not self:is_valid_perfdata(self.sc_event.event.perfdata) then + self.sc_logger:warning("[sc_metrics:is_vaild_kpi_metric_event]: kpi_id: " + .. tostring(self.sc_event.event.kpi_id) .. " is not sending valid perfdata. Received perfdata: " .. tostring(self.sc_event.event.perf_data)) + return false + end + + return true +end + +--- is_valid_perfdata: makes sure that the perfdata string is a valid one +-- @param perfdata (string) a string that contains perfdata +-- @return true|false (boolean) depending on the validity of the perfdata +function ScMetrics:is_valid_perfdata(perfdata) + -- drop event if perfdata is nil or empty + if not perfdata or perfdata == "" then + return false + end + + -- parse perfdata + local metrics_info, error = broker.parse_perfdata(perfdata, true) + + -- drop event if parsing failed + if not metrics_info then + self.sc_logger:error("[sc_metrics:is_valid_perfdata]: couldn't parse perfdata. Error is: " + .. tostring(error) .. ". Perfdata string is: " .. 
+
+return sc_metrics
\ No newline at end of file
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
index fec7ef2e816..88c0c6cdda7 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
@@ -68,16 +68,21 @@ function sc_params.new(common, logger)
     max_buffer_size = 1,
     max_buffer_age = 5,
 
+    -- event formatting parameters
+    format_file = "",
+
     -- time parameters
     local_time_diff_from_utc = os.difftime(os.time(), os.time(os.date("!*t", os.time()))),
     timestamp_conversion_format = "%Y-%m-%d %X", -- will print 2021-06-11 10:43:38
 
     -- internal parameters
     __internal_ts_last_flush = os.time(),
+
+    -- testing parameters
+    send_data_test = 0,
 
     -- initiate mappings
     element_mapping = {},
-    category_mapping = {},
     status_mapping = {},
     state_type_mapping = {
       [0] = "SOFT",
@@ -89,132 +94,492 @@ function sc_params.new(common, logger)
     max_stored_events = 10 -- do not use values above 100
   }
 
-  -- maps category id and name
-  self.params.category_mapping = {
-    neb = 1,
-    bbdo = 2,
-    storage = 3,
-    correlation = 4,
-    dumper = 5,
-    bam = 6,
-    extcmd = 7
+  -- maps category names and ids
+  self.params.bbdo = {
+    categories = {
+      neb = {
+        id = 1,
+        name = "neb"
+      },
+      storage = {
+        id = 3,
+        name = "storage"
+      },
+      bam = {
+        id = 6,
+        name = "bam"
+      }
+    }
+  }
+
+  local categories = self.params.bbdo.categories
+  self.params.bbdo.elements = {
+    acknowledgement = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 1,
+      name = "acknowledgement"
+    },
+    comment = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 2,
+      name = "comment"
+    },
+    custom_variable = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 3,
+      name = "custom_variable"
+    },
+    custom_variable_status = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 4,
+      name = "custom_variable_status"
+    },
+    downtime = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 5,
+      name = "downtime"
+    },
+    event_handler = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 6,
+      name = "event_handler"
+    },
+    flapping_status = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 7,
+      name = "flapping_status"
+    },
+    host_check = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 8,
+      name = "host_check"
+    },
+    host_dependency = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 9,
+      name = "host_dependency"
+    },
+    host_group = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 10,
+      name = "host_group"
+    },
+    host_group_member = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 11,
+      name = "host_group_member"
+    },
+    host = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 12,
+      name = "host"
+    },
+    host_parent = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 13,
+      name = "host_parent"
+    },
+    host_status = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 14,
+      name = "host_status"
+    },
+    instance = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 15,
+      name = "instance"
+    },
+    instance_status = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 16,
+      name = "instance_status"
+    },
+    log_entry = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 17,
+      name = "log_entry"
+    },
+    module = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 18,
+      name = "module"
+    },
+    service_check = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 19,
+      name = "service_check"
+    },
+    service_dependency = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 20,
+      name = "service_dependency"
+    },
+    service_group = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 21,
+      name = "service_group"
+    },
+    service_group_member = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 22,
+      name = "service_group_member"
+    },
+    service = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 23,
+      name = "service"
+    },
+    service_status = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 24,
+      name = "service_status"
+    },
+    instance_configuration = {
+      category_id = categories.neb.id,
+      category_name = categories.neb.name,
+      id = 25,
+      name = "instance_configuration"
+    },
+    metric = {
+      category_id = categories.storage.id,
+      category_name = categories.storage.name,
+      id = 1,
+      name = "metric"
+    },
+    rebuild = {
+      category_id = categories.storage.id,
+      category_name = categories.storage.name,
+      id = 2,
+      name = "rebuild"
+    },
+    remove_graph = {
+      category_id = categories.storage.id,
+      category_name = categories.storage.name,
+      id = 3,
+      name = "remove_graph"
+    },
+    status = {
+      category_id = categories.storage.id,
+      category_name = categories.storage.name,
+      id = 4,
+      name = "status"
+    },
+    index_mapping = {
+      category_id = categories.storage.id,
+      category_name = categories.storage.name,
+      id = 5,
+      name = "index_mapping"
+    },
+    metric_mapping = {
+      category_id = categories.storage.id,
+      category_name = categories.storage.name,
+      id = 6,
+      name = "metric_mapping"
+    },
+    ba_status = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 1,
+      name = "ba_status"
+    },
+    kpi_status = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 2,
+      name = "kpi_status"
+    },
+    meta_service_status = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 3,
+      name = "meta_service_status"
+    },
+    ba_event = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 4,
+      name = "ba_event"
+    },
+    kpi_event = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 5,
+      name = "kpi_event"
+    },
+    ba_duration_event = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 6,
+      name = "ba_duration_event"
+    },
+    dimension_ba_event = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 7,
+      name = "dimension_ba_event"
+    },
+    dimension_kpi_event = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 8,
+      name = "dimension_kpi_event"
+    },
+    dimension_ba_bv_relation_event = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 9,
+      name = "dimension_ba_bv_relation_event"
+    },
+    dimension_bv_event = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 10,
+      name = "dimension_bv_event"
+    },
+    dimension_truncate_table_signal = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 11,
+      name = "dimension_truncate_table_signal"
+    },
+    bam_rebuild = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 12,
+      name = "bam_rebuild"
+    },
+    dimension_timeperiod = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 13,
+      name = "dimension_timeperiod"
+    },
+    dimension_ba_timeperiod_relation = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 14,
+      name = "dimension_ba_timeperiod_relation"
+    },
+    dimension_timeperiod_exception = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 15,
+      name = "dimension_timeperiod_exception"
+    },
+    dimension_timeperiod_exclusion = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 16,
+      name = "dimension_timeperiod_exclusion"
+    },
+    inherited_downtime = {
+      category_id = categories.bam.id,
+      category_name = categories.bam.name,
+      id = 17,
+      name = "inherited_downtime"
+    }
   }
 
+  local elements = self.params.bbdo.elements
+
   -- initiate category and element mapping
   self.params.element_mapping = {
-    [1] = {},
-    [3] = {},
-    [6] = {}
+    [categories.neb.id] = {},
+    [categories.storage.id] = {},
+    [categories.bam.id] = {}
   }
 
   -- maps category id with element name and element id
   -- neb elements
-  self.params.element_mapping[1].acknowledgement = 1
-  self.params.element_mapping[1].comment = 2
-  self.params.element_mapping[1].custom_variable = 3
-  self.params.element_mapping[1].custom_variable_status = 4
-  self.params.element_mapping[1].downtime = 5
-  self.params.element_mapping[1].event_handler = 6
-  self.params.element_mapping[1].flapping_status = 7
-  self.params.element_mapping[1].host_check = 8
-  self.params.element_mapping[1].host_dependency = 9
-  self.params.element_mapping[1].host_group = 10
-  self.params.element_mapping[1].host_group_member = 11
-  self.params.element_mapping[1].host = 12
-  self.params.element_mapping[1].host_parent = 13
-  self.params.element_mapping[1].host_status = 14
-  self.params.element_mapping[1].instance = 15
-  self.params.element_mapping[1].instance_status = 16
-  self.params.element_mapping[1].log_entry = 17
-  self.params.element_mapping[1].module = 18
-  self.params.element_mapping[1].service_check = 19
-  self.params.element_mapping[1].service_dependency = 20
-  self.params.element_mapping[1].service_group = 21
-  self.params.element_mapping[1].service_group_member = 22
-  self.params.element_mapping[1].service = 23
-  self.params.element_mapping[1].service_status = 24
-  self.params.element_mapping[1].instance_configuration = 25
+  self.params.element_mapping[categories.neb.id].acknowledgement = elements.acknowledgement.id
+  self.params.element_mapping[categories.neb.id].comment = elements.comment.id
+  self.params.element_mapping[categories.neb.id].custom_variable = elements.custom_variable.id
+  self.params.element_mapping[categories.neb.id].custom_variable_status = elements.custom_variable_status.id
+  self.params.element_mapping[categories.neb.id].downtime = elements.downtime.id
+  self.params.element_mapping[categories.neb.id].event_handler = elements.event_handler.id
+  self.params.element_mapping[categories.neb.id].flapping_status = elements.flapping_status.id
+  self.params.element_mapping[categories.neb.id].host_check = elements.host_check.id
+  self.params.element_mapping[categories.neb.id].host_dependency = elements.host_dependency.id
+  self.params.element_mapping[categories.neb.id].host_group = elements.host_group.id
+  self.params.element_mapping[categories.neb.id].host_group_member = elements.host_group_member.id
+  self.params.element_mapping[categories.neb.id].host = elements.host.id
+  self.params.element_mapping[categories.neb.id].host_parent = elements.host_parent.id
+  self.params.element_mapping[categories.neb.id].host_status = elements.host_status.id
+  self.params.element_mapping[categories.neb.id].instance = elements.instance.id
+  self.params.element_mapping[categories.neb.id].instance_status = elements.instance_status.id
+  self.params.element_mapping[categories.neb.id].log_entry = elements.log_entry.id
+  self.params.element_mapping[categories.neb.id].module = elements.module.id
+  self.params.element_mapping[categories.neb.id].service_check = elements.service_check.id
+  self.params.element_mapping[categories.neb.id].service_dependency = elements.service_dependency.id
+  self.params.element_mapping[categories.neb.id].service_group = elements.service_group.id
+  self.params.element_mapping[categories.neb.id].service_group_member = elements.service_group_member.id
+  self.params.element_mapping[categories.neb.id].service = elements.service.id
+  self.params.element_mapping[categories.neb.id].service_status = elements.service_status.id
+  self.params.element_mapping[categories.neb.id].instance_configuration = elements.instance_configuration.id
 
   -- metric elements mapping
-  self.params.element_mapping[3].metric = 1
-  self.params.element_mapping[3].rebuild = 2
-  self.params.element_mapping[3].remove_graph = 3
-  self.params.element_mapping[3].status = 4
-  self.params.element_mapping[3].index_mapping = 5
-  self.params.element_mapping[3].metric_mapping = 6
+  self.params.element_mapping[categories.storage.id].metric = elements.metric.id
+  self.params.element_mapping[categories.storage.id].rebuild = elements.rebuild.id
+  self.params.element_mapping[categories.storage.id].remove_graph = elements.remove_graph.id
+  self.params.element_mapping[categories.storage.id].status = elements.status.id
+  self.params.element_mapping[categories.storage.id].index_mapping = elements.index_mapping.id
+  self.params.element_mapping[categories.storage.id].metric_mapping = elements.metric_mapping.id
 
   -- bam elements mapping
-  self.params.element_mapping[6].ba_status = 1
-  self.params.element_mapping[6].kpi_status = 2
-  self.params.element_mapping[6].meta_service_status = 3
-  self.params.element_mapping[6].ba_event = 4
-  self.params.element_mapping[6].kpi_event = 5
-  self.params.element_mapping[6].ba_duration_event = 6
-  self.params.element_mapping[6].dimension_ba_event = 7
-  self.params.element_mapping[6].dimension_kpi_event = 8
-  self.params.element_mapping[6].dimension_ba_bv_relation_event = 9
-  self.params.element_mapping[6].dimension_bv_event = 10
-  self.params.element_mapping[6].dimension_truncate_table_signal = 11
-  self.params.element_mapping[6].bam_rebuild = 12
-  self.params.element_mapping[6].dimension_timeperiod = 13
-  self.params.element_mapping[6].dimension_ba_timeperiod_relation = 14
-  self.params.element_mapping[6].dimension_timeperiod_exception = 15
-  self.params.element_mapping[6].dimension_timeperiod_exclusion = 16
-  self.params.element_mapping[6].inherited_downtime = 17
+  self.params.element_mapping[categories.bam.id].ba_status = elements.ba_status.id
+  self.params.element_mapping[categories.bam.id].kpi_status = elements.kpi_status.id
+  self.params.element_mapping[categories.bam.id].meta_service_status = elements.meta_service_status.id
+  self.params.element_mapping[categories.bam.id].ba_event = elements.ba_event.id
+  self.params.element_mapping[categories.bam.id].kpi_event = elements.kpi_event.id
+  self.params.element_mapping[categories.bam.id].ba_duration_event = elements.ba_duration_event.id
+  self.params.element_mapping[categories.bam.id].dimension_ba_event = elements.dimension_ba_event.id
+  self.params.element_mapping[categories.bam.id].dimension_kpi_event = elements.dimension_kpi_event.id
+  self.params.element_mapping[categories.bam.id].dimension_ba_bv_relation_event = elements.dimension_ba_bv_relation_event.id
+  self.params.element_mapping[categories.bam.id].dimension_bv_event = elements.dimension_bv_event.id
+  self.params.element_mapping[categories.bam.id].dimension_truncate_table_signal = elements.dimension_truncate_table_signal.id
+  self.params.element_mapping[categories.bam.id].bam_rebuild = elements.bam_rebuild.id
+  self.params.element_mapping[categories.bam.id].dimension_timeperiod = elements.dimension_timeperiod.id
+  self.params.element_mapping[categories.bam.id].dimension_ba_timeperiod_relation = elements.dimension_ba_timeperiod_relation.id
+  self.params.element_mapping[categories.bam.id].dimension_timeperiod_exception = elements.dimension_timeperiod_exception.id
+  self.params.element_mapping[categories.bam.id].dimension_timeperiod_exclusion = elements.dimension_timeperiod_exclusion.id
+  self.params.element_mapping[categories.bam.id].inherited_downtime = elements.inherited_downtime.id
+
+  self.params.reverse_element_mapping = {
+    [categories.neb.id] = {
+      [elements.acknowledgement.id] = "acknowledgement",
+      [elements.comment.id] = "comment",
+      [elements.custom_variable.id] = "custom_variable",
+      [elements.custom_variable_status.id] = "custom_variable_status",
+      [elements.downtime.id] = "downtime",
+      [elements.event_handler.id] = "event_handler",
+      [elements.flapping_status.id] = "flapping_status",
+      [elements.host_check.id] = "host_check",
+      [elements.host_dependency.id] = "host_dependency",
+      [elements.host_group.id] = "host_group",
+      [elements.host_group_member.id] = "host_group_member",
+      [elements.host.id] = "host",
+      [elements.host_parent.id] = "host_parent",
+      [elements.host_status.id] = "host_status",
+      [elements.instance.id] = "instance",
+      [elements.instance_status.id] = "instance_status",
+      [elements.log_entry.id] = "log_entry",
+      [elements.module.id] = "module",
+      [elements.service_check.id] = "service_check",
+      [elements.service_dependency.id] = "service_dependency",
+      [elements.service_group.id] = "service_group",
+      [elements.service_group_member.id] = "service_group_member",
+      [elements.service.id] = "service",
+      [elements.service_status.id] = "service_status",
+      [elements.instance_configuration.id] = "instance_configuration"
+    },
+    [categories.storage.id] = {
+      [elements.metric.id] = "metric",
+      [elements.rebuild.id] = "rebuild",
+      [elements.remove_graph.id] = "remove_graph",
+      [elements.status.id] = "status",
+      [elements.index_mapping.id] = "index_mapping",
+      [elements.metric_mapping.id] = "metric_mapping"
+    },
+    [categories.bam.id] = {
+      [elements.ba_status.id] = "ba_status",
+      [elements.kpi_status.id] = "kpi_status",
+      [elements.meta_service_status.id] = "meta_service_status",
+      [elements.ba_event.id] = "ba_event",
+      [elements.kpi_event.id] = "kpi_event",
+      [elements.ba_duration_event.id] = "ba_duration_event",
+      [elements.dimension_ba_event.id] = "dimension_ba_event",
+      [elements.dimension_kpi_event.id] = "dimension_kpi_event",
+      [elements.dimension_ba_bv_relation_event.id] = "dimension_ba_bv_relation_event",
+      [elements.dimension_bv_event.id] = "dimension_bv_event",
+      [elements.dimension_truncate_table_signal.id] = "dimension_truncate_table_signal",
+      [elements.bam_rebuild.id] = "bam_rebuild",
+      [elements.dimension_timeperiod.id] = "dimension_timeperiod",
+      [elements.dimension_ba_timeperiod_relation.id] = "dimension_ba_timeperiod_relation",
+      [elements.dimension_timeperiod_exception.id] = "dimension_timeperiod_exception",
+      [elements.dimension_timeperiod_exclusion.id] = "dimension_timeperiod_exclusion",
+      [elements.inherited_downtime.id] = "inherited_downtime"
+    }
+  }
 
-  -- initiate category and status mapping
-  self.params.status_mapping = {
-    [1] = {},
-    [3] = {},
-    [6] = {}
-  }
-
-  -- maps neb category statuses with host status element
-  self.params.status_mapping[1][14] = {
-    [0] = "UP",
-    [1] = "DOWN",
-    [2] = "UNREACHABLE"
-  }
-
-  -- maps neb category statuses with service status element
-  self.params.status_mapping[1][24] = {
-    [0] = "OK",
-    [1] = "WARNING",
-    [2] = "CRITICAL",
-    [3] = "UNKNOWN"
-  }
+  self.params.reverse_category_mapping = {
+    [categories.neb.id] = categories.neb.name,
+    [2] = "bbdo",
+    [categories.storage.id] = categories.storage.name,
+    [4] = "correlation",
+    [5] = "dumper",
+    [categories.bam.id] = categories.bam.name,
+    [7] = "extcmd"
+  }
 
-  -- maps bam category statuses with ba status element
-  self.params.status_mapping[6][1] = {
-    [0] = "OK",
-    [1] = "WARNING",
-    [2] = "CRITICAL"
-  }
+  self.params.category_mapping = {
+    [categories.neb.name] = categories.neb.id,
+    bbdo = 2,
+    [categories.storage.name] = categories.storage.id,
+    correlation = 4,
+    dumper = 5,
+    [categories.bam.name] = categories.bam.id,
+    extcmd = 7
+  }
 
-  -- map downtime category statuses
-  self.params.status_mapping[1][5] = {
-    [1] = {},
-    [2] = {}
-  }
+  -- initiate category and status mapping
+  -- lookups stay status_mapping[category id][element id][state id]
+  self.params.status_mapping = {
+    [categories.neb.id] = {
+      [elements.downtime.id] = {
+        [1] = {},
+        [2] = {}
+      },
+      [elements.host_status.id] = {
+        [0] = "UP",
+        [1] = "DOWN",
+        [2] = "UNREACHABLE"
+      },
+      [elements.service_status.id] = {
+        [0] = "OK",
+        [1] = "WARNING",
+        [2] = "CRITICAL",
+        [3] = "UNKNOWN"
+      }
+    },
+    [categories.bam.id] = {
+      [elements.ba_status.id] = {
+        [0] = "OK",
+        [1] = "WARNING",
+        [2] = "CRITICAL"
+      }
+    }
+  }
 
-  -- service downtime mapping
-  self.params.status_mapping[1][5][1] = {
-    [0] = "OK",
-    [1] = "WARNING",
-    [2] = "CRITICAL",
-    [3] = "UNKNOWN"
-  }
-
-  -- host downtime mapping
-  self.params.status_mapping[1][5][2] = {
-    [0] = "UP",
-    [1] = "DOWN",
-    [2] = "UNREACHABLE"
-  }
+  self.params.format_template = {
+    [categories.neb.id] = {},
+    [categories.bam.id] = {}
+  }
+
+  self.params.status_mapping[categories.neb.id][elements.downtime.id][1] = self.params.status_mapping[categories.neb.id][elements.service_status.id]
+  self.params.status_mapping[categories.neb.id][elements.downtime.id][2] = self.params.status_mapping[categories.neb.id][elements.host_status.id]
 
   setmetatable(self, { __index = ScParams })
-
   return self
 end
@@ -258,6 +623,7 @@ function ScParams:check_params()
   self.params.dt_service_status = self.common:ifnil_or_empty(self.params.dt_service_status,self.params.service_status)
   self.params.enable_host_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_host_status_dedup, 0)
   self.params.enable_service_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_service_status_dedup, 0)
+  self.params.send_data_test = self.common:check_boolean_number_option_syntax(self.params.send_data_test, 0)
 end
 
 --- get_kafka_params: retrieve the kafka parameters and store them in the self.params.kafka table
@@ -294,4 +660,60 @@ function ScParams:is_mandatory_config_set(mandatory_params, params)
   return true
 end
 
+--- load_event_format_file: load a json file whose purpose is to serve as a template to format events
+-- @return true|false (boolean) if the file is a valid template file or not
+function ScParams:load_event_format_file()
+  if self.params.format_file == "" or self.params.format_file == nil then
+    return false
+  end
+
+  local retval, content = self.common:load_json_file(self.params.format_file)
+
+  if not retval then
+    return false
+  end
+
+  local categories = self.params.bbdo.categories
+  local elements = self.params.bbdo.elements
+
+  local tpl_category
+  local tpl_element
+
+  -- store each format template in its appropriate category/element table
+  for cat_el, format in pairs(content) do
+    tpl_category, tpl_element = string.match(cat_el, "^(%w+)_(.*)")
+    self.params.format_template[categories[tpl_category].id][elements[tpl_element].id] = format
+  end
+
+  return true
+end
+
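+-- illustration (hypothetical file content): the format_file json is expected
+-- to use "<category name>_<element name>" keys, for example:
+-- {
+--   "neb_service_status": "<your format template>",
+--   "neb_host_status": "<your format template>"
+-- }
+-- each entry ends up in self.params.format_template[category id][element id]
+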
+--- build_accepted_elements_info: link each accepted element to its category and element ids
+function ScParams:build_accepted_elements_info()
+  local categories = self.params.bbdo.categories
+  self.params.accepted_elements_info = {}
+
+  -- list all accepted elements
+  for _, accepted_element in ipairs(self.common:split(self.params.accepted_elements, ",")) do
+    self.logger:debug("[sc_params:build_accepted_elements_info]: accepted element: " .. tostring(accepted_element))
+    -- try to find element in known categories
+    for category_name, category_info in pairs(categories) do
+      self.logger:debug("[sc_params:build_accepted_elements_info]: category id: " .. tostring(category_info.id))
+      for i, v in pairs(self.params.element_mapping) do
+        self.logger:debug("[sc_params:build_accepted_elements_info]: mapping: " .. tostring(i) .. " value: " .. tostring(v))
+      end
+
+      if self.params.element_mapping[category_info.id][accepted_element] then
+        -- if found, store information in a dedicated table
+        self.logger:debug("[sc_params:build_accepted_elements_info]: found element in the mapping: "
+          .. tostring(self.params.element_mapping[category_info.id][accepted_element]))
+        self.params.accepted_elements_info[accepted_element] = {
+          category_id = category_info.id,
+          category_name = category_name,
+          element_id = self.params.element_mapping[category_info.id][accepted_element],
+          element_name = accepted_element
+        }
+      end
+    end
+  end
+end
+
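+-- illustration: with accepted_elements = "host_status,service_status",
+-- accepted_elements_info is expected to look like:
+-- {
+--   host_status = { category_id = 1, category_name = "neb", element_id = 14, element_name = "host_status" },
+--   service_status = { category_id = 1, category_name = "neb", element_id = 24, element_name = "service_status" }
+-- }
+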
 return sc_params
\ No newline at end of file
diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md
index 4e6b949f80b..dcd883c8639 100644
--- a/stream-connectors/modules/docs/README.md
+++ b/stream-connectors/modules/docs/README.md
@@ -8,6 +8,8 @@
   - [sc_param methods](#sc_param-methods)
   - [sc_event methods](#sc_event-methods)
   - [sc_macros methods](#sc_macros-methods)
+  - [sc_flush methods](#sc_flush-methods)
+  - [sc_metrics methods](#sc_metrics-methods)
   - [google.bigquery.bigquery methods](#googlebigquerybigquery-methods)
   - [google.auth.oauth methods](#googleauthoauth-methods)
 
@@ -21,6 +23,8 @@
 | sc_param | handles parameters for stream connectors | when you want to initiate a stream connector with all standard parameters | [Documentation](sc_param.md) |
 | sc_event | methods to help you interact with a broker event | when you want to check event data | [Documentation](sc_event.md) |
 | sc_macros | methods to help you convert macros | when you want to use macros in your stream connector | [Documentation](sc_macros.md) |
+| sc_flush | methods to help you handle queues of events | when you want to flush queues of various kinds of events | [Documentation](sc_flush.md) |
+| sc_metrics | methods to help you handle metrics | when you want to send metrics and not just events | [Documentation](sc_metrics.md) |
 | google.bigquery.bigquery | methods to help you handle bigquery data | when you want to generate tables schema for bigquery | [Documentation](google/bigquery/bigquery.md) |
 | google.auth.oauth | methods to help you authenticate to google api | when you want to authenticate yourself on the google api | [Documentation](google/auth/oauth.md) |
 
@@ -35,6 +39,7 @@
 | split | split a string using a separator (default is ",") and store each part in a table | [Documentation](sc_common.md#split-method) |
 | compare_numbers | compare two numbers using the given mathematical operator and return true or false | [Documentation](sc_common.md#compare_numbers-method) |
 | generate_postfield_param_string | convert a table of parameters into an url encoded parameters string | [Documentation](sc_common.md#generate_postfield_param_string-method) |
+| load_json_file | load a json file and parse it | [Documentation](sc_common.md#load_json_file-method) |
 
 ## sc_logger methods
 
@@ -68,7 +73,8 @@
 | param_override | replace default values of params with the ones provided by users in the web configuration of the stream connector | [Documentation](sc_param.md#param_override-method) |
 | check_params | make sure that the default stream connectors params provided by the user from the web configuration are valid. If not, uses the default value | [Documentation](sc_param.md#check_params-method) |
 | is_mandatory_config_set | check that all mandatory parameters for a stream connector are set | [Documentation](sc_param.md#is_mandatory_config_set-method) |
-| get_kafka_params | retrive Kafka dedicated parameters from the parameter list and put them in the provided kafka_config object | [Documentation](sc_param.md#get_kafka_params-method) |
+| get_kafka_params | retrieve Kafka dedicated parameters from the parameter list and put them in the provided kafka_config object | [Documentation](sc_param.md#get_kafka_params-method) |
+| load_event_format_file | load a file that serves as a template for formatting events | [Documentation](sc_param.md#load_event_format_file-method) |
 
 ## sc_event methods
 
@@ -127,6 +133,25 @@
 | transform_type | convert 0 or 1 into SOFT or HARD | [Documentation](sc_macros.md#transform_type-method) |
 | transform_state | convert a status code into its matching human readable status (OK, WARNING...) | [Documentation](sc_macros.md#transform_state-method) |
 
+## sc_flush methods
+
+| Method name | Method description | Link |
+| ---------------- | ------------------------------------------------- | ---------------------------------------------------- |
+| flush_all_queues | flush all the possible queues that can be created | [Documentation](sc_flush.md#flush_all_queues-method) |
+| flush_queue | flush a specific queue | [Documentation](sc_flush.md#flush_queue-method) |
+| reset_queue | reset a queue after it has been flushed | [Documentation](sc_flush.md#reset_queue-method) |
+
+## sc_metrics methods
+
+| Method name | Method description | Link |
+| ----------------------------- | ---------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| is_valid_bbdo_element | checks if the event is in an accepted category and is an appropriate element | [Documentation](sc_metrics.md#is_valid_bbdo_element-method) |
+| is_valid_metric_event | makes sure that the metric event is valid if it is a **host, service, service_status or kpi_event** event | [Documentation](sc_metrics.md#is_valid_metric_event-method) |
+| is_valid_host_metric_event | makes sure that the metric event is a valid host metric event | [Documentation](sc_metrics.md#is_valid_host_metric_event-method) |
+| is_valid_service_metric_event | makes sure that the metric event is a valid service metric event | [Documentation](sc_metrics.md#is_valid_service_metric_event-method) |
+| is_valid_kpi_metric_event | makes sure that the metric event is a valid KPI metric event | [Documentation](sc_metrics.md#is_valid_kpi_metric_event-method) |
+| is_valid_perfdata | makes sure that the performance data is valid | [Documentation](sc_metrics.md#is_valid_perfdata-method) |
+
 ## google.bigquery.bigquery methods
 
 | Method name | Method description | Link |
diff --git a/stream-connectors/modules/docs/mappings.md b/stream-connectors/modules/docs/mappings.md
new file mode 100644
index 00000000000..a820c1a07dd
--- /dev/null
+++ b/stream-connectors/modules/docs/mappings.md
@@ -0,0 +1,164 @@
+# mappings documentation
+
+- [mappings documentation](#mappings-documentation)
+  - [Introduction](#introduction)
+  - [Categories](#categories)
+    - [get category ID from name](#get-category-id-from-name)
+    - [get category name from ID](#get-category-name-from-id)
+  - [Elements](#elements)
+    - [get element ID from name](#get-element-id-from-name)
+    - [get element name from ID](#get-element-name-from-id)
+    - [get the category ID from an element name](#get-the-category-id-from-an-element-name)
+    - [get the category name from an element name](#get-the-category-name-from-an-element-name)
+    - [get the element ID from a category ID and an element name](#get-the-element-id-from-a-category-id-and-an-element-name)
+  - [States](#states)
+    - [get state type name from state type ID](#get-state-type-name-from-state-type-id)
+    - [get state name from category ID, element ID and state ID](#get-state-name-from-category-id-element-id-and-state-id)
+  - [Tips and tricks](#tips-and-tricks)
+
+## Introduction
+
+Every mapping is made available through a params table that only exists once you have created an instance of the sc_params module. To create such an instance, head over [**the sc_params documentation**](sc_param.md#module-initialization)
+
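+The examples below assume a `params_object` created like this (see the sc_params documentation for the exact constructor arguments; `common` and `logger` are assumed to be sc_common and sc_logger instances):
+
+```lua
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+
+local params_object = sc_params.new(common, logger)
+
+-- every mapping lives under params_object.params
+local bbdo_mappings = params_object.params.bbdo
+```
+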
name from ID](#get-element-name-from-id) + - [get the category ID from an element name](#get-the-category-id-from-an-element-name) + - [get the category name from an element name](#get-the-category-name-from-an-element-name) + - [get the element ID from a category ID and an element name](#get-the-element-id-from-a-category-id-and-an-element-name) + - [States](#states) + - [get state type name from state type ID](#get-state-type-name-from-state-type-id) + - [get state name from category ID, element ID and state ID](#get-state-name-from-category-id-element-id-and-state-id) + - [Tips and tricks](#tips-and-tricks) + +## Introduction + +Every mappings is made available trough a params table that is only available if you have created an instance of the sc_params module. To create such instance, head over [**the sc_params documentation**](sc_param.md#Module-initialization) + +## Categories + +Each event is linked to a category. To help you work with that, there are a bunch of mappings available + +### get category ID from name + +To get the ID of a category based on its name, you can use the following mapping table + +```lua +-- get the id of the neb category +local category_id = params_object.params.bbdo.categories["neb"].id +--> category_id is 1 + +-- you can also get its name from this table but it shouldn't be very useful +local category_name = params_object.params.bbdo.categories["neb"].name +``` + +### get category name from ID + +To get the name of a category based on its ID, you can use the following mapping table + +```lua +-- get the name of the category 6 +local category_name = params_object.params.reverse_category_mapping[6] +--> category_name is "bam" +``` + +## Elements + +Each event is linked to an element. To help you work with that, there are a bunch of mappings available + +### get element ID from name + +```lua +-- get the ID of the element host_status +local element_id = params_object.params.bbdo.elements["host_status"].id +--> element_id is 14 + +-- you can also get its name from this table but it shouldn't be very useful +local element_name = params_object.params.bbdo.elements["host_status"].name +--> element_name is "host_status" +``` + +### get element name from ID + +You can't get a element name from its ID only. You must have its category too. For example, there are many elements that shares the ID 1. Because each category has its own elements and their ID start at 1. +For example, the **acknowledgement** element and the **ba_status** element have 1 as an element ID. The first element is part of the **neb category**, the second one is part of the **bam category** + +```lua +-- category is neb +local category_id = params_object.params.bbdo.categories["neb"].id -- it is better to use the mapping instead of hard coding the ID if you know it. +-- element is service_status +local element_id = 24 + +local element_name = params_object.params.reverse_element_mapping[category_id][element_id] +--> element_name is "service_status" +``` + +### get the category ID from an element name + +```lua +local category_id = params_object.params.bbdo.elements["host_status"].category_id +--> category_id is 1 +``` + +### get the category name from an element name + +```lua +local category_name = params_object.params.bbdo.elements["host_status"].category_name +--> category_name is neb +``` + +### get the element ID from a category ID and an element name + +This one is a bit redundant with the [**get the category ID from an element name**](#get-the-category-ID-from-an-element-name) mapping. 
It should be deprecated but in a world where two elements from different categories could share the same name, it is better to keep this possibility + +```lua +local category_id = params_object.params.bbdo.categories["neb"].id -- it is better to use the mapping instead of hard coding the ID if you know it. +local element_name = "host_status" + +local element_id = params_object.params.element_mapping[category_id][element_name] +--> element_id is 14 +``` + +## States + +### get state type name from state type ID + +```lua +local state_type_name = params_object.state_type_mapping[0] +--> state_type_name is "SOFT" + +state_type_name = params_object.state_type_mapping[1] +--> state_type_name is "HARD" +``` + +### get state name from category ID, element ID and state ID + +```lua +local category_id = local category_id = params_object.params.bbdo.categories["neb"].id -- it is better to use the mapping instead of hard coding the ID if you know it. +local element_id = params_object.params.bbdo.elements["host_status"].id -- it is better to use the mapping instead of hard coding the ID if you know it. + +local state_name = params_object.params.status_mapping[category_id][element_id][1] +--> state_name is "DOWN" +``` + +## Tips and tricks + +- When you want to use the ID of the neb category for example + +```lua +-- ✘ bad +local neb_category_id = 1 + +-- ✓ good +local neb_category_id = params_object.params.bbdo.categories.neb.id +``` + +- When you want to use the ID of the host_status element for example + +```lua +-- ✘ bad +local host_status_element_id = 14 + +-- ✓ good +local host_status_element_id = params_object.params.bbdo.elements.host_status.id +``` + +- When working on a downtime event, you can get the human readable state using a hidden mapping table. Because this event is shared between services and hosts you don't know if the ID 1 means DOWN or WARNING + +```lua +local categories = params_object.params.bbdo.categories +local elements = params_object.params.bbdo.elements + +-- 2 = host, 1 is the ID code of the state +local host_state_downtime = params.status_mapping[categories.neb.id][elements.downtime.id][2][1] +--> host_state_downtime is "DOWN" + +-- 1 = service, 2 is the ID code the state +local service_state_downtime = params.status_mapping[categories.neb.id][elements.downtime.id][1][2] +--> service_state_downtime is "CRITICAL" +``` diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md index a12ba1e7a0b..35a5970504d 100644 --- a/stream-connectors/modules/docs/sc_common.md +++ b/stream-connectors/modules/docs/sc_common.md @@ -33,6 +33,10 @@ - [generate_postfield_param_string: parameters](#generate_postfield_param_string-parameters) - [generate_postfield_param_string: returns](#generate_postfield_param_string-returns) - [generate_postfield_param_string: example](#generate_postfield_param_string-example) + - [load_json_file method](#load_json_file-method) + - [load_json_file: parameters](#load_json_file-parameters) + - [load_json_file: returns](#load_json_file-returns) + - [load_json_file: example](#load_json_file-example) ## Introduction @@ -305,3 +309,33 @@ local param_table = { local result = test_common:generate_postfield_param_string(param_table) --> result is "key=321Xzd&option=full&name=John%20Doe" ``` + +## load_json_file method + +The **load_json_file** method loads a json file and parse it. 
+
+### load_json_file: parameters
+
+| parameter | type | optional | default value |
+| ---------------------------------------------------------------- | ------ | -------- | ------------- |
+| the path to the json file (must be readable by centreon-broker) | string | no | |
+
+### load_json_file: returns
+
+| return | type | always | condition |
+| ------------------------------ | ------- | ------ | -------------------------------------------------------------------- |
+| true or false | boolean | yes | false if the json file couldn't be loaded or parsed, true otherwise |
+| the parsed content of the json | table | no | only when true is returned |
+
+### load_json_file: example
+
+```lua
+local json_file = "/etc/centreon-broker/sc_config.json"
+
+local result, content = test_common:load_json_file(json_file)
+--> result is true, content is a table
+
+json_file = 3
+result, content = test_common:load_json_file(json_file)
+--> result is false, content is nil
+```
diff --git a/stream-connectors/modules/docs/sc_event.md b/stream-connectors/modules/docs/sc_event.md
index 7a520a53ec3..1df2f57b2c8 100644
--- a/stream-connectors/modules/docs/sc_event.md
+++ b/stream-connectors/modules/docs/sc_event.md
@@ -124,7 +124,7 @@
 
 ## Introduction
 
-The sc_param module provides methods to help you handle parameters for your stream connectors. It also provides a list of default parameters that are available for every stream connectors (the complete list is below). It has been made in OOP (object oriented programming)
+The sc_event module provides methods to help you handle events for your stream connectors. It has been made in OOP (object oriented programming)
 
 ## Module initialization
 
@@ -132,7 +132,7 @@ Since this is OOP, it is required to initiate your module.
 
 ### module constructor
 
-Constructor must be initialized with two parameters
+Constructor must be initialized with 5 parameters
 
 - an event table
 - a params table
diff --git a/stream-connectors/modules/docs/sc_flush.md b/stream-connectors/modules/docs/sc_flush.md
new file mode 100644
index 00000000000..266f8a2f486
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_flush.md
@@ -0,0 +1,168 @@
+# Documentation of the sc_flush module
+
+- [Documentation of the sc_flush module](#documentation-of-the-sc_flush-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [Module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [flush_all_queues method](#flush_all_queues-method)
+    - [flush_all_queues: parameters](#flush_all_queues-parameters)
+    - [flush_all_queues: example](#flush_all_queues-example)
+  - [flush_queue method](#flush_queue-method)
+    - [flush_queue: parameters](#flush_queue-parameters)
+    - [flush_queue: returns](#flush_queue-returns)
+    - [flush_queue: example](#flush_queue-example)
+  - [reset_queue method](#reset_queue-method)
+    - [reset_queue: parameters](#reset_queue-parameters)
+    - [reset_queue: example](#reset_queue-example)
+
+## Introduction
+
+The sc_flush module provides methods to help handle queues of events in stream connectors. It has been made in OOP (object oriented programming)
+
+## Module initialization
+
+Since this is OOP, it is required to initiate your module.
+
+### Module constructor
+
+Constructor can be initialized with two parameters. If the second one is not provided, it will use a default value.
+
+- params. This is the table of all the stream connector parameters
+- sc_logger. This is an instance of the sc_logger module
+
+If you don't provide this parameter, it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
+
+### constructor: Example
+
+```lua
+-- load modules
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_logger.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+local params = {
+  param_A = "value A",
+  param_B = "value B"
+}
+
+-- create a new instance of the sc_flush module
+local test_flush = sc_flush.new(params, test_logger)
+```
+
+## flush_all_queues method
+
+The **flush_all_queues** method tries to flush all the possible queues that can be created. It flushes queues according to the [**accepted_elements, max_buffer_size and max_buffer_age parameters**](sc_param.md#default-parameters)
+
+Head over the following chapters for more information
+
+- [flush_queue](#flush_queue-method)
+
+### flush_all_queues: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parentheses, otherwise it will pass the result of the function as a parameter instead of the function itself). See the sketch after the example below | function | no | |
+
+### flush_all_queues: example
+
+```lua
+-- if accepted_elements is set to "host_status,service_status"
+
+local function send_data()
+  -- send data somewhere
+end
+
+test_flush:flush_all_queues(send_data)
+--> host_status and service_status are flushed if it is possible
+```
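+
+To illustrate the dot syntax requirement from the parameters table above, here is a plain Lua sketch (no stream connector module involved): the colon syntax is only syntactic sugar that passes the object itself as a hidden first argument, so only the dot form yields a plain function reference that can be handed over.
+
+```lua
+local my_module = {}
+
+-- "function my_module:send_data()" is sugar for "function my_module.send_data(self)"
+function my_module:send_data()
+  return "sending data"
+end
+
+-- dot syntax: a plain reference to the function, nothing is called yet
+local callback = my_module.send_data
+
+-- a caller can later invoke it, providing the arguments itself
+local result = callback(my_module)
+--> result is "sending data"
+```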
+
+## flush_queue method
+
+The **flush_queue** method tries to flush a specific queue. It flushes a queue according to the [**max_buffer_size and max_buffer_age parameters**](sc_param.md#default-parameters)
+
+Head over the following chapters for more information
+
+- [reset_queue](#reset_queue-method)
+
+### flush_queue: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parentheses, otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | |
+| the category of the queue that we need to flush | number | no | |
+| the element of the queue that we need to flush | number | no | |
+
+### flush_queue: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### flush_queue: example
+
+```lua
+local function send_data()
+  -- send data somewhere
+end
+
+-- fill a host_status queue with 2 events for the example
+test_flush.queues[1][14].events = {
+  [1] = "first event",
+  [2] = "second event"
+}
+
+local result = test_flush:flush_queue(send_data, 1, 14)
+--> result is true
+
+-- initiate an empty queue for service_status events
+test_flush.queues[1][24].events = {}
+
+result = test_flush:flush_queue(send_data, 1, 24)
+--> result is false because buffer size is 0
+```
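+
+As in the example above, queues are indexed by category ID and element ID (1 and 14 being the neb category and the host_status element). Below is a small sketch of the same call written with the bbdo mappings from [the mappings documentation](mappings.md), assuming params is the sc_params table that was used to build test_flush:
+
+```lua
+-- avoid hard coded category/element IDs thanks to the bbdo mappings
+local categories = params.bbdo.categories
+local elements = params.bbdo.elements
+
+local result = test_flush:flush_queue(send_data, categories.neb.id, elements.host_status.id)
+--> same as test_flush:flush_queue(send_data, 1, 14)
+```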
+
+## reset_queue method
+
+The **reset_queue** method resets a queue after it has been flushed
+
+### reset_queue: parameters
+
+| parameter | type | optional | default value |
+| ----------------------------------------------- | ------ | -------- | ------------- |
+| the category of the queue that we need to reset | number | no | |
+| the element of the queue that we need to reset | number | no | |
+
+### reset_queue: example
+
+```lua
+-- fill a host_status queue with 2 events for the example
+test_flush.queues[1][14] = {
+  flush_date = os.time() - 30, -- simulate an old queue by setting its last flush date 30 seconds in the past
+  events = {
+    [1] = "first event",
+    [2] = "second event"
+  }
+}
+
+test_flush:reset_queue(1, 14)
+--> test_flush.queues[1][14] is now reset like below
+--[[
+  test_flush.queues[1][14] = {
+    flush_date = os.time(), -- the time at which the reset happened
+    events = {}
+  }
+]]
+```
diff --git a/stream-connectors/modules/docs/sc_metrics.md b/stream-connectors/modules/docs/sc_metrics.md
new file mode 100644
index 00000000000..a7f3e51f4ef
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_metrics.md
@@ -0,0 +1,238 @@
+# Documentation of the sc_metrics module
+
+- [Documentation of the sc_metrics module](#documentation-of-the-sc_metrics-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [is_valid_bbdo_element method](#is_valid_bbdo_element-method)
+    - [is_valid_bbdo_element: returns](#is_valid_bbdo_element-returns)
+    - [is_valid_bbdo_element: example](#is_valid_bbdo_element-example)
+  - [is_valid_metric_event method](#is_valid_metric_event-method)
+    - [is_valid_metric_event: returns](#is_valid_metric_event-returns)
+    - [is_valid_metric_event: example](#is_valid_metric_event-example)
+  - [is_valid_host_metric_event method](#is_valid_host_metric_event-method)
+    - [is_valid_host_metric_event: returns](#is_valid_host_metric_event-returns)
+    - [is_valid_host_metric_event: example](#is_valid_host_metric_event-example)
+  - [is_valid_service_metric_event method](#is_valid_service_metric_event-method)
+    - [is_valid_service_metric_event: returns](#is_valid_service_metric_event-returns)
+    - [is_valid_service_metric_event: example](#is_valid_service_metric_event-example)
+  - [is_valid_kpi_metric_event method](#is_valid_kpi_metric_event-method)
+    - [is_valid_kpi_metric_event: returns](#is_valid_kpi_metric_event-returns)
+    - [is_valid_kpi_metric_event: example](#is_valid_kpi_metric_event-example)
+  - [is_valid_perfdata method](#is_valid_perfdata-method)
+    - [is_valid_perfdata parameters](#is_valid_perfdata-parameters)
+    - [is_valid_perfdata: returns](#is_valid_perfdata-returns)
+    - [is_valid_perfdata: example](#is_valid_perfdata-example)
+
+## Introduction
+
+The sc_metrics module provides methods to help you handle metrics for your stream connectors. It has been made in OOP (object oriented programming)
+
+## Module initialization
+
+Since this is OOP, it is required to initiate your module.
+
+### module constructor
+
+Constructor must be initialized with 5 parameters
+
+- an event table
+- a params table
+- a sc_common instance
+- a sc_broker instance
+- a sc_logger instance (will create a new one with default parameters if not provided)
+
+### constructor: Example
+
+```lua
+local event = {
+  --- event data ---
+}
+
+-- load modules
+local sc_param = require("centreon-stream-connectors-lib.sc_params")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_param.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- create a new instance of the sc_common module
+local test_common = sc_common.new(test_logger)
+
+-- create a new instance of the sc_param module
+local test_param = sc_param.new(test_common, test_logger)
+
+-- create a new instance of the sc_broker module
+local test_broker = sc_broker.new(test_logger)
+
+-- create a new instance of the sc_metrics module
+local test_metrics = sc_metrics.new(event, test_param.params, test_common, test_broker, test_logger)
+```
+
+## is_valid_bbdo_element method
+
+The **is_valid_bbdo_element** method checks if the event is in an accepted category and is an appropriate element. It uses the [**accepted_elements and accepted_categories parameters**](sc_param.md#default-parameters) to validate an event. It also checks if the element is one that provides performance data (the current list is: *host, service, host_status, service_status, kpi_event*)
+
+### is_valid_bbdo_element: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_bbdo_element: example
+
+```lua
+local result = test_metrics:is_valid_bbdo_element()
+--> result is true or false
+```
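+
+The validation methods documented below are meant to be chained. Here is a hedged sketch of the kind of glue code a metrics stream connector could use (hypothetical, not part of the module):
+
+```lua
+-- reject events whose category/element is not accepted or carries no metrics
+if not test_metrics:is_valid_bbdo_element() then
+  return false
+end
+
+-- reject events that fail the host/service/kpi specific checks
+-- (these checks also parse the performance data through is_valid_perfdata)
+if not test_metrics:is_valid_metric_event() then
+  return false
+end
+
+-- at this point test_metrics.metrics should contain the parsed metrics,
+-- ready to be formatted and queued
+```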
+
+## is_valid_metric_event method
+
+The **is_valid_metric_event** method makes sure that the metric event is valid if it is a **host, service, host_status, service_status or kpi_event** event.
+
+Head over the following chapters for more information
+
+- [is_valid_host_metric_event](#is_valid_host_metric_event-method)
+- [is_valid_service_metric_event](#is_valid_service_metric_event-method)
+- [is_valid_kpi_metric_event](#is_valid_kpi_metric_event-method)
+
+### is_valid_metric_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_metric_event: example
+
+```lua
+local result = test_metrics:is_valid_metric_event()
+--> result is true or false
+```
+
+## is_valid_host_metric_event method
+
+The **is_valid_host_metric_event** method makes sure that the metric event is a valid host metric event.
+
+Head over the following chapters for more information
+
+- [is_valid_host](sc_event.md#is_valid_host-method)
+- [is_valid_poller](sc_event.md#is_valid_poller-method)
+- [is_valid_host_severity](sc_event.md#is_valid_host_severity-method)
+- [is_valid_hostgroup](sc_event.md#is_valid_hostgroup-method)
+- [is_valid_perfdata](#is_valid_perfdata-method)
+
+### is_valid_host_metric_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_host_metric_event: example
+
+```lua
+local result = test_metrics:is_valid_host_metric_event()
+--> result is true or false
+```
+
+## is_valid_service_metric_event method
+
+The **is_valid_service_metric_event** method makes sure that the metric event is a valid service metric event.
+
+Head over the following chapters for more information
+
+- [is_valid_host](sc_event.md#is_valid_host-method)
+- [is_valid_poller](sc_event.md#is_valid_poller-method)
+- [is_valid_host_severity](sc_event.md#is_valid_host_severity-method)
+- [is_valid_hostgroup](sc_event.md#is_valid_hostgroup-method)
+- [is_valid_service](sc_event.md#is_valid_service-method)
+- [is_valid_service_severity](sc_event.md#is_valid_service_severity-method)
+- [is_valid_servicegroup](sc_event.md#is_valid_servicegroup-method)
+- [is_valid_perfdata](#is_valid_perfdata-method)
+
+### is_valid_service_metric_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_service_metric_event: example
+
+```lua
+local result = test_metrics:is_valid_service_metric_event()
+--> result is true or false
+```
+
+## is_valid_kpi_metric_event method
+
+The **is_valid_kpi_metric_event** method makes sure that the metric event is a valid kpi metric event.
+
+Head over the following chapters for more information
+
+- [is_valid_perfdata](#is_valid_perfdata-method)
+
+### is_valid_kpi_metric_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_kpi_metric_event: example
+
+```lua
+local result = test_metrics:is_valid_kpi_metric_event()
+--> result is true or false
+```
+
+## is_valid_perfdata method
+
+The **is_valid_perfdata** method makes sure that the performance data is valid, meaning that it is not empty and that it can be parsed.
+If the performance data is valid, it will store its information in a new table.
+
+### is_valid_perfdata parameters
+
+| parameter | type | optional | default value |
+| --------------------------------------------- | ------ | -------- | ------------- |
+| the performance data that needs to be checked | string | no | |
+
+### is_valid_perfdata: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_perfdata: example
+
+```lua
+local perfdata = "pl=45%;40;80;0;100"
+local result = test_metrics:is_valid_perfdata(perfdata)
+--> result is true or false
+--> test_metrics.metrics is now
+--[[
+  test_metrics.metrics = {
+    pl = {
+      value = 45,
+      uom = "%",
+      min = 0,
+      max = 100,
+      warning_low = 0,
+      warning_high = 40,
+      warning_mode = false,
+      critical_low = 0,
+      critical_high = 80,
+      critical_mode = false,
+      name = "pl"
+    }
+  }
+]]--
+```
diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md
index 2f11310d0f8..3833cdf2ba6 100644
--- a/stream-connectors/modules/docs/sc_param.md
+++ b/stream-connectors/modules/docs/sc_param.md
@@ -18,10 +18,15 @@
   - [is_mandatory_config_set: parameters](#is_mandatory_config_set-parameters)
   - [is_mandatory_config_set: returns](#is_mandatory_config_set-returns)
   - [is_mandatory_config_set: example](#is_mandatory_config_set-example)
+  - [load_event_format_file method](#load_event_format_file-method)
+    - [load_event_format_file: returns](#load_event_format_file-returns)
+    - [load_event_format_file: example](#load_event_format_file-example)
+  - [build_accepted_elements_info method](#build_accepted_elements_info-method)
+    - [build_accepted_elements_info: example](#build_accepted_elements_info-example)
 
 ## Introduction
 
-The sc_param module provides methods to help you handle parameters for your stream connectors. It also provides a list of default parameters that are available for every stream connectors (the complete list is below). It has been made in OOP (object oriented programming)
+The sc_param module provides methods to help you handle parameters for your stream connectors. It also provides a list of default parameters that are available for every stream connector (the complete list is below) and a set of mappings to convert an ID to human readable text or the other way around. Head over [**the mappings documentation**](mappings.md) for more information. It has been made in OOP (object oriented programming)
 
 ### Default parameters
 
@@ -56,6 +61,8 @@ The sc_param module provides methods to help you handle parameters for your stre
 | accepted_authors | string | | | coma separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | |
 | local_time_diff_from_utc | number | default value is the time difference the centreon central server has from UTC | | the time difference from UTC in seconds | all | |
 | timestamp_conversion_format | string | %Y-%m-%d %X | | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | [date format information](https://www.lua.org/pil/22.1.html) |
+| send_data_test | number | 0 | | When set to 1, send data in the logfile of the stream connector instead of sending it where the stream connector was designed to | all | |
+| format_file | string | | | Path to a file that will be used as a template to format events instead of using the default format. Only usable for events stream connectors (\*-events-apiv2.lua), not metrics stream connectors (\*-metrics-apiv2.lua). You should put the file in /etc/centreon-broker to keep your broker configuration in a single place. [**See documentation for more information**](templating.md) |
 
 ## Module initialization
 
@@ -193,9 +200,9 @@ The **is_mandatory_config_set** method checks if all mandatory parameters for a
 
 ### is_mandatory_config_set: returns
 
-| return | type | always | condition |
-| ---------------- | ------- | ------ | -------------------------------------------------------- |
-| true or false | boolean | yes | if a mandatory configuration parameter is missing or not |
+| return | type | always | condition |
+| ------------- | ------- | ------ | -------------------------------------------------------- |
+| true or false | boolean | yes | if a mandatory configuration parameter is missing or not |
 
 ### is_mandatory_config_set: example
 
@@ -223,7 +230,70 @@ local result = test_param:is_mandatory_config_set(mandatory_params, params)
 params.password = "hello"
 
 result = test_param:is_mandatory_config_set(mandatory_params, params)
---> result is truc because password and username are in the params table
+--> result is true because password and username are in the params table
 --> test_param.param.username is "John"
 --> test_param.param.password is "hello"
 ```
+
+## load_event_format_file method
+
+The **load_event_format_file** method loads a json file whose purpose is to serve as a template to format events. It will use the [**format_file parameter**](#default-parameters) in order to know which file to load. If a file has been successfully loaded, a template table will be created in the self.params table.
+
+### load_event_format_file: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------------------------------------------------------- |
+| true or false | boolean | yes | if the template file has been successfully loaded or not |
+
+### load_event_format_file: example
+
+```lua
+test_param.params.format_file = "/etc/centreon-broker/sc_template.json"
+
+local result = test_param:load_event_format_file()
+--> result is true
+--> test_param.params.format_template is now created
+
+test_param.params.format_file = 3
+
+result = test_param:load_event_format_file()
+--> result is false
+```
+
+## build_accepted_elements_info method
+
+The **build_accepted_elements_info** method creates a table with information related to the accepted elements. It will use the [**accepted_elements parameter**](#default-parameters) in order to create this table.
+
+### build_accepted_elements_info: example
+
+```lua
+test_param.params.accepted_elements = "host_status,service_status,ba_status"
+
+test_param:build_accepted_elements_info()
+
+--> a test_param.params.accepted_elements_info table is now created and here is its content
+--[[
+  test_param.params.accepted_elements_info = {
+    host_status = {
+      category_id = 1,
+      category_name = "neb",
+      id = 14,
+      name = "host_status"
+    },
+    service_status = {
+      category_id = 1,
+      category_name = "neb",
+      id = 24,
+      name = "service_status"
+    },
+    ba_status = {
+      category_id = 6,
+      category_name = "bam",
+      id = 1,
+      name = "ba_status"
+    }
+  }
+]]--
+```
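+
+As a hedged usage sketch (illustration only, not part of the module), the resulting table can be iterated to log what the stream connector accepts:
+
+```lua
+-- log every accepted element with its IDs and category name
+-- (test_logger is the sc_logger instance created earlier in this documentation)
+for element_name, info in pairs(test_param.params.accepted_elements_info) do
+  test_logger:debug(
+    "[example]: accepting element " .. element_name .. " (id: " .. info.id
+    .. ") from category " .. info.category_name .. " (id: " .. info.category_id .. ")"
+  )
+end
+```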
diff --git a/stream-connectors/modules/docs/templating.md b/stream-connectors/modules/docs/templating.md
new file mode 100644
index 00000000000..1bff215768a
--- /dev/null
+++ b/stream-connectors/modules/docs/templating.md
@@ -0,0 +1,121 @@
+# Templating documentation
+
+- [Templating documentation](#templating-documentation)
+  - [Introduction](#introduction)
+  - [Templating](#templating)
+    - [Structure](#structure)
+    - [Template and macros](#template-and-macros)
+    - [Example: adding new entries to already handled event types](#example-adding-new-entries-to-already-handled-event-types)
+    - [Example: adding a not handled event type](#example-adding-a-not-handled-event-type)
+
+## Introduction
+
+Templating with stream connectors is an option that is offered **only for events oriented stream connectors**. This means that **you can't use** templating with **metrics oriented stream connectors**.
+
+Templating allows you to format your events at your convenience, either because the default format doesn't suit your needs or because the stream connector doesn't handle a type of event that you would like to receive.
+
+Stream connector modules are built to handle the following event types
+
+- acknowledgement
+- downtime
+- host_status
+- service_status
+- ba_status
+
+It means that if you create a template that is not related to those types, you will not be able to use the built in features of the stream connector modules. More event types may be handled in the future. If some important ones come to your mind, feel free to let us know by opening an issue at [https://github.com/centreon/centreon-stream-connector-scripts/issues](https://github.com/centreon/centreon-stream-connector-scripts/issues)
+
+## Templating
+
+### Structure
+
+A template is a json file with the following structure, where each top level key is built from the category and element names of the event type it formats (for example `neb_host_status`)
+
+```json
+{
+  "<category>_<element>": {
+    "key_1": "value_1",
+    "key_2": "value_2"
+  },
+  "<other-category>_<other-element>": {
+    "key_1": "value_1",
+    "key_2": "value_2"
+  }
+}
+```
+
+### Template and macros
+
+To make the best use of the template feature, you should take a look at the whole macros system that is implemented in the stream connector modules. [**Macros documentation**](sc_macros.md#stream-connectors-macro-explanation)
+
+### Example: adding new entries to already handled event types
+
+In order to get a better overview of the system, we are going to work on the Splunk-events-apiv2 stream connector.
+
+This stream connector handles the following event types
+
+- host_status
+- service_status
+
+Let's take a closer look at the format of a host_status event
+
+```lua
+self.sc_event.event.formated_event = {
+  event_type = "host",
+  state = self.sc_event.event.state,
+  state_type = self.sc_event.event.state_type,
+  hostname = self.sc_event.event.cache.host.name,
+  output = string.gsub(self.sc_event.event.output, "\n", ""),
+}
+```
+
+In the code, the formatted event is made of a string (event_type) plus the state, state_type, hostname and output. Let's say we would like to add the **host_id** and the **address**: the first one needs to be stored in an index called **"MY_HOST_ID"** and the address in an index called **"IP"**.
+
+This results in the following json templating file
+
+```json
+{
+  "neb_host_status": {
+    "event_type": "host",
+    "state": "{state}",
+    "state_type": "{state_type}",
+    "hostname": "{cache.host.name}",
+    "output": "{output_scshort}",
+    "MY_HOST_ID": "{host_id}",
+    "IP": "{cache.host.address}"
+  }
+}
+```
+
+As you can see, there are a lot of **{text}** entries: those are macros that will be replaced by the value found in the event or linked to the event (like in the cache).
+
+The service_status event type is not in the json file. Therefore, it will use the default format provided by the Splunk stream connector.
+
+### Example: adding a not handled event type
+
+This example builds on what has been done in [the previous example](#example-adding-new-entries-to-already-handled-event-types)
+
+As stated before, only **host_status** and **service_status** are handled by the Splunk stream connector. Stream connector modules are able to handle a few others, listed [**in the introduction**](#introduction)
+
+Let's say we would like to handle **ba_status** events. To do so, we need to add this kind of event in the json file
+
+```json
+{
+  "neb_host_status": {
+    "event_type": "host",
+    "state": "{state}",
+    "state_type": "{state_type}",
+    "hostname": "{cache.host.name}",
+    "output": "{output_scshort}",
+    "MY_HOST_ID": "{host_id}",
+    "IP": "{cache.host.address}"
+  },
+  "bam_ba_status": {
+    "event_type": "BA",
+    "ba_name": "{cache.ba.ba_name}",
+    "ba_id": "{ba_id}",
+    "state": "{state}"
+  }
+}
+```
+
+As stated in the previous example, the service_status event type is not in the json file. Therefore, it will use the default format provided by the Splunk stream connector.
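+
+For reference, here is a simplified sketch of how a stream connector consumes such a template, based on the replace_sc_macro method of the sc_macros module: each value of the matching template entry is run through the macro engine.
+
+```lua
+-- simplified extract of an EventQueue:format_event() implementation
+local template = self.sc_params.params.format_template[category][element]
+
+self.sc_event.event.formated_event = {}
+
+if self.format_template and template ~= nil and template ~= "" then
+  for index, value in pairs(template) do
+    -- every macro like {state} or {cache.host.name} is replaced here
+    self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event)
+  end
+end
+```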
From 7a6bc2deba807b58d20824696e70edaa44cba2fc Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 15 Jul 2021 19:36:36 +0200 Subject: [PATCH 069/219] add rockspec files for 1.3.0 release (#58) --- ...eon-stream-connectors-lib-1.3.0-1.rockspec | 39 +++++++++++++++++++ ...eon-stream-connectors-lib-1.3.0-2.rockspec | 39 +++++++++++++++++++ 2 files changed, 78 insertions(+) create mode 100644 stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-1.rockspec create mode 100644 stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-2.rockspec diff --git a/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-1.rockspec b/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-1.rockspec new file mode 100644 index 00000000000..47b22dee43f --- /dev/null +++ b/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.3.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.3.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-2.rockspec b/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-2.rockspec new file mode 100644 index 00000000000..770d06f1825 --- /dev/null +++ 
b/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-2.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.3.0-2" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.3.0-2" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From af9f96a7b724007a74392fa5b9da56530e262850 Mon Sep 17 00:00:00 2001 From: tcharles Date: Fri, 16 Jul 2021 08:01:48 +0200 Subject: [PATCH 070/219] fix wrong logging default configuration (#59) --- .../centreon-certified/splunk/splunk-events-apiv2.lua | 6 ++---- .../centreon-certified/splunk/splunk-metrics-apiv2.lua | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index 3498c563a96..87dde7ff145 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -39,8 +39,8 @@ function EventQueue.new(params) self.fail = false -- set up log configuration - local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" - local log_level = params.log_level or 2 + local logfile = params.logfile or "/var/log/centreon-broker/splunk-events.log" + local log_level = params.log_level or 1 -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) @@ -63,8 +63,6 @@ 
function EventQueue.new(params) self.sc_params.params.splunk_host = params.splunk_host or "Central" self.sc_params.params.accetepd_categories = params.acceptd_categories or "neb" self.sc_params.params.accetepd_elements = params.accepted_elements or "host_status,service_status" - self.sc_params.params.logfile = params.logfile or "/var/log/centreon-broker/splunk-events-apiv2.log" - self.sc_params.params.log_level = params.log_level or 1 -- apply users params and check syntax of standard ones self.sc_params:param_override(params) diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua index 17453f73fd8..be545985987 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -37,8 +37,8 @@ function EventQueue.new(params) self.fail = false -- set up log configuration - local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" - local log_level = params.log_level or 2 + local logfile = params.logfile or "/var/log/centreon-broker/splunk-metrics.log" + local log_level = params.log_level or 1 -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) @@ -62,8 +62,6 @@ function EventQueue.new(params) self.sc_params.params.accetepd_categories = params.accepted_categories or "neb" self.sc_params.params.accetepd_elements = params.accepted_elements or "host_status,service_status" self.sc_params.params.hard_only = params.hard_only or 0 - self.sc_params.params.logfile = params.logfile or "/var/log/centreon-broker/splunk-metrics-apiv2.log" - self.sc_params.params.log_level = params.log_level or 1 self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 From 8125858ff566ca4c96eac60fc2c59f88df17d4d7 Mon Sep 17 00:00:00 2001 From: pkriko <32265250+pkriko@users.noreply.github.com> Date: Wed, 4 Aug 2021 14:29:01 +0200 Subject: [PATCH 071/219] typo --- .../centreon-certified/splunk/splunk-events-apiv2.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index 87dde7ff145..1e9e9a06b59 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -61,8 +61,8 @@ function EventQueue.new(params) self.sc_params.params.splunk_source = params.splunk_source self.sc_params.params.splunk_sourcetype = params.splunk_sourcetype or "_json" self.sc_params.params.splunk_host = params.splunk_host or "Central" - self.sc_params.params.accetepd_categories = params.acceptd_categories or "neb" - self.sc_params.params.accetepd_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" -- apply users params and check syntax of standard ones self.sc_params:param_override(params) From 8960ebcc2fd817bf2aeebc0f5b86f9de2072f241 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 9 Aug 2021 08:40:42 +0200 Subject: [PATCH 072/219] improve kafka and doc readme (#60) * fix requires in test_kafka script * fix rockspec again * update kafka and fix 
readme --- .../kafka/kafka-events-apiv2.lua | 59 +++++++++++++------ stream-connectors/modules/docs/README.md | 15 ++--- 2 files changed, 50 insertions(+), 24 deletions(-) diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index d1d4862e9d6..7174621c84e 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -5,6 +5,7 @@ local sc_logger = require("centreon-stream-connectors-lib.sc_logger") local sc_broker = require("centreon-stream-connectors-lib.sc_broker") local sc_event = require("centreon-stream-connectors-lib.sc_event") local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") local kafka_config = require("centreon-stream-connectors-lib.rdkafka.config") local kafka_producer = require("centreon-stream-connectors-lib.rdkafka.producer") local kafka_topic_config = require("centreon-stream-connectors-lib.rdkafka.topic_config") @@ -57,8 +58,10 @@ function EventQueue.new(params) self.sc_params:param_override(params) self.sc_params:check_params() - self.sc_kafka_config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) - self.sc_kafka_config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) + -- SEGFAULT ON EL8 (only usefull for debugging) + -- self.sc_kafka_config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) + -- self.sc_kafka_config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) + -- initiate a kafka producer self.sc_kafka_producer = kafka_producer.new(self.sc_kafka_config) @@ -72,6 +75,21 @@ function EventQueue.new(params) self.sc_kafka_topic_config["auto.commit.enable"] = "true" self.sc_kafka_topic = kafka_topic.new(self.sc_kafka_producer, self.sc_params.params.topic, self.sc_kafka_topic_config) + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file() + self.sc_params:build_accepted_elements_info() + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_host_status() end, + [elements.service_status.id] = function () return self:format_service_status() end + }, + [categories.bam.id] = function () return self:format_ba_status() end + } + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self @@ -84,15 +102,25 @@ end function EventQueue:format_event() local category = self.sc_event.event.category local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} - if category == 1 and element == 14 then - self.sc_event.event.formated_event = self:format_host_status() - elseif category == 1 and element == 24 then - self.sc_event.event.formated_event = self:format_service_status() - elseif category == 1 and element == 5 then - self.sc_event.event.formated_event = self:format_downtime() - elseif category == 6 and element == 1 then - self.sc_event.event.formated_event = self:format_ba_status() + if self.format_template and template ~= nil and 
template ~= "" then + for index, value in pairs(template) do + self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) + end + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end end self:add() @@ -101,7 +129,7 @@ function EventQueue:format_event() end function EventQueue:format_host_status() - local data = { + self.sc_event.event.formated_event = { ["alerte.alerte_emetteur"] = tostring(self.sc_params.params.centreon_name) .. ";" .. self.sc_event.event.cache.host.name .. ";-", ["alerte.alerte_libelle"] = self.sc_event.event.cache.host.name, @@ -113,12 +141,11 @@ function EventQueue:format_host_status() ["alerte.custom_data.ticket_description"] = "", ["alerte.custom_data.ticket_note"] = "" } - - return data + end function EventQueue:format_service_status() - local data = { + self.sc_event.event.formated_event = { ["alerte.alerte_emetteur"] = tostring(self.sc_params.params.centreon_name) .. ";" .. self.sc_event.event.cache.host.name .. ";" .. self.sc_event.event.cache.service.description, ["alerte.alerte_libelle"] = self.sc_event.event.cache.host.name .. "_" .. self.sc_event.event.cache.service.description, @@ -131,11 +158,10 @@ function EventQueue:format_service_status() ["alerte.custom_data.ticket_note"] = "" } - return data end function EventQueue:format_ba_status() - local data = { + self.sc_event.event.formated_event = { ["alerte.alerte_emetteur"] = tostring(self.sc_params.params.centreon_name) .. ";Business Activity" .. self.sc_event.event.cache.ba.ba_name, ["alerte.alerte_libelle"] = "Business_Activity_" .. self.sc_event.event.cache.ba.ba_name, ["alerte.alerte_statut"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], @@ -146,7 +172,6 @@ function EventQueue:format_ba_status() ["alerte.custom_data.ticket_note"] = "" } - return data end -------------------------------------------------------------------------------- diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md index dcd883c8639..3f6340cc4ea 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -68,13 +68,14 @@ ## sc_param methods -| Method name | Method description | Link | -| ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------- | -| param_override | replace default values of params with the ones provided by users in the web configuration of the stream connector | [Documentation](sc_param.md#param_override-method) | -| check_params | make sure that the default stream connectors params provided by the user from the web configuration are valid. 
If not, uses the default value | [Documentation](sc_param.md#check_params-method) | -| is_mandatory_config_set | check that all mandatory parameters for a stream connector are set | [Documentation](sc_param.md#is_mandatory_config_set-method) | -| get_kafka_params | retreive Kafka dedicated parameters from the parameter list and put them in the provided kafka_config object | [Documentation](sc_param.md#get_kafka_params-method) | -| load_event_format_file | load a file that serves as a template for formatting events | [Documentation](sc_param.md#load_event_format_file-method) | +| Method name | Method description | Link | +| ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- | +| param_override | replace default values of params with the ones provided by users in the web configuration of the stream connector | [Documentation](sc_param.md#param_override-method) | +| check_params | make sure that the default stream connectors params provided by the user from the web configuration are valid. If not, uses the default value | [Documentation](sc_param.md#check_params-method) | +| is_mandatory_config_set | check that all mandatory parameters for a stream connector are set | [Documentation](sc_param.md#is_mandatory_config_set-method) | +| get_kafka_params | retreive Kafka dedicated parameters from the parameter list and put them in the provided kafka_config object | [Documentation](sc_param.md#get_kafka_params-method) | +| load_event_format_file | load a file that serves as a template for formatting events | [Documentation](sc_param.md#load_event_format_file-method) | +| build_accepted_elements_info | build a table that store information about accepted elements | [Documentation](sc_param.md#build_accepted_elements_info-method) | ## sc_event methods From 89161ca54a3ff2d57939afa8197fa9a1c49fee5c Mon Sep 17 00:00:00 2001 From: UrBnW <40244829+UrBnW@users.noreply.github.com> Date: Mon, 9 Aug 2021 16:43:36 +0200 Subject: [PATCH 073/219] enh(influxdb) get rid of deduplicate (#61) --- .../influxdb/influxdb-neb-apiv1.lua | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua index bae83be3969..c0bf7dbb379 100644 --- a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua +++ b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua @@ -101,17 +101,8 @@ end -- @param e An event -------------------------------------------------------------------------------- -local previous_event = "" - function EventQueue:add(e) - -- workaround https://github.com/centreon/centreon-broker/issues/201 - current_event = broker.json_encode(e) - if current_event == previous_event then - broker_log:info(3, "EventQueue:add: Duplicate event ignored.") - return false - end - previous_event = current_event - broker_log:info(3, "EventQueue:add: " .. current_event) + broker_log:info(3, "EventQueue:add: " .. 
broker.json_encode(e)) -- let's get and verify we have perfdata local perfdata, perfdata_err = broker.parse_perfdata(e.perfdata) if perfdata_err then From 03c6c25cf74649fb18dfa2786b79cee9f44f22b6 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 6 Sep 2021 10:36:29 +0200 Subject: [PATCH 074/219] Fix splunk (#62) * add rockspec files for 1.3.0 release * fix options in stream connector * add number to boolean method * handle basic connection parameters * add rockspec file for 1.4.0 release --- .../servicenow/servicenow-events-apiv2.lua | 5 +-- .../splunk/splunk-events-apiv2.lua | 13 +++---- .../splunk/splunk-metrics-apiv2.lua | 17 ++++---- .../sc_common.lua | 16 ++++++++ .../sc_params.lua | 16 ++++++++ stream-connectors/modules/docs/README.md | 1 + stream-connectors/modules/docs/sc_common.md | 30 ++++++++++++++ stream-connectors/modules/docs/sc_param.md | 5 +++ ...eon-stream-connectors-lib-1.4.0-1.rockspec | 39 +++++++++++++++++++ 9 files changed, 120 insertions(+), 22 deletions(-) create mode 100644 stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.0-1.rockspec diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua index 5e70795384e..cc3824bc22b 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua @@ -59,10 +59,6 @@ function EventQueue:new (params) self.sc_params.params.client_secret = params.client_secret self.sc_params.params.username = params.username self.sc_params.params.password = params.password - self.sc_params.params.proxy_address = params.proxy_address or '' - self.sc_params.params.proxy_port = params.proxy_port or '' - self.sc_params.params.proxy_username = params.proxy_username or '' - self.sc_params.params.proxy_password = params.proxy_password or '' -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then @@ -200,6 +196,7 @@ function EventQueue:call (url, method, data, authToken) :setopt_writefunction(function (response) res = res .. 
tostring(response) end) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) self.sc_logger:debug("EventQueue:call: Request initialize") diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index 1e9e9a06b59..e917df7eb18 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -32,8 +32,7 @@ function EventQueue.new(params) local mandatory_parameters = { "http_server_url", - "splunk_token", - "splunk_index" + "splunk_token" } self.fail = false @@ -54,11 +53,8 @@ function EventQueue.new(params) end -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs - self.sc_params.params.proxy_address = params.proxy_address - self.sc_params.params.proxy_port = params.proxy_port - self.sc_params.params.proxy_username = params.proxy_username - self.sc_params.params.proxy_password = params.proxy_password - self.sc_params.params.splunk_source = params.splunk_source + self.sc_params.params.splunk_index = params.splunk_index or "" + self.sc_params.params.splunk_source = params.splunk_source or "" self.sc_params.params.splunk_sourcetype = params.splunk_sourcetype or "_json" self.sc_params.params.splunk_host = params.splunk_host or "Central" self.sc_params.params.accepted_categories = params.accepted_categories or "neb" @@ -196,7 +192,8 @@ function EventQueue:send_data(data, element) http_response_body = http_response_body .. tostring(response) end ) - :setopt(curl.OPT_TIMEOUT, self.http_timeout) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) :setopt( curl.OPT_HTTPHEADER, { diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua index be545985987..bec66ce2372 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -30,8 +30,7 @@ function EventQueue.new(params) local mandatory_parameters = { "http_server_url", - "splunk_token", - "splunk_index" + "splunk_token" } self.fail = false @@ -52,15 +51,12 @@ function EventQueue.new(params) end -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs - self.sc_params.params.proxy_address = params.proxy_address - self.sc_params.params.proxy_port = params.proxy_port - self.sc_params.params.proxy_username = params.proxy_username - self.sc_params.params.proxy_password = params.proxy_password - self.sc_params.params.splunk_source = params.splunk_source + self.sc_params.params.splunk_index = params.splunk_index or "" + self.sc_params.params.splunk_source = params.splunk_source or "" self.sc_params.params.splunk_sourcetype = params.splunk_sourcetype or "_json" self.sc_params.params.splunk_host = params.splunk_host or "Central" - self.sc_params.params.accetepd_categories = params.accepted_categories or "neb" - self.sc_params.params.accetepd_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" self.sc_params.params.hard_only = params.hard_only or 0 
self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 @@ -192,7 +188,8 @@ function EventQueue:send_data(data, element) http_response_body = http_response_body .. tostring(response) end ) - :setopt(curl.OPT_TIMEOUT, self.http_timeout) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) :setopt( curl.OPT_HTTPHEADER, { diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index 496e7916d0a..7ba6272e92e 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -64,6 +64,22 @@ function ScCommon:boolean_to_number(boolean) return boolean and 1 or 0 end +--- number_to_boolean: convert a 0, 1 number to its boolean counterpart +-- @param number (number) the number to convert +-- @return (boolean) true if param is 1, false if param is 0 +function ScCommon:number_to_boolean(number) + if number ~= 0 and number ~= 1 then + self.sc_logger:error("[sc_common:number_to_boolean]: number is not 1 or 0. Returning nil. Parameter value is: " .. tostring(number)) + return nil + end + + if number == 1 then + return true + end + + return false +end + --- check_boolean_number_option_syntax: make sure the number is either 1 or 0 -- @param number (number) the boolean number that must be validated diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 88c0c6cdda7..5ae50f7170a 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -68,6 +68,16 @@ function sc_params.new(common, logger) max_buffer_size = 1, max_buffer_age = 5, + -- connection parameters + connection_timeout = 60, + allow_insecure_connection = 0, + + -- proxy parameters + proxy_address = "", + proxy_port = "", + proxy_username = "", + proxy_password = "", + -- event formatting parameters format_file = "", @@ -624,6 +634,12 @@ function ScParams:check_params() self.params.enable_host_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_host_status_dedup, 0) self.params.enable_service_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_service_status_dedup, 0) self.params.send_data_test = self.common:check_boolean_number_option_syntax(self.params.send_data_test, 0) + self.params.proxy_address = self.common:if_wrong_type(self.params.proxy_address, "string", "") + self.params.proxy_address = self.common:if_wrong_type(self.params.proxy_port, "number", "") + self.params.proxy_username = self.common:if_wrong_type(self.params.proxy_username, "string", "") + self.params.proxy_password = self.common:if_wrong_type(self.params.proxy_password, "string", "") + self.params.connection_timeout = self.common:if_wrong_type(self.params.connection_timeout, "number", 60) + self.params.allow_insecure_connection = self.common:number_to_boolean(self.common:check_boolean_number_option_syntax(self.params.allow_insecure_connection, 0)) end --- get_kafka_params: retrieve the kafka parameters and store them the self.params.kafka table diff --git a/stream-connectors/modules/docs/README.md 
b/stream-connectors/modules/docs/README.md index 3f6340cc4ea..03729258fcf 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -35,6 +35,7 @@ | ifnil_or_empty | check if a variable is empty or nil and replace it with a default value if it is the case | [Documentation](sc_common.md#ifnil_or_empty-method) | | if_wrong_type | check the type of a variable, if it is wrong, replace the variable with a default value | [Documentation](sc_common.md#if_wrong_type-method) | | boolean_to_number | change a true/false boolean to a 1/0 value | [Documentation](sc_common.md#boolean_to_number-method) | +| number_to_boolean | change a 0/1 number to a false/true value | [Documentation](sc_common.md#number_to_boolean-method) | | check_boolean_number_option_syntax | make sure that a boolean is 0 or 1, if that's not the case, replace it with a default value | [Documentation](sc_common.md#check_boolean_number_option_syntax-method) | | split | split a string using a separator (default is ",") and store each part in a table | [Documentation](sc_common.md#split-method) | | compare_numbers | compare two numbers using the given mathematical operator and return true or false | [Documentation](sc_common.md#compare_numbers-method) | diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md index 35a5970504d..592cc013e3b 100644 --- a/stream-connectors/modules/docs/sc_common.md +++ b/stream-connectors/modules/docs/sc_common.md @@ -17,6 +17,10 @@ - [boolean_to_number: parameters](#boolean_to_number-parameters) - [boolean_to_number: returns](#boolean_to_number-returns) - [boolean_to_number: example](#boolean_to_number-example) + - [number_to_boolean method](#number_to_boolean-method) + - [number_to_boolean: parameters](#number_to_boolean-parameters) + - [number_to_boolean: returns](#number_to_boolean-returns) + - [number_to_boolean: example](#number_to_boolean-example) - [check_boolean_number_option_syntax method](#check_boolean_number_option_syntax-method) - [check_boolean_number_option_syntax: parameters](#check_boolean_number_option_syntax-parameters) - [check_boolean_number_option_syntax: returns](#check_boolean_number_option_syntax-returns) @@ -163,6 +167,32 @@ local result = test_common:boolean_to_number(my_boolean) --> result is 1 ``` +## number_to_boolean method + +The **number_to_boolean** method converts a number to its boolean equivalent. 
+ +### number_to_boolean: parameters + +| parameter | type | optional | default value | +| ----------------- | ------ | -------- | ------------- | +| a number (0 or 1) | number | no | | + +### number_to_boolean: returns + +| return | type | always | condition | +| ------------------------- | ------- | ------ | -------------------------- | +| a boolean (true or false) | boolean | no | if parameter is 0 or 1 | +| nil | nil | no | if parameter is not 0 or 1 | + +### number_to_boolean: example + +```lua +local my_number = 1 + +local result = test_common:number_to_boolean(my_number) +--> result is true +``` + ## check_boolean_number_option_syntax method The **check_boolean_number_option_syntax** method checks if the first parameter is a boolean number (0 or 1) and if that is not the case, returns the second parameter diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index 3833cdf2ba6..f93bc68fbc1 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -63,6 +63,11 @@ The sc_param module provides methods to help you handle parameters for your stre | timestamp_conversion_format | string | %Y-%m-%d %X | | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | [date format information](https://www.lua.org/pil/22.1.html) | | send_data_test | number | 0 | | When set to 1, send data in the logfile of the stream connector instead of sending it where the stream connector was designed to | all | | | format_file | string | | | Path to a file that will be used as a template to format events instead of using default format | only usable for events stream connectors (\*-events-apiv2.lua) and not metrics stream connectors (\*-metrics-apiv2.lua) you should put the file in /etc/centreon-broker to keep your broker configuration in a single place. 
[**See documentation for more information**](templating.md) | +| proxy_address | string | | | address of the proxy | | +| proxy_port | number | | | port of the proxy | | +| proxy_username | string | | | user for the proxy | | +| proxy_password | string | | | password of the proxy user | | +| connection_timeout | number | 60 | | time to wait in seconds when opening a connection | | ## Module initialization diff --git a/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.0-1.rockspec b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.0-1.rockspec new file mode 100644 index 00000000000..a599961a210 --- /dev/null +++ b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.4.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.4.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + These modules provide helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From d74a207752098c82179892dad24b90d88ea2dbd2 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 6 Sep 2021 10:57:08 +0200 Subject: [PATCH 075/219] Forgot doc (#63) * doc for allow_insecure_connection param --- stream-connectors/modules/docs/sc_param.md | 1 + 1 file changed, 1 insertion(+) diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index f93bc68fbc1..175fe9612cc 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ 
b/stream-connectors/modules/docs/sc_param.md @@ -68,6 +68,7 @@ The sc_param module provides methods to help you handle parameters for your stre | proxy_username | string | | | user for the proxy | | | proxy_password | string | | | password of the proxy user | | | connection_timeout | number | 60 | | time to wait in seconds when opening a connection | | +| allow_insecure_connection | number | 0 | | check the certificate validity of the peer host (0 = the certificate must be valid), use 1 if you are using self-signed certificates | | ## Module initialization From 55ef6edba49f308945ad61b0ca8ca14fee5ef4c9 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 6 Sep 2021 11:37:20 +0200 Subject: [PATCH 076/219] Fix splunk metrics (#64) * fix metric handling for splunk --- .../centreon-certified/splunk/splunk-metrics-apiv2.lua | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua index bec66ce2372..ec8b72099d5 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -105,6 +105,11 @@ function EventQueue:format_accepted_event() .. ". If it is not a misconfiguration, you can open an issue at https://github.com/centreon/centreon-stream-connector-scripts/issues") else self.format_event[category][element]() + + -- add metrics in the formatted event + for metric_name, metric_data in pairs(self.sc_metrics.metrics) do + self.sc_event.event.formated_event["metric_name:" .. tostring(metric_name)] = metric_data.value + end end self:add() @@ -130,10 +135,6 @@ function EventQueue:format_metrics_service() service_description = self.sc_event.event.cache.service.description, ctime = self.sc_event.event.last_check } - - for metric_name, metric_data in pairs(self.sc_metrics.metrics) do + self.sc_event.event.formated_event["metric_name:" .. metric_name] = metric_data.value - end end -------------------------------------------------------------------------------- From b4535ef4d3a36f851d295fb47d5f79e673de27f4 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 6 Sep 2021 15:11:05 +0200 Subject: [PATCH 077/219] Streamline kafka (#65) * add a more generic kafka event format * add default values for el and cat params snow stream connectors --- .../kafka/kafka-events-apiv2.lua | 39 +++++-------------- .../servicenow/servicenow-events-apiv2.lua | 9 +++-- 2 files changed, 15 insertions(+), 33 deletions(-) diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 7174621c84e..1424035ef0b 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -130,46 +130,25 @@ end function EventQueue:format_host_status() self.sc_event.event.formated_event = { - ["alerte.alerte_emetteur"] = tostring(self.sc_params.params.centreon_name) - .. ";" .. self.sc_event.event.cache.host.name .. 
";-", - ["alerte.alerte_libelle"] = self.sc_event.event.cache.host.name, - ["alerte.alerte_statut"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - ["alerte.alerte_message"] = self.sc_common:ifnil_or_empty(string.match(string.gsub(self.sc_event.event.output, '\\', "_"), "^(.*)\n"), "no output"), - ["alerte.alerte_id"] = tostring(self.sc_params.params.centreon_name) - .. ";" .. self.sc_event.event.cache.host.name .. ";", - ["alerte.alerte_criticite"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - ["alerte.custom_data.ticket_description"] = "", - ["alerte.custom_data.ticket_note"] = "" + host = tostring(self.sc_event.event.cache.host.name), + state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + output = self.sc_common:ifnil_or_empty(string.match(string.gsub(self.sc_event.event.output, '\\', "_"), "^(.*)\n"), "no output"), } - end function EventQueue:format_service_status() self.sc_event.event.formated_event = { - ["alerte.alerte_emetteur"] = tostring(self.sc_params.params.centreon_name) - .. ";" .. self.sc_event.event.cache.host.name .. ";" .. self.sc_event.event.cache.service.description, - ["alerte.alerte_libelle"] = self.sc_event.event.cache.host.name .. "_" .. self.sc_event.event.cache.service.description, - ["alerte.alerte_statut"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - ["alerte.alerte_message"] = self.sc_common:ifnil_or_empty(string.match(string.gsub(self.sc_event.event.output, '\\', "_"), "^(.*)\n"), "no output"), - ["alerte.alerte_id"] = tostring(self.sc_params.params.centreon_name) - .. ";" .. self.sc_event.event.cache.host.name .. ";" .. self.sc_event.event.cache.service.description, - ["alerte.alerte_criticite"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - ["alerte.custom_data.ticket_description"] = "", - ["alerte.custom_data.ticket_note"] = "" + host = tostring(self.sc_event.event.cache.host.name), + service = tostring(self.sc_event.event.cache.service.description), + state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + output = self.sc_common:ifnil_or_empty(string.match(string.gsub(self.sc_event.event.output, '\\', "_"), "^(.*)\n"), "no output") } - end function EventQueue:format_ba_status() self.sc_event.event.formated_event = { - ["alerte.alerte_emetteur"] = tostring(self.sc_params.params.centreon_name) .. ";Business Activity" .. self.sc_event.event.cache.ba.ba_name, - ["alerte.alerte_libelle"] = "Business_Activity_" .. 
self.sc_event.event.cache.ba.ba_name, - ["alerte.alerte_statut"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - ["alerte.alerte_message"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - ["alerte.alerte_id"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - ["alerte.alerte_criticite"] = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - ["alerte.custom_data.ticket_description"] = "", - ["alerte.custom_data.ticket_note"] = "" + ba = tostring(self.sc_event.event.cache.ba.ba_name), + state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state] } end diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua index cc3824bc22b..29d41323683 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua @@ -60,6 +60,9 @@ function EventQueue:new (params) self.sc_params.params.username = params.username self.sc_params.params.password = params.password + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true @@ -271,18 +274,18 @@ function EventQueue:format_event() source = "centreon", event_class = "centreon", severity = 5, - node = self.sc_event.event.cache.host.name, + node = tostring(self.sc_event.event.cache.host.name), time_of_event = os.date("!%Y-%m-%d %H:%M:%S", self.sc_event.event.last_check), description = self.sc_event.event.output } if self.sc_event.event.element == 14 then - self.sc_event.event.formated_event.resource = self.sc_event.event.cache.host.name + self.sc_event.event.formated_event.resource = tostring(self.sc_event.event.cache.host.name) self.sc_event.event.formated_event.severity = self.sc_event.event.state elseif self.sc_event.event.element == 24 then - self.sc_event.event.formated_event.resource = self.sc_event.event.cache.service.description + self.sc_event.event.formated_event.resource = tostring(self.sc_event.event.cache.service.description) if self.sc_event.event.state == 0 then self.sc_event.event.formated_event.severity = 0 elseif self.sc_event.event.state == 1 then From 2b82c4fc41b459bd2005588c04029a7e0c107d57 Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 7 Sep 2021 11:30:36 +0200 Subject: [PATCH 078/219] fix require for kafka (#66) --- .../centreon-certified/kafka/kafka-events-apiv2.lua | 2 +- .../centreon-stream-connectors-lib/rdkafka/config.lua | 9 ++++++++- .../rdkafka/librdkafka.lua | 8 +++++++- .../centreon-stream-connectors-lib/rdkafka/producer.lua | 8 +++++++- .../centreon-stream-connectors-lib/rdkafka/topic.lua | 8 +++++++- .../rdkafka/topic_config.lua | 8 +++++++- stream-connectors/modules/docs/sc_macros.md | 2 +- stream-connectors/modules/docs/templating.md | 2 +- 8 files changed, 39 insertions(+), 8 deletions(-) diff --git 
a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 1424035ef0b..a6884ae5015 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -28,7 +28,7 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/kafka-stream-connector.log" - local log_level = params.log_level or 3 + local log_level = params.log_level or 1 -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua index 368b094a9fa..7e1d052c81b 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua @@ -1,7 +1,14 @@ #!/usr/bin/lua local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") -local ffi = require 'ffi' or 'cffi' + +-- ffi for el7 +local status, ffi = pcall(require, 'ffi') + +-- use cffi instead of ffi for el8 +if (not status) then + ffi = require 'cffi' +end local KafkaConfig = {} KafkaConfig.__index = KafkaConfig diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua index 5e97f3bc72f..7305d14a369 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua @@ -1,6 +1,12 @@ #!/usr/bin/lua -local ffi = require 'ffi' or 'cffi' +-- ffi for el7 +local status, ffi = pcall(require, 'ffi') + +-- use cffi instead of ffi for el8 +if (not status) then + ffi = require 'cffi' +end ffi.cdef[[ typedef struct rd_kafka_s rd_kafka_t; diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua index 28070a7a1f6..bfb28827cf0 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua @@ -3,7 +3,13 @@ local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") local KafkaConfig = require("centreon-stream-connectors-lib.rdkafka.config") local KafkaTopic = require("centreon-stream-connectors-lib.rdkafka.topic") -local ffi = require 'ffi' or 'cffi' +-- ffi for el7 +local status, ffi = pcall(require, 'ffi') + +-- use cffi instead of ffi for el8 +if (not status) then + ffi = require 'cffi' +end local DEFAULT_DESTROY_TIMEOUT_MS = 3000 diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua index b5f97a2e884..7b308698aff 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua @@ -2,7 +2,13 @@ local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") local KafkaTopicConfig = require("centreon-stream-connectors-lib.rdkafka.topic_config") -local ffi = require 'ffi' or 'cffi' +-- ffi for el7 +local status, ffi = pcall(require, 'ffi') + +-- use cffi instead of ffi for 
el8 +if (not status) then + ffi = require 'cffi' +end local KafkaTopic = { kafka_topic_map_ = {} } -- KafkaProducer will delete all topics on destroy diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua index a6f08fc4977..21a4e8a3e46 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua @@ -1,7 +1,13 @@ #!/usr/bin/lua local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") -local ffi = require 'ffi' or 'cffi' +-- ffi for el7 +local status, ffi = pcall(require, 'ffi') + +-- use cffi instead of ffi for el8 +if (not status) then + ffi = require 'cffi' +end local KafkaTopicConfig = {} KafkaTopicConfig.__index = KafkaTopicConfig diff --git a/stream-connectors/modules/docs/sc_macros.md b/stream-connectors/modules/docs/sc_macros.md index 8d655572c3c..98ecca20d48 100644 --- a/stream-connectors/modules/docs/sc_macros.md +++ b/stream-connectors/modules/docs/sc_macros.md @@ -76,7 +76,7 @@ This means that it is possible to use the following macros This one is a bit more complicated. The purpose is to retrieve information from the event cache using a macro. If you rely on the centreon-stream-connectors-lib to fill the cache, here is what you need to know. -There are X kind of cache +There are 8 kinds of cache - host cache (for any event that is linked to a host, which means any event but BA events) - service cache (for any event that is linked to a service) diff --git a/stream-connectors/modules/docs/templating.md b/stream-connectors/modules/docs/templating.md index 1bff215768a..c8d612e696f 100644 --- a/stream-connectors/modules/docs/templating.md +++ b/stream-connectors/modules/docs/templating.md @@ -118,4 +118,4 @@ Let say we would like to handle **ba_status** events. To do so, we need to add t } ``` -As state in the previous example, the service_status event type is not in the json file. Therefore, it will use the default format provided by the Splunk stream connector. +As stated in the previous example, the service_status event type is not in the json file. Therefore, it will use the default format provided by the Splunk stream connector. 
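The pcall-based fallback repeated across the rdkafka hunks above deserves a note: the original one-liner `local ffi = require 'ffi' or 'cffi'` never worked as a fallback, because `require('ffi')` is evaluated first and raises an error on el8 (where LuaJIT's built-in ffi is absent) before the `or` can apply; even if it were reached, the `or` branch would only yield the string `'cffi'`, not the module. A minimal standalone sketch of the corrected pattern, the same code as the patch shown in isolation:

```lua
-- pcall() turns the "module 'ffi' not found" error into a false status
-- instead of aborting the script (el7 ships LuaJIT's built-in ffi, el8
-- relies on the separate cffi binding).
local status, ffi = pcall(require, "ffi")

if not status then
  -- LuaJIT's ffi is unavailable: actually require the cffi module,
  -- rather than ending up with the bare string "cffi"
  ffi = require "cffi"
end

-- from here on, `ffi` exposes the expected cdef/load API either way
```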
From cb988b3be91a4cd460910af2f8c3780b2e7aa4a0 Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 7 Sep 2021 17:13:11 +0200 Subject: [PATCH 079/219] add rockspec file for 1.4.1-1 (#67) * add rockspec file for release --- ...eon-stream-connectors-lib-1.4.1-1.rockspec | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.1-1.rockspec diff --git a/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.1-1.rockspec b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.1-1.rockspec new file mode 100644 index 00000000000..a3a44fb62de --- /dev/null +++ b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.1-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.4.1-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.4.1-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + These modules provide helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 660f2b19a5f8285ec95dfdc264a3f27152f8109e Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 9 Sep 2021 18:15:24 +0200 Subject: [PATCH 080/219] Fix bigquery (#68) * fix sc_macro object creation + remove hardcoded id * fix log message by removing var name conflict --- .../google/bigquery-events-apiv2.lua | 135 ++++++++++-------- .../sc_macros.lua | 6 +- 2 files changed, 81 insertions(+), 26 deletions(-) diff --git 
a/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua index f54867c8093..fc6fab554b5 100644 --- a/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua +++ b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua @@ -22,41 +22,9 @@ function EventQueue.new(params) [4] = "scope_list" } - -- initiate EventQueue variables - self.events = { - [1] = {}, - [6] = {} - } - - self.events[1] = { - [1] = {}, - [5] = {}, - [14] = {}, - [24] = {} - } - - self.events[6] = { - [1] = {} - } - - self.flush = { - [1] = {}, - [6] = {} - } - - self.flush[1] = { - [1] = function () return self:flush_ack() end, - [5] = function () return self:flush_dt() end, - [14] = function () return self:flush_host() end, - [24] = function () return self:flush_service() end - } - - self.flush[6] = { - [1] = function () return self:flush_ba() end - } - + self.fail = false - + -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" local log_level = params.log_level or 2 @@ -66,8 +34,8 @@ function EventQueue.new(params) self.sc_common = sc_common.new(self.sc_logger) self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) - - -- checking mandatory parameters and setting a fail flag + + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true end @@ -79,7 +47,7 @@ function EventQueue.new(params) self.sc_params.params.proxy_port = params.proxy_port self.sc_params.params.proxy_username = params.proxy_username self.sc_params.params.proxy_password = params.proxy_password - + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() @@ -89,22 +57,58 @@ function EventQueue.new(params) self.sc_params.params.__internal_ts_ack_last_flush = os.time() self.sc_params.params.__internal_ts_dt_last_flush = os.time() self.sc_params.params.__internal_ts_ba_last_flush = os.time() - + self.sc_params.params.host_table = params.host_table or "hosts" self.sc_params.params.service_table = params.service_table or "services" self.sc_params.params.ack_table = params.ack_table or "acknowledgements" self.sc_params.params.downtime_table = params.downtime_table or "downtimes" self.sc_params.params.ba_table = params.ba_table or "bas" self.sc_params.params._sc_gbq_use_default_schemas = 1 - + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + -- initiate EventQueue variables + self.events = { + [categories.neb.id] = {}, + [categories.bam.id] = {} + } + + self.events[categories.neb.id] = { + [elements.acknowledgement.id] = {}, + [elements.downtime.id] = {}, + [elements.host_status.id] = {}, + [elements.service_status.id] = {} + } + + self.events[categories.bam.id] = { + [elements.ba_status.id] = {} + } + + self.flush = { + [categories.neb.id] = {}, + [categories.bam.id] = {} + } + + self.flush[categories.neb.id] = { + [elements.acknowledgement.id] = function () return self:flush_ack() end, + [elements.downtime.id] = function () return self:flush_dt() end, + [elements.host_status.id] = function () return self:flush_host() end, + [elements.service_status.id] = function () return self:flush_service() end + } + + self.flush[categories.bam.id] = { + [elements.ba_status.id] = function () return self:flush_ba() end + } 
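The hunk above replaces hardcoded BBDO protocol ids (category 1 with elements 14/24 for host/service status, category 6 with element 1 for BA status) with named lookups on the mapping exposed through `self.sc_params.params.bbdo`. A minimal illustrative sketch of the idea, reusing the names from this hunk; `formatted_event` is a hypothetical payload, not part of the patch:

```lua
-- The bbdo mapping provided by sc_params lets queues be addressed by name.
local categories = self.sc_params.params.bbdo.categories
local elements = self.sc_params.params.bbdo.elements

-- categories.neb.id is 1 and elements.host_status.id is 14, so this reaches
-- the same queue as the former hardcoded self.events[1][14], without asking
-- the reader (or a future refactor) to memorize raw protocol ids.
local host_queue = self.events[categories.neb.id][elements.host_status.id]
host_queue[#host_queue + 1] = formatted_event -- hypothetical event payload
```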
+ self.sc_params.params.google_bq_api_url = params.google_bq_api_url or "https://content-bigquery.googleapis.com/bigquery/v2" - - self.sc_macros = sc_macros.new(self.sc_common, self.sc_params.params, self.sc_logger) + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.sc_oauth = sc_oauth.new(self.sc_params.params, self.sc_common, self.sc_logger) -- , self.sc_common, self.sc_logger) self.sc_bq = sc_bq.new(self.sc_params.params, self.sc_logger) self.sc_bq:get_tables_schema() - - + + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self @@ -115,16 +119,16 @@ end -- @return true (boolean) -------------------------------------------------------------------------------- function EventQueue:format_event() - + self.sc_event.event.formated_event = {} self.sc_event.event.formated_event.json = {} - + for column, value in pairs(self.sc_bq.schemas[self.sc_event.event.category][self.sc_event.event.element]) do self.sc_event.event.formated_event.json[column] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) end - + self:add() - + return true end @@ -144,13 +148,16 @@ end -- @return (boolean) -------------------------------------------------------------------------------- function EventQueue:flush_host () + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + self.sc_logger:debug("EventQueue:flush: Concatenating all the host events as one string") -- send stored events retval = self:send_data(self.sc_params.params.host_table) -- reset stored events list - self.events[1][14] = {} + self.events[categories.neb.id][elements.host_status.id] = {} -- and update the timestamp self.sc_params.params.__internal_ts_host_last_flush = os.time() @@ -164,13 +171,16 @@ end -- @return (boolean) -------------------------------------------------------------------------------- function EventQueue:flush_service () + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + self.sc_logger:debug("EventQueue:flush: Concatenating all the service events as one string") -- send stored events retval = self:send_data(self.sc_params.params.service_table) -- reset stored events list - self.events[1][24] = {} + self.events[categories.neb.id][elements.service_status.id] = {} -- and update the timestamp self.sc_params.params.__internal_ts_service_last_flush = os.time() @@ -184,13 +194,16 @@ end -- @return (boolean) -------------------------------------------------------------------------------- function EventQueue:flush_ack () + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + self.sc_logger:debug("EventQueue:flush: Concatenating all the ack events as one string") -- send stored events retval = self:send_data(self.sc_params.params.ack_table) -- reset stored events list - self.events[1][1] = {} + self.events[categories.neb.id][elements.acknowledgement.id] = {} -- and update the timestamp self.sc_params.params.__internal_ts_ack_last_flush = os.time() @@ -204,13 +217,16 @@ end -- @return (boolean) -------------------------------------------------------------------------------- function EventQueue:flush_dt () + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + self.sc_logger:debug("EventQueue:flush: Concatenating all the downtime events as one string") -- send stored events retval = self:send_data(self.sc_params.params.downtime_table) 
-- reset stored events list - self.events[1][5] = {} + self.events[categories.neb.id][elements.downtime.id] = {} -- and update the timestamp self.sc_params.params.__internal_ts_dt_last_flush = os.time() @@ -224,13 +240,16 @@ end -- @return (boolean) -------------------------------------------------------------------------------- function EventQueue:flush_ba () + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + self.sc_logger:debug("EventQueue:flush: Concatenating all the BA events as one string") -- send stored events retval = self:send_data(self.sc_params.params.ba_table) -- reset stored events list - self.events[6][1] = {} + self.events[categories.bam.id][elements.ba_status.id] = {} -- and update the timestamp self.sc_params.params.__internal_ts_ba_last_flush = os.time() @@ -239,34 +258,36 @@ function EventQueue:flush_ba () end function EventQueue:flush_old_queues() + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements local current_time = os.time() -- flush old ack events - if #self.events[1][1] > 0 and os.time() - self.sc_params.params.__internal_ts_ack_last_flush > self.sc_params.params.max_buffer_age then + if #self.events[categories.neb.id][elements.acknowledgement.id] > 0 and os.time() - self.sc_params.params.__internal_ts_ack_last_flush > self.sc_params.params.max_buffer_age then self:flush_ack() self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_ack_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data") end -- flush old downtime events - if #self.events[1][5] > 0 and os.time() - self.sc_params.params.__internal_ts_dt_last_flush > self.sc_params.params.max_buffer_age then + if #self.events[categories.neb.id][elements.downtime.id] > 0 and os.time() - self.sc_params.params.__internal_ts_dt_last_flush > self.sc_params.params.max_buffer_age then self:flush_dt() self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_dt_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data") end -- flush old host events - if #self.events[1][14] > 0 and os.time() - self.sc_params.params.__internal_ts_host_last_flush > self.sc_params.params.max_buffer_age then + if #self.events[categories.neb.id][elements.host_status.id] > 0 and os.time() - self.sc_params.params.__internal_ts_host_last_flush > self.sc_params.params.max_buffer_age then self:flush_host() self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_host_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data") end -- flush old service events - if #self.events[1][24] > 0 and os.time() - self.sc_params.params.__internal_ts_service_last_flush > self.sc_params.params.max_buffer_age then + if #self.events[categories.neb.id][elements.service_status.id] > 0 and os.time() - self.sc_params.params.__internal_ts_service_last_flush > self.sc_params.params.max_buffer_age then self:flush_service() self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_service_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. 
") is reached, flushing data") end -- flush old BA events - if #self.events[6][1] > 0 and os.time() - self.sc_params.params.__internal_ts_ba_last_flush > self.sc_params.params.max_buffer_age then + if #self.events[categories.bam.id][elements.ba_status.id] > 0 and os.time() - self.sc_params.params.__internal_ts_ba_last_flush > self.sc_params.params.max_buffer_age then self:flush_ba() self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_ba_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data") end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua index 506fa64c589..7ac88f22306 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -214,14 +214,14 @@ end -- @param event (table) the event table (obivously, cache must be in the event table if we want to find something in it) -- @return false (boolean) if the macro is not a cache macro ({host_id} instead of {cache.xxxx.yyy} for example) or we can't find the cache type or the macro in the cache -- @return macro_value (string|boolean|number) the value of the macro -function ScMacros:get_cache_macro(macro, event) +function ScMacros:get_cache_macro(raw_macro, event) -- try to cut the macro in three parts - local cache, cache_type, macro = string.match(macro, "^{(cache)%.(%w+)%.(.*)}") + local cache, cache_type, macro = string.match(raw_macro, "^{(cache)%.(%w+)%.(.*)}") -- if cache is not set, it means that the macro wasn't a cache macro if not cache then - self.sc_logger:info("[sc_macros:get_cache_macro]: macro: " .. tostring(macro) .. " is not a cache macro") + self.sc_logger:info("[sc_macros:get_cache_macro]: macro: " .. tostring(raw_macro) .. 
" is not a cache macro") return false end From 3953cf3e3cc55a446eca6c3e4fd12dd75aea67f6 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 9 Sep 2021 18:22:01 +0200 Subject: [PATCH 081/219] add rockspec for 1.4.2-1 (#69) * add rockspec file for 1.4.2-1 --- ...eon-stream-connectors-lib-1.4.2-1.rockspec | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.2-1.rockspec diff --git a/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.2-1.rockspec b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.2-1.rockspec new file mode 100644 index 00000000000..7f0eb5d668c --- /dev/null +++ b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.2-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.4.2-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.4.2-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 909024443a94c44524c4e0ed6dc8e42ee8f3ef49 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 23 Sep 2021 15:12:45 +0200 Subject: [PATCH 082/219] fix Cache and flag and json (#70) * fix transformation flags not working * fix cache when setting skip_annon_events to 0 * better naming in sc_common --- .../sc_common.lua | 20 ++++++++-------- .../sc_event.lua | 23 ++++++++++--------- .../sc_macros.lua | 10 ++++---- 3 files changed, 27 
insertions(+), 26 deletions(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index 7ba6272e92e..0d4aacb77dc 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -23,12 +23,12 @@ end local ScCommon = {} -function sc_common.new(logger) +function sc_common.new(sc_logger) local self = {} - self.logger = logger - if not self.logger then - self.logger = sc_logger.new() + self.sc_logger = sc_logger + if not self.sc_logger then + self.sc_logger = sc_logger.new() end setmetatable(self, { __index = ScCommon }) @@ -101,7 +101,7 @@ end function ScCommon:split (text, separator) -- return false if text is nil or empty if text == nil or text == "" then - self.logger:error("[sc_common:split]: could not split text because it is nil or empty") + self.sc_logger:error("[sc_common:split]: could not split text because it is nil or empty") return false end @@ -167,7 +167,7 @@ end function ScCommon:generate_postfield_param_string(params) -- return false because params type is wrong if (type(params) ~= "table") then - self.logger:error("[sc_common:generate_postfield_param_string]: parameters to convert aren't in a table") + self.sc_logger:error("[sc_common:generate_postfield_param_string]: parameters to convert aren't in a table") return false end @@ -205,12 +205,12 @@ function ScCommon:load_json_file(json_file) io.close(file) -- parse it - local content = broker.json_decode(file_content) + local content, error = broker.json_decode(file_content) -- return false if json couldn't be parsed - if (type(content) ~= "table") then - self.sc_logger:error("[sc_common:load_json_file]: file " - .. tostring(json_file) .. ". Is not a valid json file.") + if error then + self.sc_logger:error("[sc_common:load_json_file]: could not parse json file " + .. tostring(json_file) .. ". Error is: " .. tostring(error)) return false end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index 259c44321b1..40b6bafcac8 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -238,7 +238,9 @@ function ScEvent:is_valid_host() .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) return false elseif (not self.event.cache.host and self.params.skip_anon_events == 0) then - self.event.cache.host.name = self.event.host_id + self.event.cache.host = { + name = self.event.host_id + } end -- force host name to be its id if no name has been found @@ -273,7 +275,9 @@ function ScEvent:is_valid_service() .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) return false elseif (not self.event.cache.service and self.params.skip_anon_events == 0) then - self.event.cache.service.description = self.event.service_id + self.event.cache.service = { + description = self.event.service_id + } end -- force service description to its id if no description has been found @@ -442,10 +446,10 @@ function ScEvent:is_valid_servicegroup() self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup. Accepted servicegroups are: " .. 
self.params.accepted_servicegroups) return false - else - self.sc_logger:debug("[sc_event:is_valid_servicegroup]: event for service with id: " .. tostring(self.event.service_id) - .. "matched servicegroup: " .. accepted_servicegroup_name) end + + self.sc_logger:debug("[sc_event:is_valid_servicegroup]: event for service with id: " .. tostring(self.event.service_id) + .. " matched servicegroup: " .. accepted_servicegroup_name) return true end @@ -518,12 +522,9 @@ function ScEvent:is_valid_ba() .. ". Found BA name is: " .. tostring(self.event.cache.ba.ba_name) .. ". And skip anon event param is set to: " .. tostring(self.params.skip_anon_events)) return false elseif (not self.event.cache.ba.ba_name and self.params.skip_anon_events == 0) then - self.event.cache.ba.ba_name = self.event.ba_id - end - - -- force ba name to be its id if no name has been found - if not self.event.cache.ba.ba_name then - self.event.cache.ba.ba_name = self.event.ba_id + self.event.cache.ba = { + ba_name = self.event.ba_id + } end return true diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua index 7ac88f22306..3b1837b0f3b 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -27,10 +27,10 @@ function sc_macros.new(params, logger) -- mapping of macro that we will convert if asked self.transform_macro = { - date = function () return self:transform_date(macro_value) end, - type = function () return self:transform_type(macro_value) end, - short = function () return self:transform_short(macro_value) end, - state = function () return self:transform_state(macro_value, event) end + date = function (macro_value) return self:transform_date(macro_value) end, + type = function (macro_value) return self:transform_type(macro_value) end, + short = function (macro_value) return self:transform_short(macro_value) end, + state = function (macro_value, event) return self:transform_state(macro_value, event) end } -- mapping of centreon standard macros to their stream connectors counterparts @@ -263,7 +263,7 @@ function ScMacros:get_event_macro(macro, event) if event[macro_value] then if flag then self.sc_logger:info("[sc_macros:get_event_macro]: macro has a flag associated. Flag is: " .. tostring(flag) - .. ", a macro value conversion will be done.") + .. ", a macro value conversion will be done. Macro value is: " .. 
tostring(macro_value)) -- convert the found value according to the flag that has been sent return self.transform_macro[flag](event[macro_value], event) else From 38894810b21141cbe80af221c3e00dcfd50b9b54 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 23 Sep 2021 17:04:41 +0200 Subject: [PATCH 083/219] Fix kafka format event (#71) * avoid function naming conflict --- .../centreon-certified/kafka/kafka-events-apiv2.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index a6884ae5015..9c069ead2c4 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -99,7 +99,7 @@ end -- EventQueue:format_event, build your own table with the desired information -- @return true (boolean) -------------------------------------------------------------------------------- -function EventQueue:format_event() +function EventQueue:format_accepted_event() local category = self.sc_event.event.category local element = self.sc_event.event.element local template = self.sc_params.params.format_template[category][element] @@ -260,7 +260,7 @@ function write(event) -- drop event if it is not validated if queue.sc_event:is_valid_event() then - queue:format_event() + queue:format_accepted_event() else return true end From aacf609c314bd2a275bdf8b9f26a52d68dfc96fc Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 5 Oct 2021 11:46:27 +0200 Subject: [PATCH 084/219] Refacto pagerduty (#73) --- .../pagerduty/pagerduty-events-apiv2.lua | 428 ++++++++++++++++++ .../sc_common.lua | 32 +- .../sc_macros.lua | 47 +- .../sc_params.lua | 13 +- stream-connectors/modules/docs/README.md | 1 + stream-connectors/modules/docs/sc_common.md | 30 ++ stream-connectors/modules/docs/sc_param.md | 68 ++- ...eon-stream-connectors-lib-1.5.0-1.rockspec | 39 ++ 8 files changed, 642 insertions(+), 16 deletions(-) create mode 100644 stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua create mode 100644 stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.0-1.rockspec diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua new file mode 100644 index 00000000000..6e870ee2421 --- /dev/null +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -0,0 +1,428 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker PagerDuty Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local new_from_timestamp = require "luatz.timetable".new_from_timestamp +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- event_queue class 
+-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param params The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "pdy_routing_key" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/pagerduty-events.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values don't suit the basic needs + self.sc_params.params.pdy_centreon_url = params.pdy_centreon_url or "http://set.pdy_centreon_url.parameter" + self.sc_params.params.http_server_url = params.http_server_url or "https://events.pagerduty.com/v2/enqueue" + self.sc_params.params.client = params.client or "Centreon Stream Connector" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.pdy_source = params.pdy_source or nil + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (data, element) return self:send_data(data, element) end + } + + self.state_to_severity_mapping = { + [0] = { + severity = "info", + action = "resolve" + }, + [1] = { + severity = "warning", + action = "trigger" + }, + [2] = { + severity = "critical", + action = "trigger" + }, + [3] = { + severity = "error", + action = "trigger" + } + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element 
= self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format the event if the stream connector does not handle this kind of event and it is not handled via a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +function EventQueue:format_event_host() + local event = self.sc_event.event + local pdy_custom_details = {} + + -- handle hostgroup + local hostgroups = self.sc_broker:get_hostgroups(event.host_id) + local pdy_hostgroups = "" + + -- retrieve hostgroups and store them in pdy_custom_details["Hostgroups"] + if not hostgroups then + pdy_hostgroups = "empty host group" + else + for index, hg_data in ipairs(hostgroups) do + if pdy_hostgroups ~= "" then + pdy_hostgroups = pdy_hostgroups .. ", " .. hg_data.group_name + else + pdy_hostgroups = hg_data.group_name + end + end + + pdy_custom_details["Hostgroups"] = pdy_hostgroups + end + + -- handle host severity + local host_severity = self.sc_broker:get_severity(event.host_id) + + if host_severity then + pdy_custom_details['Hostseverity'] = host_severity + end + + + self.sc_event.event.formated_event = { + payload = { + summary = tostring(event.cache.host.name) .. ": " .. self.sc_common:ifnil_or_empty(string.match(event.output, "^(.*)\n"), 'no output'), + timestamp = new_from_timestamp(event.last_update):rfc_3339(), + severity = self.state_to_severity_mapping[event.state].severity, + source = self.sc_params.params.pdy_source or tostring(event.cache.host.name), + component = tostring(event.cache.host.name), + group = pdy_hostgroups, + class = "host", + custom_details = pdy_custom_details, + }, + routing_key = self.sc_params.params.pdy_routing_key, + event_action = self.state_to_severity_mapping[event.state].action, + dedup_key = event.host_id .. "_H", + client = self.sc_params.params.client, + client_url = self.sc_params.params.client_url, + links = { + { + -- should think about using the new resources page but keep it as is for compatibility reasons + href = self.sc_params.params.pdy_centreon_url .. "/centreon/main.php?p=20202&o=hd&host_name=" .. tostring(event.cache.host.name), + text = "Link to Centreon host summary" + } + } + } +end + +function EventQueue:format_event_service() + local event = self.sc_event.event + local pdy_custom_details = {} + + -- handle hostgroup + local hostgroups = self.sc_broker:get_hostgroups(event.host_id) + local pdy_hostgroups = "" + + -- retrieve hostgroups and store them in pdy_custom_details["Hostgroups"] + if not hostgroups then + pdy_hostgroups = "empty host group" + else + for index, hg_data in ipairs(hostgroups) do + if pdy_hostgroups ~= "" then + pdy_hostgroups = pdy_hostgroups .. ", " .. 
hg_data.group_name
+      else
+        pdy_hostgroups = hg_data.group_name
+      end
+    end
+
+    pdy_custom_details["Hostgroups"] = pdy_hostgroups
+  end
+
+  -- handle servicegroups
+  local servicegroups = self.sc_broker:get_servicegroups(event.host_id, event.service_id)
+  local pdy_servicegroups = ""
+
+  -- retrieve servicegroups and store them in pdy_custom_details["Servicegroups"]
+  if not servicegroups then
+    pdy_servicegroups = "empty service group"
+  else
+    for index, sg_data in ipairs(servicegroups) do
+      if pdy_servicegroups ~= "" then
+        pdy_servicegroups = pdy_servicegroups .. ", " .. sg_data.group_name
+      else
+        pdy_servicegroups = sg_data.group_name
+      end
+    end
+
+    pdy_custom_details["Servicegroups"] = pdy_servicegroups
+  end
+
+  -- handle host severity
+  local host_severity = self.sc_broker:get_severity(event.host_id)
+
+  if host_severity then
+    pdy_custom_details["Hostseverity"] = host_severity
+  end
+
+  -- handle service severity
+  local service_severity = self.sc_broker:get_severity(event.host_id, event.service_id)
+
+  if service_severity then
+    pdy_custom_details["Serviceseverity"] = service_severity
+  end
+
+  self.sc_event.event.formated_event = {
+    payload = {
+      summary = tostring(event.cache.host.name) .. "/" .. tostring(event.cache.service.description) .. ": " .. self.sc_common:ifnil_or_empty(string.match(event.output, "^(.*)\n"), 'no output'),
+      timestamp = new_from_timestamp(event.last_update):rfc_3339(),
+      severity = self.state_to_severity_mapping[event.state].severity,
+      source = self.sc_params.params.pdy_source or tostring(event.cache.host.name),
+      component = tostring(event.cache.service.description),
+      group = pdy_hostgroups,
+      class = "service",
+      custom_details = pdy_custom_details,
+    },
+    routing_key = self.sc_params.params.pdy_routing_key,
+    event_action = self.state_to_severity_mapping[event.state].action,
+    dedup_key = event.host_id .. "_" .. event.service_id,
+    client = self.sc_params.params.client,
+    client_url = self.sc_params.params.client_url,
+    links = {
+      {
+        -- should think about using the new resources page but keep it as is for compatibility reasons
+        href = self.sc_params.params.pdy_centreon_url .. "/centreon/main.php?p=20202&o=hd&host_name=" .. tostring(event.cache.host.name),
+        text = "Link to Centreon host summary"
+      }
+    }
+  }
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store the formated event in the queue matching its category and element
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ". Max is: " .. 
tostring(self.sc_params.params.max_buffer_size))
+end
+
+function EventQueue:send_data(data, element)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+
+  local http_post_data = ""
+
+  for _, raw_event in ipairs(data) do
+    http_post_data = http_post_data .. broker.json_encode(raw_event)
+  end
+
+  -- write payload in the logfile for test purpose
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(http_post_data))
+    return true
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data))
+  self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. tostring(self.sc_params.params.http_server_url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(self.sc_params.params.http_server_url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "content-type: application/json",
+        "content-length:" .. string.len(http_post_data),
+      }
+    )
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(http_post_data)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  -- pagerduty uses 202 https://developer.pagerduty.com/api-reference/reference/events-v2/openapiv3.json/paths/~1enqueue/post
+  if http_response_code == 202 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. 
tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+-- write() function
+function write(event)
+  -- First, flush all queues if needed (too old or size too big)
+  queue.sc_flush:flush_all_queues(queue.send_data_method[1])
+
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return true
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  -- drop event if wrong category
+  if not queue.sc_event:is_valid_category() then
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+    return true
+  end
+
+  -- drop event if wrong element
+  if not queue.sc_event:is_valid_element() then
+    queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+      .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    return true
+  end
+
+  -- drop event if it is not validated
+  if queue.sc_event:is_valid_event() then
+    queue:format_accepted_event()
+  else
+    return true
+  end
+
+  -- Since we've added an event to a specific queue, flush it if queue is full
+  queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element)
+  return true
+end
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
index 0d4aacb77dc..0d42b7a318e 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
@@ -217,4 +217,34 @@ function ScCommon:load_json_file(json_file)
   return true, content
 end
 
-return sc_common
\ No newline at end of file
+--- json_escape: escape json special characters in a string
+-- @param string (string) the string that must be escaped
+-- @return string (string) the string with escaped characters
+function ScCommon:json_escape(string)
+  local type = type(string)
+
+  -- check that param is a valid string
+  if string == nil or type == "table" then
+    self.sc_logger:error("[sc_common:json_escape]: the input parameter is not valid, it is either nil or a table. Sent value: " .. 
tostring(string))
+    return string
+  end
+
+  -- nothing to escape in a boolean or number value
+  if type ~= "string" then
+    return string
+  end
+
+  -- escape json special characters (the backslash first, since it is the escape character itself)
+  string = string.gsub(string, '\\', '\\\\')
+  string = string.gsub(string, '\t', '\\t')
+  string = string.gsub(string, '\n', '\\n')
+  string = string.gsub(string, '\b', '\\b')
+  string = string.gsub(string, '\r', '\\r')
+  string = string.gsub(string, '\f', '\\f')
+  string = string.gsub(string, '/', '\\/')
+  string = string.gsub(string, '"', '\\"')
+
+  return string
+end
+
+return sc_common
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua
index 3b1837b0f3b..8b19bc68c54 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua
@@ -7,13 +7,15 @@ local sc_macros = {}
 
 local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
 
 local ScMacros = {}
 
 --- sc_macros.new: sc_macros constructor
 -- @param params (table) the stream connector parameter table
--- @param sc_logger (object) object instance from sc_logger module
-function sc_macros.new(params, logger)
+-- @param logger (object) object instance from sc_logger module
+-- @param common (object) object instance from sc_common module
+function sc_macros.new(params, logger, common)
  local self = {}
 
   -- initiate mandatory libs
@@ -22,6 +24,11 @@ function sc_macros.new(params, logger)
     self.sc_logger = sc_logger.new()
   end
 
+  self.sc_common = common
+  if not self.sc_common then
+    self.sc_common = sc_common.new(self.sc_logger)
+  end
+
   -- initiate params
   self.params = params
 
@@ -171,8 +178,9 @@ end
 --- replace_sc_macro: replace any stream connector macro with it's value
 -- @param string (string) the string in which there might be some stream connector macros to replace
 -- @param event (table) the current event table
--- @return converted_string (string) the input string but with the macro replaced with their values
-function ScMacros:replace_sc_macro(string, event)
+-- @param json_string (boolean) set to true when the input string is a json encoded string whose macro values must be json escaped
+-- @return converted_string (string) the input string but with the macros replaced by their json escaped values
+function ScMacros:replace_sc_macro(string, event, json_string)
   local cache_macro_value = false
   local event_macro_value = false
   local converted_string = string
 
@@ -180,7 +188,7 @@
   -- find all macros for exemple the string:
   -- {cache.host.name} is the name of host with id: {host_id}
   -- will generate two macros {cache.host.name} and {host_id})
-  for macro in string.gmatch(string, "{.*}") do
+  for macro in string.gmatch(string, "{[%w_.]+}") do
     self.sc_logger:debug("[sc_macros:replace_sc_macro]: found a macro, name is: " .. tostring(macro))
 
     -- check if macro is in the cache
@@ -190,7 +198,13 @@
     if cache_macro_value then
       self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is a cache macro. Macro name: " .. tostring(macro) .. ", value is: " .. tostring(cache_macro_value) .. ", trying to replace it in the string: " .. 
tostring(converted_string))
-      converted_string = string.gsub(converted_string, macro, cache_macro_value)
+
+      -- if the input string was a json encoded string, we must make sure that the value we are going to insert is json ready
+      if json_string then
+        cache_macro_value = self.sc_common:json_escape(cache_macro_value)
+      end
+
+      converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(cache_macro_value))
     else
       -- if not in cache, try to find a matching value in the event itself
       event_macro_value = self:get_event_macro(macro, event)
 
@@ -199,13 +213,32 @@
       if event_macro_value then
         self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is an event macro. Macro name: " .. tostring(macro) .. ", value is: " .. tostring(event_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string))
-        converted_string = string.gsub(converted_string, macro, event_macro_value)
+
+        -- if the input string was a json encoded string, we must make sure that the value we are going to insert is json ready
+        if json_string then
+          cache_macro_value = self.sc_common:json_escape(cache_macro_value)
+        end
+
+        converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(event_macro_value))
       else
         self.sc_logger:error("[sc_macros:replace_sc_macro]: macro: " .. tostring(macro) .. ", is not a valid stream connector macro")
       end
     end
   end
 
+  -- the input string was a json string, so we decode the result
+  if json_string then
+    local decoded_json, error = broker.json_decode(converted_string)
+
+    if error then
+      self.sc_logger:error("[sc_macros:replace_sc_macro]: couldn't decode json string: " .. tostring(converted_string)
+        .. ". Error is: " .. tostring(error))
+      return converted_string
+    end
+
+    return decoded_json
+  end
+
   return converted_string
 end
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
index 5ae50f7170a..8b5172dbfd0 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
@@ -677,27 +677,36 @@ function ScParams:is_mandatory_config_set(mandatory_params, params)
 end
 
 --- load_event_format_file: load a json file which purpose is to serve as a template to format events
+-- @param json_string [opt] (boolean) convert template from a lua table to a json string
 -- @return true|false (boolean) if file is valid template file or not
-function ScParams:load_event_format_file()
+function ScParams:load_event_format_file(json_string)
+  -- return if there is no file configured
   if self.params.format_file == "" or self.params.format_file == nil then
    return false
  end
 
   local retval, content = self.common:load_json_file(self.params.format_file)
 
+  -- return if we couldn't load the json file
   if not retval then
     return false
   end
 
+  -- initiate variables
   local categories = self.params.bbdo.categories
   local elements = self.params.bbdo.elements
-
  local tpl_category
  local tpl_element
 
   -- store format template in their appropriate category/element table
   for cat_el, format in pairs(content) do
     tpl_category, tpl_element = string.match(cat_el, "^(%w+)_(.*)")
+
+    -- convert the template back to a json string if requested
+    if json_string then
+      format = broker.json_encode(format)
+    end
+
     self.params.format_template[categories[tpl_category].id][elements[tpl_element].id] = format
   end
diff --git a/stream-connectors/modules/docs/README.md 
b/stream-connectors/modules/docs/README.md
index 03729258fcf..bfbe56b47d5 100644
--- a/stream-connectors/modules/docs/README.md
+++ b/stream-connectors/modules/docs/README.md
@@ -41,6 +41,7 @@
 | compare_numbers | compare two numbers using the given mathematical operator and return true or false | [Documentation](sc_common.md#compare_numbers-method) |
 | generate_postfield_param_string | convert a table of parameters into an url encoded parameters string | [Documentation](sc_common.md#generate_postfield_param_string-method) |
 | load_json_file | method loads a json file and parse it | [Documentation](sc_common.md#load_json_file-method) |
+| json_escape | escape json special characters in a string | [Documentation](sc_common.md#json_escape-method) |
 
 ## sc_logger methods
 
diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md
index 592cc013e3b..10ca97c6e0c 100644
--- a/stream-connectors/modules/docs/sc_common.md
+++ b/stream-connectors/modules/docs/sc_common.md
@@ -41,6 +41,10 @@
   - [load_json_file: parameters](#load_json_file-parameters)
   - [load_json_file: returns](#load_json_file-returns)
   - [load_json_file: example](#load_json_file-example)
+  - [json_escape method](#json_escape-method)
+    - [json_escape: parameters](#json_escape-parameters)
+    - [json_escape: returns](#json_escape-returns)
+    - [json_escape: example](#json_escape-example)
 
 ## Introduction
 
@@ -369,3 +373,29 @@ json_file = 3
 result, content = test_common:load_json_file(json_file)
 --> result is false, content is nil
 ```
+
+## json_escape method
+
+The **json_escape** method escapes json special characters.
+
+### json_escape: parameters
+
+| parameter | type | optional | default value |
+| ----------------------------- | ------ | -------- | ------------- |
+| a string that must be escaped | string | no | |
+
+### json_escape: returns
+
+| return | type | always | condition |
+| ---------------------------------------------------------------------- | -------------------------------- | ------ | --------- |
+| an escaped string (or the raw parameter if it was nil or not a string) | string (or input parameter type) | yes | |
+
+### json_escape: example
+
+```lua
+local string = 'string with " and backslashes \\ and tab:\tend tab'
+--> string is 'string with " and backslashes \ and tab:	end tab'
+
+local result = test_common:json_escape(string)
+--> result is 'string with \" and backslashes \\ and tab:\tend tab'
+```
diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md
index 175fe9612cc..9a4cc24c45e 100644
--- a/stream-connectors/modules/docs/sc_param.md
+++ b/stream-connectors/modules/docs/sc_param.md
@@ -19,6 +19,7 @@
   - [is_mandatory_config_set: returns](#is_mandatory_config_set-returns)
   - [is_mandatory_config_set: example](#is_mandatory_config_set-example)
   - [load_event_format_file method](#load_event_format_file-method)
+    - [load_event_format_file: parameters](#load_event_format_file-parameters)
     - [load_event_format_file: returns](#load_event_format_file-returns)
     - [load_event_format_file: example](#load_event_format_file-example)
   - [build_accepted_elements_info method](#build_accepted_elements_info-method)
@@ -60,7 +61,7 @@ The sc_param module provides methods to help you handle parameters for your stre
 | enable_service_status_dedup | number | 1 | | enable the deduplication of service status event when set to 1 | service_status(neb) | |
 | accepted_authors | string | | | coma separated list of accepted authors for a comment. 
It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | |
 | local_time_diff_from_utc | number | default value is the time difference the centreon central server has from UTC | | the time difference from UTC in seconds | all | |
-| timestamp_conversion_format | string | %Y-%m-%d %X | | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | (date format information)[https://www.lua.org/pil/22.1.html] |
+| timestamp_conversion_format | string | %Y-%m-%d %X | | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | [date format information](https://www.lua.org/pil/22.1.html) |
 | send_data_test | number | 0 | | When set to 1, send data in the logfile of the stream connector instead of sending it where the stream connector was designed to | all | |
 | format_file | string | | | Path to a file that will be used as a template to format events instead of using default format | only usable for events stream connectors (\*-events-apiv2.lua) and not metrics stream connectors (\*-metrics-apiv2.lua) you should put the file in /etc/centreon-broker to keep your broker configuration in a single place. [**See documentation for more information**](templating.md) |
 | proxy_address | string | | | address of the proxy | |
@@ -68,7 +69,7 @@ The sc_param module provides methods to help you handle parameters for your stre
 | proxy_username | string | | | user for the proxy | |
 | proxy_password | string | | | pasword of the proxy user | |
 | connection_timeout | number | 60 | | time to wait in second when opening connection | |
-| allow_insecure_connection | number | 0 | | check the certificate validity of the peer host (0 = needs to be a valid certificate), use 1 if you are using self signed certificates | |
+| allow_insecure_connection | number | 0 | | check the certificate validity of the peer host (0 = needs to be a valid certificate), use 1 if you are using self signed certificates |  |
 
 ## Module initialization
 
@@ -243,7 +244,13 @@
 
 ## load_event_format_file method
 
-The **load_event_format_file** load a json file which purpose is to serve as a template to format events. It will use the [**format_file parameter**](#default-parameters) in order to know which file to load. If a file has been successfully loaded, a template table will be created in the self.params table.
+The **load_event_format_file** method loads a json file whose purpose is to serve as a template to format events. It will use the [**format_file parameter**](#default-parameters) in order to know which file to load. If a file has been successfully loaded, a template table will be created in the self.params table. If the **json_string** parameter is set to true, the template format won't be a table but a json string. 
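+
+To see how this flag is meant to be consumed, here is a minimal sketch mirroring the events stream connectors of this repository (the `category`, `element` and `self.*` objects are assumed to be initialized as in those connectors):
+
+```lua
+-- load the format file with templates stored as json strings
+self.format_template = self.sc_params:load_event_format_file(true)
+
+-- later, when an accepted event must be formatted
+local template = self.sc_params.params.format_template[category][element]
+
+if self.format_template and template ~= nil and template ~= "" then
+  -- macro values are json escaped and the converted string is decoded back to a lua table
+  self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true)
+end
+```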
+ +### load_event_format_file: parameters + +| parameter | type | optional | default value | +| ----------- | ------- | -------- | ------------------ | +| json_string | boolean | yes | nil (act as false) | ### load_event_format_file: returns @@ -254,17 +261,66 @@ The **load_event_format_file** load a json file which purpose is to serve as a t ### load_event_format_file: example ```lua +--[[ + /etc/centreon-broker/sc_template.json content is: + + { + "neb_service_status": { + "time_of_event": "{last_check_scdate}", + "index": "centreon", + "payload": { + "host_name": "{cache.host.name}", + "service_name": "{cache.service.description}", + "status": "{state}" + } + } + } +]]-- test_param.params.format_file = "/etc/centreon-broker/sc_template.json" +-- using true as a parameter +local result = test_param:load_event_format_file(true) -local result = test_param:load_event_format_file() +--> result is true +--[[ +test_param.params.format_template is now created and looks like + +test_param.params = { + format_template = { + [1] = { + [24] = '{"time_of_event":"{last_check_scdate}","index":"centreon","payload":{"host_name":"{cache.host.name}","service_name":"{cache.service.description}","status":"{state}"}}' + } + } +} +]]-- + +-- using false as a parameter +result = test_param:load_event_format_file(false) --> result is true ---> test_param.params.format_template is now created +--[[ +test_param.params.format_template is now created and looks like + +test_param.params = { + format_template = { + [1] = { + [24] = { + time_of_event = "{last_check_scdate}", + index = "centreon", + payload = { + host_name = "{cache.host.name}", + service_name = "{cache.service.description}", + status = "{state}" + } + } + } + } +} +]]-- test_param.params.format_file = 3 -result = test_param:load_event_format_file(mandatory_params, params) +result = test_param:load_event_format_file(true) --> result is false ``` diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.0-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.0-1.rockspec new file mode 100644 index 00000000000..e07503e4bc8 --- /dev/null +++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.5.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.5.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = 
"modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 688a62c3978cc31f836ebc657958bd13f31c2e10 Mon Sep 17 00:00:00 2001 From: tcharles Date: Wed, 6 Oct 2021 11:02:10 +0200 Subject: [PATCH 085/219] fix proxy param (#74) * fix proxy param * add rockspec 1.5.1 --- .../sc_params.lua | 2 +- ...eon-stream-connectors-lib-1.5.1-1.rockspec | 39 +++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.1-1.rockspec diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 8b5172dbfd0..07c879b64a8 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -635,7 +635,7 @@ function ScParams:check_params() self.params.enable_service_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_service_status_dedup, 0) self.params.send_data_test = self.common:check_boolean_number_option_syntax(self.params.send_data_test, 0) self.params.proxy_address = self.common:if_wrong_type(self.params.proxy_address, "string", "") - self.params.proxy_address = self.common:if_wrong_type(self.params.proxy_port, "number", "") + self.params.proxy_port = self.common:if_wrong_type(self.params.proxy_port, "number", "") self.params.proxy_username = self.common:if_wrong_type(self.params.proxy_username, "string", "") self.params.proxy_password = self.common:if_wrong_type(self.params.proxy_password, "string", "") self.params.connection_timeout = self.common:if_wrong_type(self.params.connection_timeout, "number", 60) diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.1-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.1-1.rockspec new file mode 100644 index 00000000000..f7850706c83 --- /dev/null +++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.1-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.5.1-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.5.1-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + 
["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From cc4f9b6d1c2513d40cfd8b891a28e6b05876b5b0 Mon Sep 17 00:00:00 2001 From: tcharles Date: Wed, 6 Oct 2021 14:08:01 +0200 Subject: [PATCH 086/219] Buffer issue (#75) * fix bulk events * add rockspec for 1.5.2 --- .../sc_flush.lua | 5 ++- ...eon-stream-connectors-lib-1.5.2-1.rockspec | 39 +++++++++++++++++++ 2 files changed, 42 insertions(+), 2 deletions(-) create mode 100644 stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.2-1.rockspec diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua index df09cdef527..e7b89f30afc 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua @@ -76,10 +76,11 @@ function ScFlush:flush_queue(send_method, category, element) local rem = self.params.reverse_element_mapping; -- flush if events in the queue are too old or if the queue is full - if (self.queues[category][element].flush_date > self.params.max_buffer_age) + if (os.time() > self.queues[category][element].flush_date + self.params.max_buffer_age) or (#self.queues[category][element].events > self.params.max_buffer_size) then - self.sc_logger:debug("sc_queue:flush_queue: flushing all the " .. rem[category][element] .. " events") + self.sc_logger:debug("[sc_flush:flush_queue]: flushing all the " .. rem[category][element] .. " events. Last flush date was: " + .. tostring(self.queues[category][element].flush_date) .. ". Buffer size is: " .. 
tostring(#self.queues[category][element].events)) local retval = send_method(self.queues[category][element].events, rem[category][element]) if retval then diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.2-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.2-1.rockspec new file mode 100644 index 00000000000..1f96a3de553 --- /dev/null +++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.2-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.5.2-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.5.2-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 74e6d164aaf7f6a2817975f900ab3dcb34afa864 Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 12 Oct 2021 15:43:03 +0200 Subject: [PATCH 087/219] bad log messages and typos (#76) * bad log messages and typos * fix wrong severity mapping pagerduty * remove event bulking pagerduty * update snow sc with latest features * prevent bad macro replacement when % in string * update snow * handle state conversion flag for ack events * fix bad log message for log params * add log_level corresponding table in doc * add rockspec for version 1.5.3 --- .../pagerduty/pagerduty-events-apiv2.lua | 9 +- .../servicenow/servicenow-events-apiv2.lua | 207 +++++++++++------- .../sc_macros.lua | 13 +- .../sc_params.lua | 29 ++- 
stream-connectors/modules/docs/sc_logger.md | 13 ++ ...eon-stream-connectors-lib-1.5.3-1.rockspec | 39 ++++ 6 files changed, 212 insertions(+), 98 deletions(-) create mode 100644 stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.3-1.rockspec diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index 6e870ee2421..7feac6e9cd0 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -1,6 +1,6 @@ #!/usr/bin/lua -------------------------------------------------------------------------------- --- Centreon Broker Splunk Connector Events +-- Centreon Broker Pagerduty Connector Events -------------------------------------------------------------------------------- @@ -56,6 +56,9 @@ function EventQueue.new(params) self.fail = true end + -- force buffer size to 1 to avoid breaking the communication with pagerduty (can't send more than one event at once) + params.max_buffer_size = 1 + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs self.sc_params.params.pdy_centreon_url = params.pdy_centreon_url or "http://set.pdy_centreon_url.parameter" self.sc_params.params.http_server_url = params.http_server_url or "https://events.pagerduty.com/v2/enqueue" @@ -103,7 +106,7 @@ function EventQueue.new(params) }, [3] = { severity = "error", - type = "trigger" + action = "trigger" } } @@ -314,7 +317,7 @@ function EventQueue:send_data(data, element) end self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) - self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. tostring(self.sc_params.params.http_server_url)) + self.sc_logger:info("[EventQueue:send_data]: Pagerduty address is: " .. 
tostring(self.sc_params.params.http_server_url)) local http_response_body = "" local http_request = curl.easy() diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua index 29d41323683..a7b5747fa6b 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua @@ -13,6 +13,8 @@ local sc_logger = require("centreon-stream-connectors-lib.sc_logger") local sc_broker = require("centreon-stream-connectors-lib.sc_broker") local sc_event = require("centreon-stream-connectors-lib.sc_event") local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") -------------------------------------------------------------------------------- -- EventQueue class @@ -27,7 +29,7 @@ EventQueue.__index = EventQueue -- @return the new EventQueue -------------------------------------------------------------------------------- -function EventQueue:new (params) +function EventQueue.new (params) local self = {} local mandatory_parameters = { [1] = "instance", @@ -72,6 +74,26 @@ function EventQueue:new (params) self.sc_params:param_override(params) self.sc_params:check_params() + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (data, element) return self:send_data(data, element) end + } + setmetatable(self, { __index = EventQueue }) return self @@ -193,6 +215,12 @@ function EventQueue:call (url, method, data, authToken) local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. ".service-now.com/" .. tostring(url) self.sc_logger:debug("EventQueue:call: Prepare url " .. endpoint) + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(data) .. " to endpoint: " .. 
tostring(endpoint)) + return true + end + local res = "" local request = curl.easy() :setopt_url(endpoint) @@ -268,102 +296,124 @@ function EventQueue:call (url, method, data, authToken) return broker.json_decode(res) end +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] -function EventQueue:format_event() - self.sc_event.event.formated_event = { - source = "centreon", - event_class = "centreon", - severity = 5, - node = tostring(self.sc_event.event.cache.host.name), - time_of_event = os.date("!%Y-%m-%d %H:%M:%S", self.sc_event.event.last_check), - description = self.sc_event.event.output - } - - if self.sc_event.event.element == 14 then - - self.sc_event.event.formated_event.resource = tostring(self.sc_event.event.cache.host.name) - self.sc_event.event.formated_event.severity = self.sc_event.event.state - - elseif self.sc_event.event.element == 24 then - self.sc_event.event.formated_event.resource = tostring(self.sc_event.event.cache.service.description) - if self.sc_event.event.state == 0 then - self.sc_event.event.formated_event.severity = 0 - elseif self.sc_event.event.state == 1 then - self.sc_event.event.formated_event.severity = 3 - elseif self.sc_event.event.state == 2 then - self.sc_event.event.formated_event.severity = 1 - elseif self.sc_event.event.state == 3 then - self.sc_event.event.formated_event.severity = 4 + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+function EventQueue:format_event_host()
+  self.sc_event.event.formated_event = {
+    source = "centreon",
+    event_class = "centreon",
+    node = tostring(self.sc_event.event.cache.host.name),
+    time_of_event = os.date("!%Y-%m-%d %H:%M:%S", self.sc_event.event.last_check),
+    description = self.sc_event.event.output,
+    resource = tostring(self.sc_event.event.cache.host.name),
+    severity = self.sc_event.event.state
+  }
+end
 
-local queue
+function EventQueue:format_event_service()
+  self.sc_event.event.formated_event = {
+    source = "centreon",
+    event_class = "centreon",
+    node = tostring(self.sc_event.event.cache.host.name),
+    time_of_event = os.date("!%Y-%m-%d %H:%M:%S", self.sc_event.event.last_check),
+    description = self.sc_event.event.output,
+    resource = tostring(self.sc_event.event.cache.service.description),
+    severity = 5
+  }
 
--------------------------------------------------------------------------------
--- init, initiate stream connector with parameters from the configuration file
--- @param {table} parameters, the table with all the configuration parameters
--------------------------------------------------------------------------------
-function init (parameters)
-  queue = EventQueue:new(parameters)
+  if self.sc_event.event.state == 0 then
+    self.sc_event.event.formated_event.severity = 0
+  elseif self.sc_event.event.state == 1 then
+    self.sc_event.event.formated_event.severity = 3
+  elseif self.sc_event.event.state == 2 then
+    self.sc_event.event.formated_event.severity = 1
+  elseif self.sc_event.event.state == 3 then
+    self.sc_event.event.formated_event.severity = 4
+  end
 end
 
--------------------------------------------------------------------------------
--- EventQueue:add, add an event to the queue
--- @param {table} eventData, the data related to the event
--- @return {boolean}
--------------------------------------------------------------------------------
-function EventQueue:add ()
-  self.events[#self.events + 1] = self.sc_event.event.formated_event
-  return true
+local queue
+
+-- init() function
+function init(conf)
+  queue = EventQueue.new(conf)
 end
 
 --------------------------------------------------------------------------------
--- EventQueue:flush, flush stored events
--- Called when the max number of events or the max age are reached
--- @return {boolean}
+-- EventQueue:add, add an event to the sending queue
+-- it stores the formated event in the queue matching the event category and element
 --------------------------------------------------------------------------------
-function EventQueue:flush ()
-  self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string")
+function EventQueue:add()
+  -- store the formated event in the queue matching its category and element
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
 
-  self:send_data()
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. 
tostring(self.sc_params.params.reverse_element_mapping[category][element]))
 
-  self.events = {}
-  
-  -- and update the timestamp
-  self.sc_params.params.__internal_ts_last_flush = os.time()
-  return true
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ". Max is: " .. tostring(self.sc_params.params.max_buffer_size))
 end
 
 --------------------------------------------------------------------------------
 -- EventQueue:send_data, send data to external tool
 -- @return {boolean}
 --------------------------------------------------------------------------------
-function EventQueue:send_data ()
-  local data = ''
-  local authToken = self:getAuthToken()
+function EventQueue:send_data(data, element)
+  local authToken
   local counter = 0
+  local http_post_data
+
+  -- generate a fake token for test purpose or use a real one if not testing
+  if self.sc_params.params.send_data_test == 1 then
+    authToken = "fake_token"
+  else
+    authToken = self:getAuthToken()
+  end
 
-  for _, raw_event in ipairs(self.events) do
+  for _, raw_event in ipairs(data) do
     if counter == 0 then
-      data = broker.json_encode(raw_event)
+      http_post_data = broker.json_encode(raw_event)
       counter = counter + 1
     else
-      data = data .. ',' .. broker.json_encode(raw_event)
+      http_post_data = http_post_data .. ',' .. broker.json_encode(raw_event)
     end
   end
 
-  data = '{"records":[' .. data .. ']}'
-  self.sc_logger:notice('EventQueue:send_data: creating json: ' .. data)
+  http_post_data = '{"records":[' .. http_post_data .. ']}'
+  self.sc_logger:info('EventQueue:send_data: creating json: ' .. http_post_data)
 
   if self:call(
       "api/global/em/jsonv2",
       "POST",
-      data,
+      http_post_data,
       authToken
     ) then
     return true
@@ -378,6 +428,9 @@ end
 -- @return {boolean}
 --------------------------------------------------------------------------------
 function write (event)
+  -- First, flush all queues if needed (too old or size too big)
+  queue.sc_flush:flush_all_queues(queue.send_data_method[1])
+
   -- skip event if a mandatory parameter is missing
   if queue.fail then
     queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
@@ -389,39 +442,27 @@ function write (event)
 
   -- drop event if wrong category
   if not queue.sc_event:is_valid_category() then
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
     return true
   end
 
   -- drop event if wrong element
   if not queue.sc_event:is_valid_element() then
+    queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+      .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
     return true
   end
-
-  -- First, are there some old events waiting in the flush queue ?
-  if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then
-    queue.sc_logger:warning("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. 
") is reached, flushing data") - queue:flush() - end - - -- Then we check that the event queue is not already full - if (#queue.events >= queue.sc_params.params.max_buffer_size) then - queue.sc_logger:warning("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") - queue:flush() - end - -- adding event to the queue + -- drop event if it is not validated if queue.sc_event:is_valid_event() then - queue:format_event() + queue:format_accepted_event() else return true end - -- Then we check whether it is time to send the events to the receiver and flush - if (#queue.events >= queue.sc_params.params.max_buffer_size) then - queue.sc_logger:warning( "write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached, flushing data") - return queue:flush() - end - + -- Since we've added an event to a specific queue, flush it if queue is full + queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) return true end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua index 8b19bc68c54..ca900af7d48 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -204,7 +204,7 @@ function ScMacros:replace_sc_macro(string, event, json_string) cache_macro_value = self.sc_common:json_escape(cache_macro_value) end - converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(cache_macro_value)) + converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(string.gsub(event_macro_value, "%%", "%%%%"))) else -- if not in cache, try to find a matching value in the event itself event_macro_value = self:get_event_macro(macro, event) @@ -219,7 +219,7 @@ function ScMacros:replace_sc_macro(string, event, json_string) cache_macro_value = self.sc_common:json_escape(cache_macro_value) end - converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(event_macro_value)) + converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(string.gsub(event_macro_value, "%%", "%%%%"))) else self.sc_logger:error("[sc_macros:replace_sc_macro]: macro: " .. tostring(macro) .. ", is not a valid stream connector macro") end @@ -397,6 +397,15 @@ end -- @param event (table) the event table -- @return string (string) the status of the event in a human readable format (e.g: OK, WARNING) function ScMacros:transform_state(macro_value, event) + + -- acknowledgement events are special, the state can be for a host or a service. 
+ -- We force the element to be host_status or service_status in order to properly convert the state + if event.element == 1 and event.service_id == 0 then + return self.params.status_mapping[event.category][event.element].host_status[macro_value] + elseif event.element == 1 and event.service_id ~= 0 then + return self.params.status_mapping[event.category][event.element].service_status[macro_value] + end + return self.params.status_mapping[event.category][event.element][macro_value] end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 07c879b64a8..94dbfce35dc 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -90,6 +90,10 @@ function sc_params.new(common, logger) -- testing parameters send_data_test = 0, + + -- logging parameters + logfile = "", + log_level = "", -- initiate mappings element_mapping = {}, @@ -558,6 +562,10 @@ function sc_params.new(common, logger) -- initiate category and status mapping self.params.status_mapping = { [categories.neb.id] = { + [elements.acknowledgement.id] = { + host_status = {}, + service_status = {} + }, [elements.downtime.id] = { [1] = {}, [2] = {} @@ -586,9 +594,15 @@ function sc_params.new(common, logger) [categories.bam.id] = {} } + -- downtime status mapping self.params.status_mapping[categories.neb.id][elements.downtime.id][1] = self.params.status_mapping[categories.neb.id][elements.service_status.id] self.params.status_mapping[categories.neb.id][elements.downtime.id][2] = self.params.status_mapping[categories.neb.id][elements.host_status.id] + -- acknowledgement status mapping + self.params.status_mapping[categories.neb.id][elements.acknowledgement.id].host_status = self.params.status_mapping[categories.neb.id][elements.host_status.id] + self.params.status_mapping[categories.neb.id][elements.acknowledgement.id].service_status = self.params.status_mapping[categories.neb.id][elements.service_status.id] + + setmetatable(self, { __index = ScParams }) return self end @@ -640,6 +654,8 @@ function ScParams:check_params() self.params.proxy_password = self.common:if_wrong_type(self.params.proxy_password, "string", "") self.params.connection_timeout = self.common:if_wrong_type(self.params.connection_timeout, "number", 60) self.params.allow_insecure_connection = self.common:number_to_boolean(self.common:check_boolean_number_option_syntax(self.params.allow_insecure_connection, 0)) + self.params.logfile = self.common:ifnil_or_empty(self.params.logfile, "/var/log/centreon-broker/stream-connector.log") + self.params.log_level = self.common:ifnil_or_empty(self.params.log_level, 1) end --- get_kafka_params: retrieve the kafka parameters and store them the self.params.kafka table @@ -663,9 +679,9 @@ end -- @eturn true|false (boolean) function ScParams:is_mandatory_config_set(mandatory_params, params) for index, mandatory_param in ipairs(mandatory_params) do - if not params[mandatory_param] then + if not params[mandatory_param] or params[mandatory_param] == "" then self.logger:error("[sc_param:is_mandatory_config_set]: " .. tostring(mandatory_param) - .. " parameter is not set in the stream connector web configuration") + .. 
" parameter is not set in the stream connector web configuration (or value is empty)") return false end @@ -719,17 +735,10 @@ function ScParams:build_accepted_elements_info() -- list all accepted elements for _, accepted_element in ipairs(self.common:split(self.params.accepted_elements, ",")) do - self.logger:debug("[sc_params:build_accetped_elements_info]: accepted element: " .. tostring(accepted_element)) -- try to find element in known categories - for category_name, category_info in pairs(categories) do - self.logger:debug("[sc_params:build_accetped_elements_info]: category id: " .. tostring(category_info.id)) - for i, v in pairs(self.params.element_mapping) do - self.logger:debug("[sc_params:build_accepted_elements_info]: mapping: " .. tostring(i) .. " value: " .. tostring(v)) - end - + for category_name, category_info in pairs(categories) do if self.params.element_mapping[category_info.id][accepted_element] then -- if found, store information in a dedicated table - self.logger:debug("[sc_params:build_accetped_elements_info] dans le param setup: " .. tostring(self.params.element_mapping[category_info.id][accepted_element])) self.params.accepted_elements_info[accepted_element] = { category_id = category_info.id, category_name = category_name, diff --git a/stream-connectors/modules/docs/sc_logger.md b/stream-connectors/modules/docs/sc_logger.md index d6d0dc0ad95..8b7044d823d 100644 --- a/stream-connectors/modules/docs/sc_logger.md +++ b/stream-connectors/modules/docs/sc_logger.md @@ -26,6 +26,19 @@ The sc_logger module provides methods to help you handle logging in your stream connectors. It has been made in OOP (object oriented programming) +Logs can be configured with two parameters called + +- logfile +- log_level + +there are three different **log_level** going from 1 to 3. Below is the list of the logs message type you can expect with their corresponding **log_level**. 
+
+| log_level | message type                        |
+| --------- | ----------------------------------- |
+| 1         | notice, error                       |
+| 2         | info, warning, notice, error        |
+| 3         | debug, info, warning, notice, error |
+
 ## Best practices
 
 All the stream-connectors-lib are using the following syntax when logging:
diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.3-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.3-1.rockspec
new file mode 100644
index 00000000000..a3e968e2fe4
--- /dev/null
+++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.3-1.rockspec
@@ -0,0 +1,39 @@
+package = "centreon-stream-connectors-lib"
+version = "1.5.3-1"
+source = {
+  url = "git+https://github.com/centreon/centreon-stream-connector-scripts",
+  tag = "1.5.3-1"
+}
+description = {
+  summary = "Centreon stream connectors lua modules",
+  detailed = [[
+    These modules provide helpful methods to create
+    stream connectors for Centreon
+  ]],
+  license = ""
+}
+dependencies = {
+  "lua >= 5.1, < 5.4",
+  "luasocket >= 3.0rc1-2"
+}
+build = {
+  type = "builtin",
+  modules = {
+    ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua",
+    ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua",
+    ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua",
+    ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua",
+    ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua",
+    ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua",
+    ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua",
+    ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua",
+    ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua",
+    ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua",
+    ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua",
+    ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua",
+    ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua",
+    ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua",
+    ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua",
+    ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua"
+  }
+}
From cd3666d5906d681830ae652ed6dd80cedaf6206a Mon Sep 17 00:00:00 2001
From: tcharles 
Date: Fri, 15 Oct 2021 14:42:53 +0200
Subject: [PATCH 088/219] fix splunk json (#79)

avoid having special characters in field names and fix json structure
---
 .../centreon-certified/splunk/splunk-metrics-apiv2.lua | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
index ec8b72099d5..afd842af05e 100644
--- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
+++ 
b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -108,6 +108,7 @@ function EventQueue:format_accepted_event() -- add metrics in the formated event for metric_name, metric_data in pairs(self.sc_metrics.metrics) do + metric_name = string.gsub(metric_name, "[^a-zA-Z0-9_]", "_") self.sc_event.event.formated_event["metric_name:" .. tostring(metric_name)] = metric_data.value end end @@ -155,6 +156,7 @@ function EventQueue:add() index = self.sc_params.params.splunk_index, host = self.sc_params.params.splunk_host, time = self.sc_event.event.last_check, + event = "metric_event", fields = self.sc_event.event.formated_event } From 7709720e6319cc31466226325888d9e54b7151d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Duret?= Date: Mon, 18 Oct 2021 12:07:28 +0200 Subject: [PATCH 089/219] fix Splunk metric event We have to remove the line 'event = "metric_event"' in order to write an index of type "Mesure" --- .../centreon-certified/splunk/splunk-metrics-apiv2.lua | 1 - 1 file changed, 1 deletion(-) diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua index afd842af05e..45ce416fc1b 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -156,7 +156,6 @@ function EventQueue:add() index = self.sc_params.params.splunk_index, host = self.sc_params.params.splunk_host, time = self.sc_event.event.last_check, - event = "metric_event", fields = self.sc_event.event.formated_event } From ad2e993b7c3cd841f18732026f09dfd7088638d5 Mon Sep 17 00:00:00 2001 From: tcharles Date: Fri, 22 Oct 2021 10:42:46 +0200 Subject: [PATCH 090/219] fix wrong variable name --- .../modules/centreon-stream-connectors-lib/sc_macros.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua index ca900af7d48..6e316974f42 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -204,7 +204,7 @@ function ScMacros:replace_sc_macro(string, event, json_string) cache_macro_value = self.sc_common:json_escape(cache_macro_value) end - converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(string.gsub(event_macro_value, "%%", "%%%%"))) + converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(string.gsub(cache_macro_value, "%%", "%%%%"))) else -- if not in cache, try to find a matching value in the event itself event_macro_value = self:get_event_macro(macro, event) @@ -409,4 +409,4 @@ function ScMacros:transform_state(macro_value, event) return self.params.status_mapping[event.category][event.element][macro_value] end -return sc_macros \ No newline at end of file +return sc_macros From 4815a9aa20b3d24a742d0eac5ad1447d60f69663 Mon Sep 17 00:00:00 2001 From: ponchoh Date: Tue, 16 Nov 2021 13:04:40 -0500 Subject: [PATCH 091/219] enh(doc) update links (#81) update Documentation URL's --- stream-connectors/README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/stream-connectors/README.md b/stream-connectors/README.md index 50912d7f38b..9fa96240c2f 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -17,17 +17,17 @@ Here is a list of the 
Centreon powered scripts: | Software | Connectors | Documentations | | -------- | ---------- | -------------- | -| BSM | [BSM Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/bsm) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/bsm.html) | -| ElasticSearch | [ElasticSearch Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/elasticsearch) | [Events Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/elasticsearch-events.html), [Metrics Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/elasticsearch-metrics.html) | +| BSM | [BSM Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/bsm) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-hp-bsm.html) | +| ElasticSearch | [ElasticSearch Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/elasticsearch) | [Events Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-elastic-events.html), [Metrics Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-elastic-metrics.html) | | InfluxDB | [InfluxDB Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/influxdb) | WIP | | NDO | [NDO Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/ndo) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/ndo.html) | -| OMI | [OMI Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/omi) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/hp-omi.html) | -| Opsgenie | [Opsgenie Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/opsgenie) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/opsgenie.html) | -| PagerDuty | [PagerDuty Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/pagerduty) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/pagerduty.html) | +| OMI | [OMI Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/omi) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-hp-omi.html) | +| Opsgenie | [Opsgenie Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/opsgenie) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-opsgenie.html) | +| PagerDuty | [PagerDuty Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/pagerduty) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-pagerduty-events.html) | | Prometheus | [Prometheus Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/prometheus) | WIP | -| ServiceNow | [ServiceNow Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/servicenow) | 
[Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/servicenow.html) | -| Splunk | [Splunk Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/splunk) | [Events Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/splunk-events.html), [Metrics Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/splunk-events.html) | -| Warp10 | [Warp10 Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/warp10) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/warp10.html) | +| ServiceNow | [ServiceNow Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/servicenow) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-service-now-events.html) | +| Splunk | [Splunk Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/splunk) | [Events Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-events.html), [Metrics Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-metrics.html) | +| Warp10 | [Warp10 Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/warp10) | [Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-warp10.html) | Here is a list of the Community powered scripts From 5073baa57e3706850608b7e8ea87c6431c300b2e Mon Sep 17 00:00:00 2001 From: Simon Bomm Date: Thu, 18 Nov 2021 14:59:30 +0100 Subject: [PATCH 092/219] enh(elastic) add events apiv2 (#72) --- .../elasticsearch/elastic-events-apiv2.lua | 297 ++++++++++++++++++ 1 file changed, 297 insertions(+) create mode 100644 stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua new file mode 100644 index 00000000000..f5fa6965ab5 --- /dev/null +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -0,0 +1,297 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Elastic Connector Events +-------------------------------------------------------------------------------- + +-- Libraries +local curl = require("cURL") +local ltn12 = require("ltn12") +local mime = require("mime") + +-- Centreon lua core libraries +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- event_queue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- 
Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "elastic_url", + "elastic_username", + "elastic_password", + "elastic_index_status" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/elastic-events-apiv2.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (data, element) return self:send_data(data, element) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self + end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +--------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") + end + + function EventQueue:format_event_host() + self.sc_event.event.formated_event = { + event_type = "host", + timestamp = self.sc_event.event.last_check, + host = self.sc_event.event.cache.host.name, + output = string.gsub(self.sc_event.event.output, "\n", " "), + status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + state = self.sc_event.event.state, + state_type = self.sc_event.event.state_type + } + end + + function EventQueue:format_event_service() + self.sc_event.event.formated_event = { + event_type = "service", + timestamp = self.sc_event.event.last_check, + host = self.sc_event.event.cache.host.name, + service = self.sc_event.event.cache.service.description, + status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + state = self.sc_event.event.state, + state_type = self.sc_event.event.state_type, + output = string.gsub(self.sc_event.event.output, "\n", " "), + } + end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- + +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + end + + function EventQueue:send_data(data, element) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:info("[send_data]: " .. broker.json_encode(data)) + return true + end + + local http_post_metadata = { + ["index"] = { + ["_index"] = tostring((self.sc_params.params.elastic_index_status)) + } + } + + local http_post_data = broker.json_encode(http_post_metadata) + for _, raw_event in ipairs(data) do + http_post_data = http_post_data .. broker.json_encode(http_post_metadata) .. "\n" .. broker.json_encode(raw_event) .. "\n" + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: Elastic URL is: " .. tostring(self.sc_params.params.elastic_url) .. "/_bulk") + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.sc_params.params.elastic_url .. "/_bulk") + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. 
tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt( + curl.OPT_HTTPHEADER, + { + "content-type: application/json;charset=UTF-8", + "content-length: " .. string.len(http_post_data), + "Authorization: Basic " .. (mime.b64(self.sc_params.params.elastic_username .. ":" .. self.sc_params.params.elastic_password)) + } + ) + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + broker_log:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 200 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + end + + return retval + end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- Fonction write() +function write(event) + -- First, flush all queues if needed (too old or size too big) + queue.sc_flush:flush_all_queues(queue.send_data_method[1]) + + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + return true + end + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + else + return true + end + + -- Since we've added an event to a specific queue, flush it if queue is full + queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) + return true +end From 3a92a842196444c1a04c667d3acf807908798d61 Mon Sep 17 00:00:00 2001 From: Simon Bomm Date: Fri, 26 Nov 2021 09:58:54 +0100 Subject: [PATCH 093/219] (stream) signl4-sc initial release (#84) * (stream)first-commit-signl4-connector * (stream)fix-example-signl4-connector * + review fix and enhancements * enh: replace broker_log by wrapper --- .../pagerduty/pagerduty-events-apiv2.lua | 2 +- .../signl4/signl4-events-apiv2.lua | 313 ++++++++++++++++++ .../splunk/splunk-events-apiv2.lua | 2 +- 3 files changed, 315 insertions(+), 2 deletions(-) create mode 100644 stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index 7feac6e9cd0..6b80e40dd97 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -351,7 +351,7 @@ function EventQueue:send_data(data, element) if (self.sc_params.params.proxy_password ~= '') then http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) else - broker_log:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") end end diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua new file mode 100644 index 00000000000..d3fbd60594d --- /dev/null +++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua @@ -0,0 +1,313 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Signl4 Connector Events +-------------------------------------------------------------------------------- + +-- Libraries +local curl = require("cURL") +local mime = require("mime") + +-- Centreon lua core libraries +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- event_queue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return 
the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "server_address", + "team_secret" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/signl4-events-apiv2.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- force buffer size to 1 to avoid breaking the communication with signl4 (can't send more than one event at once) + params.max_buffer_size = 1 + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.x_s4_source_system = params.x_s4_source_system or "Centreon" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (data, element) return self:send_data(data, element) end + } + + self.state_to_signlstatus_mapping = { + [0] = "resolved", + [1] = "new", + [2] = "new", + [3] = "new" + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self + end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +--------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. 
tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") + end + + -- { + -- "Event Type": "HOST", + -- "Date": "HUMAN READABLE DATE TIME", + -- "Host": "Highway", + -- "Message": "to hell!", + -- "Status": "DOWN", + -- "State": "1", + -- "State Type": "1", + -- "Timestamp": "163[...]542" + -- "X-S4-SourceSystem": "Centreon", + -- "X-S4-AlertingScenario": "multi_ack", #NOT INCLUDED ATM + -- "X-S4-ExternalID": "HOSTALERT_666", + -- "X-S4-Status": "new" + -- } + + function EventQueue:format_event_host() + self.sc_event.event.formated_event = { + "Event Type" = "HOST", + "Date" = self.sc_macros:transform_date(self.sc_event.event.last_check), + "Host" = self.sc_event.event.cache.host.name, + "Message" = string.gsub(self.sc_event.event.output, "\n", " "), + "Status" = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + "State" = self.sc_event.event.state, + "State Type" = self.sc_event.event.state_type, + "Timestamp" = self.sc_event.event.last_check, + "X-S4-SourceSystem" = self.sc_params.params.x_s4_source_system, + "X-S4-ExternalID" = "HOSTALERT_" .. event.host_id, + "X-S4-Status" = self.state_to_signlstatus_mapping[event.state] + } + end + + function EventQueue:format_event_service() + self.sc_event.event.formated_event = { + "Event Type" = "SERVICE", + "Date" = self.sc_macros:transform_date(self.sc_event.event.last_check), + "Host" = self.sc_event.event.cache.host.name, + "Service" = self.sc_event.event.cache.service.description, + "Message" = string.gsub(self.sc_event.event.output, "\n", " "), + "Status" = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + "State" = self.sc_event.event.state, + "State Type" = self.sc_event.event.state_type, + "Timestamp" = self.sc_event.event.last_check, + "X-S4-SourceSystem" = self.sc_params.params.x_s4_source_system, + "X-S4-ExternalID" = "SERVICEALERT_" event.host_id .. "_" .. event.service_id, + "X-S4-Status" = self.state_to_signlstatus_mapping[event.state] + } + end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- + +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. "max is: " .. 
tostring(self.sc_params.params.max_buffer_size)) +end + +function EventQueue:send_data(data, element) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:info("[send_data]: " .. broker.json_encode(data)) + return true + end + + local http_post_data = "" + for _, raw_event in ipairs(data) do + http_post_data = http_post_data .. broker.json_encode(raw_event) .. "\n" + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: Signl4 Server URL is: " .. tostring(self.sc_params.params.server_address) .. "/webhook/" .. tostring(self.sc_params.params.team_secret)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.sc_params.params.server_address .. "/webhook/" .. self.sc_params.params.team_secret) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt( + curl.OPT_HTTPHEADER, + { + "content-type: application/json;charset=UTF-8", + "content-length: " .. string.len(http_post_data) + } + ) + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + -- performing the HTTP request + http_request:perform() + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + -- Handling the return code + local retval = false + if http_response_code == 200 or http_response_code == 201 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. 
tostring(http_response_body)) + end + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- Fonction write() +function write(event) + -- First, flush all queues if needed (too old or size too big) + queue.sc_flush:flush_all_queues(queue.send_data_method[1]) + + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + return true + end + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + else + return true + end + + -- Since we've added an event to a specific queue, flush it if queue is full + queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) + return true +end \ No newline at end of file diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index e917df7eb18..263beb78570 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -217,7 +217,7 @@ function EventQueue:send_data(data, element) if (self.sc_params.params.proxy_password ~= '') then http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. 
self.sc_params.params.proxy_password) else - broker_log:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") end end From c5f26668816ddcea2270ff76c1174fe6299a8757 Mon Sep 17 00:00:00 2001 From: tcharles Date: Fri, 3 Dec 2021 09:41:43 +0100 Subject: [PATCH 094/219] Training course (#88) --- .../training_course/centreon_classroom.lua | 46 ++++++++++ .../training_course/centreon_classroom.md | 90 +++++++++++++++++++ .../training_course/exercices_answers.md | 57 ++++++++++++ .../training_course/exercises.md | 81 +++++++++++++++++ 4 files changed, 274 insertions(+) create mode 100644 stream-connectors/training_course/centreon_classroom.lua create mode 100644 stream-connectors/training_course/centreon_classroom.md create mode 100644 stream-connectors/training_course/exercices_answers.md create mode 100644 stream-connectors/training_course/exercises.md diff --git a/stream-connectors/training_course/centreon_classroom.lua b/stream-connectors/training_course/centreon_classroom.lua new file mode 100644 index 00000000000..c2adbdb53ac --- /dev/null +++ b/stream-connectors/training_course/centreon_classroom.lua @@ -0,0 +1,46 @@ +local centreon_classroom = {} + +local CentreonClassroom = {} + +function centreon_classroom.new(teacher) + local self = {} + + if teacher then + self.teacher = teacher + else + self.teacher = { + first_name = "Minerva", + last_name = "McGonagall", + speciality = "Transfiguration" + } + end + + setmetatable(self, { __index = CentreonClassroom }) + return self +end + +function CentreonClassroom:put_tables(tables) + if not tables then + math.randomseed(os.time()) + self.tables = math.random(1,20) + elseif tables > 20 then + print(tables .. " tables is a bit much, it is a classroom not a stadium") + math.randomseed(os.time()) + self.tables = math.random(1,20) + else + self.tables = tables + end +end + +function CentreonClassroom:put_chairs(chairs) + if not tables then + self:put_tables() + end + + if chairs > self.tables * 2 then + print("there are only " .. tostring(self.tables) .. " tables in the classroom," + .. "you can't have more than 2 chairs per table") + end +end + +return centreon_classroom \ No newline at end of file diff --git a/stream-connectors/training_course/centreon_classroom.md b/stream-connectors/training_course/centreon_classroom.md new file mode 100644 index 00000000000..6d5a587840a --- /dev/null +++ b/stream-connectors/training_course/centreon_classroom.md @@ -0,0 +1,90 @@ +# centreon_classroom module documentation + +- [centreon_classroom module documentation](#centreon_classroom-module-documentation) + - [Introduction](#introduction) + - [Module initialization](#module-initialization) + - [Module constructor](#module-constructor) + - [constructor: Example](#constructor-example) + - [put_tables method](#put_tables-method) + - [put_tables: parameters](#put_tables-parameters) + - [put_tables: example](#put_tables-example) + - [put_chairs method](#put_chairs-method) + - [put_chairs: parameters](#put_chairs-parameters) + - [put_chairs: example](#put_chairs-example) + +## Introduction + +The centreon_classroom module provides methods to help setting up your classroom. 
It has been made in OOP (object oriented programming)
+
+## Module initialization
+
+Since this is OOP, it is required to initiate your module
+
+### Module constructor
+
+Constructor can be initialized with one parameter or it will use a default value.
+
+- teacher. This is a table with teacher information
+
+If you don't provide this parameter it will hire a default teacher
+
+### constructor: Example
+
+```lua
+-- load classroom module
+local centreon_classroom = require("centreon_classroom")
+
+local teacher = {
+  first_name = "Horace",
+  last_name = "Slughorn",
+  speciality = "Potions"
+}
+
+-- create a new instance of the centreon_classroom module
+local classroom = centreon_classroom.new(teacher)
+```
+
+## put_tables method
+
+The **put_tables** method puts tables in the classroom. You can decide how many tables you want, or it will put between 1 and 20 tables in your classroom.
+
+### put_tables: parameters
+
+| parameter | type   | optional | default value |
+| --------- | ------ | -------- | ------------- |
+| tables    | number | yes      |               |
+
+### put_tables: example
+
+```lua
+local tables = 15
+
+classroom:put_tables(tables)
+print(classroom.tables)
+--> it will print 15
+
+classroom:put_tables()
+print(classroom.tables)
+--> it will print a number between 1 and 20
+```
+
+## put_chairs method
+
+The **put_chairs** method adds chairs in your classroom. You can't have more than 2 chairs per table.
+If you don't have any tables in your classroom, it will add tables first and then put 2 chairs per table.
+
+### put_chairs: parameters
+
+| parameter | type   | optional | default value |
+| --------- | ------ | -------- | ------------- |
+| chairs    | number | no       |               |
+
+### put_chairs: example
+
+```lua
+local chairs = 14
+
+classroom:put_chairs(chairs)
+print(classroom.chairs)
+--> result is 14
+```
diff --git a/stream-connectors/training_course/exercices_answers.md b/stream-connectors/training_course/exercices_answers.md
new file mode 100644
index 00000000000..d871549829c
--- /dev/null
+++ b/stream-connectors/training_course/exercices_answers.md
@@ -0,0 +1,57 @@
+# Exercices Answers
+
+- [Exercices Answers](#exercices-answers)
+  - [Exercise 1](#exercise-1)
+  - [Exercise 2](#exercise-2)
+  - [Exercise 3](#exercise-3)
+
+## Exercise 1
+
+You can use the default teacher
+
+```lua
+centreon_classroom = require("centreon_classroom")
+
+local classroom = centreon_classroom.new()
+print(tostring(classroom.teacher.first_name))
+--> will print "Minerva"
+```
+
+Or you can hire your own teacher
+
+```lua
+centreon_classroom = require("centreon_classroom")
+
+local teacher = {
+  first_name = "Sybill",
+  last_name = "Trelawney",
+  speciality = "Divination"
+}
+
+local classroom = centreon_classroom.new(teacher)
+print(tostring(classroom.teacher.first_name))
+--> will print "Sybill"
+```
+
+## Exercise 2
+
+You can let someone else decide how many tables and chairs there will be
+
+```lua
+-- if you do not have tables, using put_chairs will also put tables in the classroom
+classroom:put_chairs()
+print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs))
+--> will print "tables: xx, chairs: yy"
+
+-- or you can first add tables and then add chairs
+classroom:put_tables()
+classroom:put_chairs()
+```
+
+Or you can decide how many tables and chairs you want
+
+```lua
+classroom:put_tables(10)
+classroom:put_chairs(15)
+print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs))
+--> will print "tables: 10, chairs: 15"
+```
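+
+## Exercise 3
+
+The course does not ship a written answer for this one, so here is one possible approach (a sketch, not the only valid solution): check the type of the parameter before using it, and fall back to the default random behaviour when the value is not a number.
+
+```lua
+function CentreonClassroom:put_tables(tables)
+  -- reject anything that is not a number, e.g. put_tables("two")
+  if type(tables) ~= "number" then
+    tables = nil
+  end
+
+  if not tables then
+    math.randomseed(os.time())
+    self.tables = math.random(1,20)
+  elseif tables > 20 then
+    print(tables .. " tables is a bit much, it is a classroom not a stadium")
+    math.randomseed(os.time())
+    self.tables = math.random(1,20)
+  else
+    self.tables = tables
+  end
+end
+```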
diff --git a/stream-connectors/training_course/exercises.md b/stream-connectors/training_course/exercises.md
new file mode 100644
index 00000000000..677a4a210db
--- /dev/null
+++ b/stream-connectors/training_course/exercises.md
@@ -0,0 +1,81 @@
+# centreon_classroom exercises
+
+- [centreon_classroom exercises](#centreon_classroom-exercises)
+  - [Exercise 1](#exercise-1)
+    - [Exercise 1: What you must do](#exercise-1-what-you-must-do)
+    - [Exercise 1: How can you check that it works](#exercise-1-how-can-you-check-that-it-works)
+  - [Exercise 2](#exercise-2)
+    - [Exercise 2: What you must do](#exercise-2--what-you-must-do)
+    - [Exercise 2: How can you check that it works](#exercise-2-how-can-you-check-that-it-works)
+  - [Exercise 3](#exercise-3)
+    - [Exercice 3: What you must do](#exercice-3-what-you-must-do)
+    - [Exercice 3: How can you check that it works](#exercice-3-how-can-you-check-that-it-works)
+
+## Exercise 1
+
+Create a `my_first_lesson.lua` script.
+
+To get your first lesson, you will need a classroom. Luckily, we got you covered.
+In your lua script, you must build a new classroom. To do so, use the centreon_classroom module.
+Maybe the module documentation can help you with that.
+
+### Exercise 1: What you must do
+
+- instantiate a new classroom
+- check if a teacher is in your classroom
+
+### Exercise 1: How can you check that it works
+
+```lua
+print(tostring(classroom.teacher.first_name))
+--> must print the first name of your teacher
+```
+
+## Exercise 2
+
+You have a classroom, maybe you want to sit somewhere. So add at least one table and one chair.
+
+### Exercise 2: What you must do
+
+- add tables in your classroom
+- add chairs in your classroom
+
+### Exercise 2: How can you check that it works
+
+```lua
+  print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs))
+  --> must print "tables: xx, chairs: yy"
+```
+
+## Exercise 3
+
+You do not like numbers and for some reason, you don't want **2** tables but **two** tables.
+
+This means that you are going to use the following method
+
+```lua
+classroom:put_tables("two")
+```
+
+Now that you have tables, you want chairs.
+
+```lua
+classroom:put_chairs()
+```
+
+This is going to break the whole classroom.
+
+### Exercice 3: What you must do
+
+- find a way to handle bad parameters
+
+### Exercice 3: How can you check that it works
+
+```lua
+classroom:put_tables("two")
+classroom:put_chairs()
+
+print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. 
tostring(classroom.chairs))
+
+--> must print "tables: xx, chairs: yy"
+```
From 88825f47569f5130eff6052271d43eafebe84e4a Mon Sep 17 00:00:00 2001
From: tcharles 
Date: Fri, 3 Dec 2021 11:10:52 +0100
Subject: [PATCH 095/219] Training course (#89)

---
 .../training_course/centreon_classroom.lua    |  3 +-
 .../training_course/exercises.md              | 42 +++++++++++++++++++
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/stream-connectors/training_course/centreon_classroom.lua b/stream-connectors/training_course/centreon_classroom.lua
index c2adbdb53ac..df040f71d27 100644
--- a/stream-connectors/training_course/centreon_classroom.lua
+++ b/stream-connectors/training_course/centreon_classroom.lua
@@ -33,7 +33,7 @@ function CentreonClassroom:put_tables(tables)
 end
 
 function CentreonClassroom:put_chairs(chairs)
-  if not tables then
+  if not self.tables then
     self:put_tables()
   end
 
@@ -41,6 +41,7 @@ function CentreonClassroom:put_chairs(chairs)
     print("there are only " .. tostring(self.tables) .. " tables in the classroom,"
       .. "you can't have more than 2 chairs per table")
   end
+  self.chairs = chairs
 end
 
 return centreon_classroom
\ No newline at end of file
diff --git a/stream-connectors/training_course/exercises.md b/stream-connectors/training_course/exercises.md
index 677a4a210db..3f14defe86f 100644
--- a/stream-connectors/training_course/exercises.md
+++ b/stream-connectors/training_course/exercises.md
@@ -10,6 +10,11 @@
   - [Exercise 3](#exercise-3)
     - [Exercice 3: What you must do](#exercice-3-what-you-must-do)
     - [Exercice 3: How can you check that it works](#exercice-3-how-can-you-check-that-it-works)
+  - [Exercise 4](#exercise-4)
+    - [Exercice 4: What you must do](#exercice-4-what-you-must-do)
+    - [Exercice 4: How can you check that it works](#exercice-4-how-can-you-check-that-it-works)
+  - [Exercise 5](#exercise-5)
+    - [Exercice 5: What you must do](#exercice-5-what-you-must-do)
 
 ## Exercise 1
 
@@ -79,3 +84,40 @@
 print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs))
 
 --> must print "tables: xx, chairs: yy"
 ```
+
+## Exercise 4
+
+There is an old legend saying that people must eat and drink in order to survive. We are going to build a cafeteria.
+
+### Exercice 4: What you must do
+
+- create a lua module called centreon_cafeteria
+- a cafeteria must have a cook and a menu.
+  - a menu is made of starters, dishes and desserts
+  - each starter, dish and dessert has a name, a number of calories and a list of ingredients
+  - a cook has a nickname and a favourite dish
+
+### Exercice 4: How can you check that it works
+
+```lua
+local centreon_cafeteria = require("centreon_cafeteria")
+local cafeteria = centreon_cafeteria.new(cook, menu)
+
+print(tostring(cook.nickname))
+--> must print the nickname of your cook
+
+print(tostring(menu.starters[1].name))
+--> must print the name of the first starter
+```
+
+## Exercise 5
+
+We should make sure that we don't serve a dish to a student who is allergic to one of its ingredients. Our cafeteria module will have a method called check_alergy() with two parameters: the dish that our student wants, and the list of ingredients that the student is allergic to.
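+
+For clarity, here is the call shape the method should support. This is only a sketch of the expected behaviour: `check_alergy` is the method you have to write, and the dish and allergy lists below are made-up values.
+
+```lua
+local dish = {
+  name = "fish and chips",
+  calories = 666,
+  ingredients = {"fish", "chips"}
+}
+
+-- must print false: the dish contains "fish"
+print(cafeteria:check_alergy(dish, {"fish", "peanuts"}))
+
+-- must print true: the dish is safe for this student
+print(cafeteria:check_alergy(dish, {"gluten"}))
+```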
+ +### Exercice 5: What you must do + +- create a method called check_alergy in your module +- it needs to have the dish and the list of ingredients that the studend can't eat +- it must return false if there is at least one ingredient that the student can't eat it the dish +- it must return true if the dish is safe for the student + From ced28868d31ffb5d1ad9d58ba7a6f9fc9b0e85ac Mon Sep 17 00:00:00 2001 From: Simon Bomm Date: Fri, 3 Dec 2021 19:04:30 +0100 Subject: [PATCH 096/219] (stream)signl4 bugfix and json simplification (#87) * (stream)signl4 bugfix and json simplification * - info instead of debug --- .../signl4/signl4-events-apiv2.lua | 92 ++++++++----------- 1 file changed, 36 insertions(+), 56 deletions(-) diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua index d3fbd60594d..64afeb89fc2 100644 --- a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua @@ -33,7 +33,6 @@ function EventQueue.new(params) local self = {} local mandatory_parameters = { - "server_address", "team_secret" } @@ -60,8 +59,9 @@ function EventQueue.new(params) -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs self.sc_params.params.accepted_categories = params.accepted_categories or "neb" self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.server_address = params.server_address or "https://connect.signl4.com" self.sc_params.params.x_s4_source_system = params.x_s4_source_system or "Centreon" - + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() @@ -124,55 +124,36 @@ function EventQueue:format_accepted_event() self:add() self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") - end +end - -- { - -- "Event Type": "HOST", - -- "Date": "HUMAN READABLE DATE TIME", - -- "Host": "Highway", - -- "Message": "to hell!", - -- "Status": "DOWN", - -- "State": "1", - -- "State Type": "1", - -- "Timestamp": "163[...]542" - -- "X-S4-SourceSystem": "Centreon", - -- "X-S4-AlertingScenario": "multi_ack", #NOT INCLUDED ATM - -- "X-S4-ExternalID": "HOSTALERT_666", - -- "X-S4-Status": "new" - -- } - - function EventQueue:format_event_host() - self.sc_event.event.formated_event = { - "Event Type" = "HOST", - "Date" = self.sc_macros:transform_date(self.sc_event.event.last_check), - "Host" = self.sc_event.event.cache.host.name, - "Message" = string.gsub(self.sc_event.event.output, "\n", " "), - "Status" = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - "State" = self.sc_event.event.state, - "State Type" = self.sc_event.event.state_type, - "Timestamp" = self.sc_event.event.last_check, - "X-S4-SourceSystem" = self.sc_params.params.x_s4_source_system, - "X-S4-ExternalID" = "HOSTALERT_" .. 
event.host_id, - "X-S4-Status" = self.state_to_signlstatus_mapping[event.state] - } - end +function EventQueue:format_event_host() + self.sc_event.event.formated_event = { + EventType = "HOST", + Date = self.sc_macros:transform_date(self.sc_event.event.last_check), + Host = self.sc_event.event.cache.host.name, + Message = string.gsub(self.sc_event.event.output, "\n", " "), + Status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + Title = "HOST ALERT:" .. self.sc_event.event.cache.host.name .. " is " .. self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["X-S4-SourceSystem"] = self.sc_params.params.x_s4_source_system, + ["X-S4-ExternalID"] = "HOSTALERT_" .. self.sc_event.event.host_id, + ["X-S4-Status"] = self.state_to_signlstatus_mapping[self.sc_event.event.state] + } +end - function EventQueue:format_event_service() - self.sc_event.event.formated_event = { - "Event Type" = "SERVICE", - "Date" = self.sc_macros:transform_date(self.sc_event.event.last_check), - "Host" = self.sc_event.event.cache.host.name, - "Service" = self.sc_event.event.cache.service.description, - "Message" = string.gsub(self.sc_event.event.output, "\n", " "), - "Status" = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - "State" = self.sc_event.event.state, - "State Type" = self.sc_event.event.state_type, - "Timestamp" = self.sc_event.event.last_check, - "X-S4-SourceSystem" = self.sc_params.params.x_s4_source_system, - "X-S4-ExternalID" = "SERVICEALERT_" event.host_id .. "_" .. event.service_id, - "X-S4-Status" = self.state_to_signlstatus_mapping[event.state] - } - end +function EventQueue:format_event_service() + self.sc_event.event.formated_event = { + EventType = "SERVICE", + Date = self.sc_macros:transform_date(self.sc_event.event.last_check), + Host = self.sc_event.event.cache.host.name, + Service = self.sc_event.event.cache.service.description, + Message = string.gsub(self.sc_event.event.output, "\n", " "), + Status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + Title = "SERVICE ALERT:" .. self.sc_event.event.cache.host.name .. "/" .. self.sc_event.event.cache.service.description .. " is " .. self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["X-S4-SourceSystem"] = self.sc_params.params.x_s4_source_system, + ["X-S4-ExternalID"] = "SERVICEALERT_" .. self.sc_event.event.host_id .. "_" .. self.sc_event.event.service_id, + ["X-S4-Status"] = self.state_to_signlstatus_mapping[self.sc_event.event.state] + } +end -------------------------------------------------------------------------------- -- EventQueue:add, add an event to the sending queue @@ -195,18 +176,18 @@ end function EventQueue:send_data(data, element) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") - + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:info("[send_data]: " .. broker.json_encode(data)) return true end - + local http_post_data = "" for _, raw_event in ipairs(data) do - http_post_data = http_post_data .. broker.json_encode(raw_event) .. "\n" + http_post_data = broker.json_encode(raw_event) .. 
"\n" end - + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) self.sc_logger:info("[EventQueue:send_data]: Signl4 Server URL is: " .. tostring(self.sc_params.params.server_address) .. "/webhook/" .. tostring(self.sc_params.params.team_secret)) @@ -223,8 +204,7 @@ function EventQueue:send_data(data, element) :setopt( curl.OPT_HTTPHEADER, { - "content-type: application/json;charset=UTF-8", - "content-length: " .. string.len(http_post_data) + "content-type: application/json", } ) -- set proxy address configuration @@ -310,4 +290,4 @@ function write(event) -- Since we've added an event to a specific queue, flush it if queue is full queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) return true -end \ No newline at end of file +end From 34ae6145d56337caa20cf67c81de3b21e9ecb2ac Mon Sep 17 00:00:00 2001 From: Simon Bomm Date: Fri, 3 Dec 2021 19:25:59 +0100 Subject: [PATCH 097/219] (lib)sc_macro fix buggy transform_date function (#86) --- .../modules/centreon-stream-connectors-lib/sc_macros.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua index 6e316974f42..3668c4a049a 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -371,7 +371,7 @@ end -- @param macro_value (number) the timestamp that needs to be converted -- @return date (string) the converted timestamp function ScMacros:transform_date(macro_value) - return os.date(self.params.timestamp_conversion_format, os.time(os.date("!*t", macro_value) + self.params.local_time_diff_from_utc)) + return os.date(self.params.timestamp_conversion_format, os.time(os.date("!*t", macro_value + self.params.local_time_diff_from_utc))) end --- transform_short: mostly used to convert the event output into a short output by keeping only the data before the new line From d96157819a6b43b3dd134d98a81442aac1461eaa Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 9 Dec 2021 14:55:14 +0100 Subject: [PATCH 098/219] Traning course plus (#90) --- .../training_course/answers/build_school.lua | 141 ++++++++++++++ .../answers/centreon_cafeteria.lua | 96 ++++++++++ .../answers/centreon_school.lua | 168 +++++++++++++++++ .../training_course/centreon_classroom.lua | 4 + .../training_course/exercises.md | 176 ++++++++++++++---- .../training_course/sport_facilities.json | 47 +++++ 6 files changed, 594 insertions(+), 38 deletions(-) create mode 100644 stream-connectors/training_course/answers/build_school.lua create mode 100644 stream-connectors/training_course/answers/centreon_cafeteria.lua create mode 100644 stream-connectors/training_course/answers/centreon_school.lua create mode 100644 stream-connectors/training_course/sport_facilities.json diff --git a/stream-connectors/training_course/answers/build_school.lua b/stream-connectors/training_course/answers/build_school.lua new file mode 100644 index 00000000000..bb27efae642 --- /dev/null +++ b/stream-connectors/training_course/answers/build_school.lua @@ -0,0 +1,141 @@ +#!/usr/bin/lua + +-- load required dependencies +local JSON = require("JSON") +local centreon_classroom = require("centreon_classroom") +local centreon_cafeteria = require("centreon_cafeteria") +local centreon_school = require("centreon_school") + +-- hire our first 
teacher +local first_teacher = { + first_name = "John", + last_name = "Doe", + speciality = "Maths" +} +-- build our first classroom +local first_classroom = centreon_classroom.new(first_teacher) + +-- put chairs and tables in our classroom +first_classroom:put_tables(13) +first_classroom:put_chairs(26) + +-- hire our second teacher +local second_teacher = { + first_name = "Jane", + last_name = "Doe", + speciality = "History" +} +-- build our second classroom +local second_classroom = centreon_classroom.new(second_teacher) + +-- put chairs and tables in our classroom +second_classroom:put_tables(5) +second_classroom:put_chairs(10) + +-- hire our third teacher +local third_teacher = { + first_name = "Robert", + last_name = "Bridge", + speciality = "Chemistry" +} +-- build our third classroom +local third_classroom = centreon_classroom.new(third_teacher) + +-- put chairs and tables in our classroom +third_classroom:put_tables(16) +third_classroom:put_chairs(32) + +-- hire a cook +local cook = { + nickname = "SpicyBob", + favourite_dish = "water" +} + +-- create a menu +local menu = { + starters = { + ["apple pie"] = { + name = "apple pie", + calories = 35, + ingredients = {"apple", "pie"} + }, + ["oignon soup"] = { + name = "oignon soup", + calories = 64, + ingredients = {"oignon", "soup"} + } + }, + dishes = { + ["fish and chips"] = { + name = "fish and chips", + calories = 666, + ingredients = {"fish", "chips"} + }, + ["mashed potatoes"] = { + name = "mashed potatoes", + calories = 25, + ingredients = {"potatoes", "milk"} + } + }, + desserts = { + ["cheese cake"] = { + name = "cheese cake", + calories = 251, + ingredients = {"cheese", "cake"} + }, + ["ice cream"] = { + name = "ice cream", + calories = 353, + ingredients = {"ice", "cream"} + } + } +} + +-- build our cafeteria +local cafeteria = centreon_cafeteria.new(menu, cook) + +-- add all classrooms in a table +local classrooms = { + first_classroom, + second_classroom, + third_classroom +} + +-- chose a city in which the school will be build +local city = { + country = "USA", + state = "Louisiana", + name = "New Orleans" +} + +-- build our school +local school = centreon_school.new(classrooms, cafeteria, city) + +-- display the capacity of the school +print("school capacity: " .. school:get_capacity()) + +-- get the school latitude and longitude +local school_location = JSON:decode(school:get_school_geocoordinates()) +-- store them in the appropriate table inside our school object +school.city.lat = school_location[1].lat +school.city.lon = school_location[1].lon + +-- open the list of facilities +local sport_facilities_file = io.open("/tmp/sport_facilities.json", "r") +-- read the content of the file and store it +local file_content = sport_facilities_file:read("*a") +-- close the file +io.close(sport_facilities_file) + +-- decode the list of facilities +local sport_facilities = JSON:decode(file_content) +-- try to find the best facility +local code, sport_facility = school:get_nearest_sport_facility(sport_facilities) + +-- print the result with the appropriate message +if code then + print("facility name is: " .. sport_facility.name .. ". Distance from school is: " .. sport_facility.distance .. "m") +else + print("no sport for our children, we should find new partnership with facilities near: " .. 
school.city.name) +end + diff --git a/stream-connectors/training_course/answers/centreon_cafeteria.lua b/stream-connectors/training_course/answers/centreon_cafeteria.lua new file mode 100644 index 00000000000..a340bea536b --- /dev/null +++ b/stream-connectors/training_course/answers/centreon_cafeteria.lua @@ -0,0 +1,96 @@ +-- initiate centreon_cafeteria object +local centreon_cafeteria = {} +local CentreonCafeteria = {} + +-- begin the centreon_cafeteria constructor +function centreon_cafeteria.new(menu, cook) + local self = {} + + -- use the hired cook or hire one if there is none + if cook then + self.cook = cook + else + self.cook = { + nickname = "Ratatouille", + favourite_dish = "Apple pie" + } + end + + -- use provided menu or use a default one is there is none + if menu then + self.menu = menu + else + self.menu = { + starters = { + ["apple pie"] = { + name = "apple pie", + calories = 35, + ingredients = {"apple", "pie"} + }, + ["oignon soup"] = { + name = "oignon soup", + calories = 64, + ingredients = {"oignon", "soup"} + } + }, + dishes = { + ["fish and chips"] = { + name = "fish and chips", + calories = 666, + ingredients = {"fish", "chips"} + }, + ["mashed potatoes"] = { + name = "mashed potatoes", + calories = 25, + ingredients = {"potatoes", "milk"} + } + }, + desserts = { + ["cheese cake"] = { + name = "cheese cake", + calories = 251, + ingredients = {"cheese", "cake"} + }, + ["ice cream"] = { + name = "ice cream", + calories = 353, + ingredients = {"ice", "cream"} + } + } + } + end + + -- end the constructor + setmetatable(self, { __index = CentreonCafeteria }) + return self +end + + +function CentreonCafeteria:check_alergy(dish, alergies) + -- find dish + local type = false + + if self.menu.starters[dish] then + type = "starters" + elseif self.menu.dishes[dish] then + type = "dishes" + elseif self.menu.desserts[dish] then + type = "desserts" + end + + if not type then + return false, "dish: " .. tostring(dish) .. " is not on the menu today." + end + + for index, customer_ingredient in pairs(alergies) do + for key, dish_ingredient in pairs(self.menu[type][dish].ingredients) do + if customer_ingredient == dish_ingredient then + return false, "you are alergic to: " .. tostring(customer_ingredient) .. " and there is: " .. tostring(dish_ingredient) .. " in the dish: " .. tostring(dish) + end + end + end + + return true, "Here is your: " .. 
tostring(dish)
+end
+
+return centreon_cafeteria
\ No newline at end of file
diff --git a/stream-connectors/training_course/answers/centreon_school.lua b/stream-connectors/training_course/answers/centreon_school.lua
new file mode 100644
index 00000000000..6ae13afe49c
--- /dev/null
+++ b/stream-connectors/training_course/answers/centreon_school.lua
@@ -0,0 +1,168 @@
+-- load required dependencies
+local curl = require("cURL")
+local JSON = require("JSON")
+
+
+-- initiate centreon_school object
+local centreon_school = {}
+local CentreonSchool = {}
+
+-- beginning of the constructor
+function centreon_school.new(classrooms, cafeteria, city)
+  local self = {}
+
+  -- create a default city if there's none
+  if not city or type(city) ~= "table" then
+    self.city = {
+      country = "France",
+      state = "Landes",
+      name = "Mont de Marsan"
+    }
+  else
+    self.city = city
+  end
+
+  -- store classrooms and cafeteria inside the school object
+  self.classrooms = classrooms
+  self.cafeteria = cafeteria
+
+  -- end of constructor
+  setmetatable(self, { __index = CentreonSchool })
+  return self
+end
+
+-- get capacity method
+function CentreonSchool:get_capacity()
+  -- one chair per person
+  local chairs_number = 0
+
+  -- count each chair in each classroom
+  for index, classroom in ipairs(self.classrooms) do
+    chairs_number = chairs_number + classroom.chairs
+  end
+
+  -- return the number of chairs, which is equal to the maximum capacity of the school
+  return chairs_number
+end
+
+-- get school geocoordinates method
+function CentreonSchool:get_school_geocoordinates()
+  -- using openstreetmap to get lat and lon of our school
+  local openstreetmap = "https://nominatim.openstreetmap.org"
+  -- remove " " from names and replace it with "-" to build the OSM query
+  local query = "/search?q=" .. string.gsub(self.city.name, " ", "-")
+    .. "-" .. string.gsub(self.city.state, " ", "-")
+    .. "-" .. string.gsub(self.city.country, " ", "-")
+    .. "&format=json&polygon=1&addressdetails=1"
+
+  local url = openstreetmap .. query
+
+  -- create curl object
+  local http_response_body = ""
+  local http_request = curl.easy()
+    -- use the url we just built
+    :setopt_url(url)
+    -- store curl body result inside a dedicated variable
+    :setopt_writefunction(
+      function (response)
+        http_response_body = tostring(response)
+      end
+    )
+    -- add a timeout to the connection
+    :setopt(curl.OPT_TIMEOUT, 60)
+    -- make sure we check the certificates
+    :setopt(curl.OPT_SSL_VERIFYPEER, true)
+    -- add the user-agent header so we don't get blocked
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36"
+      }
+    )
+
+  -- run the query
+  http_request:perform()
+  -- store http code
+  http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+  -- close curl object
+  http_request:close()
+  -- return result (we could at least check http return code before doing that)
+  return http_response_body
+end
+
+-- get nearest sport facility. It requires a table with all the sport facilities
+function CentreonSchool:get_nearest_sport_facility(sport_facilities_list)
+  -- use project OSRM to get routing info from OSM
+  local routing_osm = "https://router.project-osrm.org"
+  -- kids do not drive so they are going to walk
+  local endpoint = "/route/v1/foot/"
+  local option = "overview=false"
+
+  -- at the moment, we do not have any best facility
+  local best_facility = {
+    name = nil,
+    distance = nil
+  }
+
+  local result
+  -- create curl object
+  local http_request = curl.easy()
+    -- store curl response body
+    :setopt_writefunction(
+      function (response)
+        http_response_body = tostring(response)
+      end
+    )
+    -- add a connection timeout
+    :setopt(curl.OPT_TIMEOUT, 60)
+    -- make sure we check the certificates
+    :setopt(curl.OPT_SSL_VERIFYPEER, true)
+    -- add user-agent header to not be blocked
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36"
+      }
+    )
+
+  -- we are going to get the distance from our school to every sport facility
+  for index, facility in ipairs(sport_facilities_list.facilities) do
+    -- build the OSRM query
+    query = endpoint .. self.city.lon .. "," .. self.city.lat .. ";" .. facility.lon .. "," .. facility.lat
+
+    -- add the url to our curl object
+    http_request:setopt(curl.OPT_URL, routing_osm .. query .. "?" .. option)
+    -- run the curl query
+    http_request:perform()
+
+    -- get the http return code
+    http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+    -- decode result (should check before if it is a json to avoid getting an error)
+    result = JSON:decode(http_response_body)
+
+    -- apparently kids don't know how to walk over water so they can't go to some specific facilities
+    if result.code ~= "Ok" or http_response_code ~= 200 then
+      print("can't use facility located in: " .. tostring(facility.comment))
+    -- if there is a walkable route to the facility, this might be the good one
+    else
+      -- only store the facility info in the best_facility table if it is the best one
+      if best_facility.distance == nil or result.routes[1].distance < best_facility.distance then
+        best_facility.name = facility.name
+        best_facility.distance = result.routes[1].distance
+      end
+    end
+  end
+
+  -- do not forget to close the curl object when all the queries are done
+  http_request:close()
+
+  -- maybe there wasn't any facility that could be reached by the kids
+  if not best_facility.name then
+    return false, best_facility
+  end
+
+  return true, best_facility
+end
+
+
+return centreon_school
\ No newline at end of file
diff --git a/stream-connectors/training_course/centreon_classroom.lua b/stream-connectors/training_course/centreon_classroom.lua
index df040f71d27..b7333c9a244 100644
--- a/stream-connectors/training_course/centreon_classroom.lua
+++ b/stream-connectors/training_course/centreon_classroom.lua
@@ -41,6 +41,10 @@ function CentreonClassroom:put_chairs(chairs)
     print("there are only " .. tostring(self.tables) .. " tables in the classroom," .. 
"you can't have more than 2 chairs per table") end +<<<<<<< HEAD + +======= +>>>>>>> 30c2cb636253a2c64a94821b02f96ff1c5e231be self.chairs = chairs end diff --git a/stream-connectors/training_course/exercises.md b/stream-connectors/training_course/exercises.md index 3f14defe86f..8db3f51710b 100644 --- a/stream-connectors/training_course/exercises.md +++ b/stream-connectors/training_course/exercises.md @@ -1,22 +1,44 @@ -# centreon_classroom exercices - -- [centreon_classroom exercices](#centreon_classroom-exercices) - - [Exercise 1](#exercise-1) - - [Exercise 1: What you must do](#exercise-1-what-you-must-do) - - [Exercise 1: How can you check that it works](#exercise-1-how-can-you-check-that-it-works) - - [Exercise 2](#exercise-2) - - [Exercise 2: What you must do](#exercise-2--what-you-must-do) - - [Exercise 2: How can you check that it works](#exercise-2-how-can-you-check-that-it-works) - - [Exercise 3](#exercise-3) - - [Exercice 3: What you must do](#exercice-3-what-you-must-do) - - [Exercice 3: How can you check that it works](#exercice-3-how-can-you-check-that-it-works) - - [Exercise 4](#exercise-4) - - [Exercice 4: What you must do](#exercice-4-what-you-must-do) - - [Exercice 4: How can you check that it works](#exercice-4-how-can-you-check-that-it-works) - - [Exercise 5](#exercise-5) - - [Exercice 5: What you must do](#exercice-5-what-you-must-do) - -## Exercise 1 +# centreon_classroom Exercises + +- [centreon_classroom Exercises](#centreon_classroom-exercises) + - [CHAPTER 1: use a module](#chapter-1-use-a-module) + - [Exercise 1](#exercise-1) + - [Exercise 1: What you must do](#exercise-1-what-you-must-do) + - [Exercise 1: How can you check that it works](#exercise-1-how-can-you-check-that-it-works) + - [Exercise 2](#exercise-2) + - [Exercise 2: What you must do](#exercise-2--what-you-must-do) + - [Exercise 2: How can you check that it works](#exercise-2-how-can-you-check-that-it-works) + - [Exercise 3](#exercise-3) + - [Exercise 3: What you must do](#exercise-3-what-you-must-do) + - [Exercise 3: How can you check that it works](#exercise-3-how-can-you-check-that-it-works) + - [CHAPTER 2: create your lua module](#chapter-2-create-your-lua-module) + - [Exercise 4](#exercise-4) + - [Exercise 4: What you must do](#exercise-4-what-you-must-do) + - [Exercise 4: How can you check that it works](#exercise-4-how-can-you-check-that-it-works) + - [Exercise 5](#exercise-5) + - [Exercise 5: What you must do](#exercise-5-what-you-must-do) + - [CHAPTER 6: module interactions](#chapter-6-module-interactions) + - [Exercise 6](#exercise-6) + - [Exercise 6: What you must do](#exercise-6-what-you-must-do) + - [Exercise 7](#exercise-7) + - [Exercise 7: What you must do](#exercise-7-what-you-must-do) + - [Exercie 7: important information](#exercie-7-important-information) + - [Exercise 8](#exercise-8) + - [Exercise 8: What you must do](#exercise-8-what-you-must-do) + - [Exercise 8: how to succeed](#exercise-8-how-to-succeed) + - [Exercise 9](#exercise-9) + - [Exercise 9: What you must do](#exercise-9-what-you-must-do) + - [Exercise 9: How to succeed](#exercise-9-how-to-succeed) + +## CHAPTER 1: use a module + +What you will learn: + +- use a simple lua module using its documentation +- use its methods +- improve lua codes + +### Exercise 1 Create a `my_first_lesson.lua` script. @@ -24,35 +46,35 @@ To get your first lesson, you will need a classroom. Luckily, we got you covered In your lua script, you must build a new classroom. To do so, use the centreon_classroom module. 
Maybe this module documentation can help you go through that

-### Exercise 1: What you must do
+#### Exercise 1: What you must do

- instantiate a new classroom
- check if a teacher is in your classroom

-### Exercise 1: How can you check that it works
+#### Exercise 1: How can you check that it works

```lua
print(tostring(classroom.teacher.first_name))
--> must print the first name of your teacher
```

-## Exercise 2
+### Exercise 2

You have a classroom, maybe you want to sit somewhere. So add at least one table and one chair

-### Exercise 2: What you must do
+#### Exercise 2: What you must do

- add tables in your classroom
- add chairs in your classroom

-### Exercise 2: How can you check that it works
+#### Exercise 2: How can you check that it works

```lua
print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs))
--> must print "tables: xx, chairs: yy"
```

-## Exercise 3
+### Exercise 3

You do not like numbers and for some reason, you don't want **2** tables but **two** tables

@@ -70,11 +92,11 @@ classroom:put_chairs()

This is going to break all the classroom.

-### Exercice 3: What you must do
+#### Exercise 3: What you must do

- find a way to handle bad parameters

-### Exercice 3: How can you check that it works
+#### Exercise 3: How can you check that it works

```lua
classroom:put_tables("two")
@@ -85,11 +107,19 @@ print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(class
--> must print "tables: xx, chairs: yy"
```

-## Exercise 4
+## CHAPTER 2: create your lua module
+
+What you will learn:
+
+- create your first lua module
+- create your first method
+- looping through tables
+
+### Exercise 4

There is an old legend saying that people must eat and drink in order to survive. We are going to build a cafeteria

-### Exercice 4: What you must do
+#### Exercise 4: What you must do

- create a lua module called centreon_cafeteria
- a cafeteria must have a cook and a menu.
- each starter, dish and dessert has a name, a number of calories and a list of ingredients
- a cook has a nickname and a favourite dish

-### Exercice 4: How can you check that it works
+#### Exercise 4: How can you check that it works

```lua
local centreon_cafeteria = require("centreon_cafeteria")
local cafeteria = centreon_cafeteria.new(cook, menu)

print(tostring(cook.nickname))
--> must print the nickname of your cook

-print(tostring(menu.starters[1].name))
---> must print the name of the first dishes
+print(tostring(menu.starters["duck soup"].name))
+--> must print the name of the dish "duck soup"
```

-## Exercise 5
+### Exercise 5

We should make sure that we don't serve dishes to students that are alergic to one of their ingredients. Our cafeteria module will have a method called check_alergy() that has two parameters, the dish that our student wants and the list of ingredients that the student is alergic to. 
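As a hint for the rewritten exercise above, here is a minimal, self-contained sketch of the kind of nested lookup check_alergy() is expected to perform. It is not the official answer: the dish and ingredient names are made up, and the helper name has_forbidden_ingredient is hypothetical.

```lua
-- standalone sketch: scan a dish's ingredient list against a student's alergy list
local function has_forbidden_ingredient(dish_ingredients, alergies)
  for _, alergy in ipairs(alergies) do
    for _, ingredient in ipairs(dish_ingredients) do
      -- the first match is enough to refuse the dish
      if alergy == ingredient then
        return true, ingredient
      end
    end
  end
  return false
end

print(has_forbidden_ingredient({"fish", "chips"}, {"fish"}))      --> true   fish
print(has_forbidden_ingredient({"potatoes", "milk"}, {"fish"}))   --> false
```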
-

+
+#### Exercise 5: What you must do

- create a method called check_alergy in your module
-- it needs to have the dish and the list of ingredients that the studend can't eat
-- it must return false if there is at least one ingredient that the student can't eat it the dish
-- it must return true if the dish is safe for the student
+- it needs to have the dish and the list of ingredients that the student can't eat as parameters
+- it must return false and a message if there is at least one ingredient that the student can't eat in the dish or if the dish doesn't exist
+- it must return true and a message if the dish is safe for the student
+
+## CHAPTER 6: module interactions
+
+What you will learn:
+
+- use external modules such as lua-json
+- work with basic API using cURL
+- create/update an object
+- interactions between objects
+
+### Exercise 6
+
+We know how to build a classroom and a cafeteria. Nothing is stopping us from building our school.
+A school needs three or more classrooms, one cafeteria and a city to be built in.
+
+#### Exercise 6: What you must do
+
+- create a lua module called centreon_school
+- a school must have three parameters
+  - a list of classrooms
+  - one cafeteria
+  - a city that is characterised by the following information (you must use a real city):
+    - country
+    - state
+    - name
+
+### Exercise 7
+
+The city mayor needs to know how many children can attend classes in your school.
+
+#### Exercise 7: What you must do
+
+- create a method called get_capacity() that returns the number of children that can sit in your school
+
+#### Exercie 7: important information
+
+The maximum number of children is equal to the number of chairs in all classrooms. We are not monsters, they will not learn by standing up all day.
+
+### Exercise 8
+
+We want the children that attend classes in our school to be as healthy as possible. To do so, we need to send them to the closest sport facility. First of all, we need to know the geo coordinates of our school
+
+#### Exercise 8: What you must do
+
+- create a method called get_school_geocoordinates() that returns a table with the latitude and the longitude
+- you must use the following user agent http header with curl to not be blocked **user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36**
+
+#### Exercise 8: how to succeed
+
+- to get your locations data, you can use the following url where:
+  - Mont de Marsan is the city name
+  - Landes is the state
+  - France is the country
+
+https://nominatim.openstreetmap.org/search?q=mont-de-marsan-landes-france&format=json&polygon=1&addressdetails=1
+
+- your script needs to use a json parser, the answers use lua-json.
+
+### Exercise 9
+
+Find the closest sport facility. Our administrative department concluded a partnership with 7 sport facilities across the world. Our children may have to walk a few thousand kilometers in order to play basketball but if they don't attend their sport class, they are going to be expelled from our school!
+
+#### Exercise 9: What you must do
+
+- use the sport_facilities.json file to locate every sport facility
+- return the name of the closest sport facility and the distance the children will have to walk to get there
+
+#### Exercise 9: How to succeed
+- depending on where your school is located, there might not be any facility where children can go. 
You should handle that specific case +- to get distance information between your school and the sport_facilities you can use the following api documentation http://project-osrm.org/docs/v5.24.0/api/# diff --git a/stream-connectors/training_course/sport_facilities.json b/stream-connectors/training_course/sport_facilities.json new file mode 100644 index 00000000000..97009e44149 --- /dev/null +++ b/stream-connectors/training_course/sport_facilities.json @@ -0,0 +1,47 @@ +{ + "facilities": + [ + { + "name": "Marco Polo", + "lat": 35.078807, + "lon": -106.59378, + "comment": "North America" + }, + { + "name": "Christopher Columbus", + "lat": -33.483838, + "lon": -70.631712, + "comment": "South America" + }, + { + "name": "Amerigo Vespucci", + "lat": 35.25125182, + "lon": 25.125182, + "comment": "Europe" + }, + { + "name": "John Cabot", + "lat": -1.285892, + "lon": 36.897156, + "comment": "Africa" + }, + { + "name": "Ferdinand Magellan", + "lat": 22.571086, + "lon": 88.366460, + "comment": "India" + }, + { + "name": "James Cook", + "lat": 62.011106, + "lon": -6.775236, + "comment": "Feroe Island" + }, + { + "name": "Vasco da Gama", + "lat": -18.138824, + "lon": -178.425807, + "comment": "Fidji Island" + } + ] +} \ No newline at end of file From f1ca85850de290d2df911fad48b0cd8f7bae3f96 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 9 Dec 2021 15:16:21 +0100 Subject: [PATCH 099/219] Update centreon_classroom.lua --- stream-connectors/training_course/centreon_classroom.lua | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/stream-connectors/training_course/centreon_classroom.lua b/stream-connectors/training_course/centreon_classroom.lua index b7333c9a244..04b272cdfe1 100644 --- a/stream-connectors/training_course/centreon_classroom.lua +++ b/stream-connectors/training_course/centreon_classroom.lua @@ -41,11 +41,8 @@ function CentreonClassroom:put_chairs(chairs) print("there are only " .. tostring(self.tables) .. " tables in the classroom," .. 
"you can't have more than 2 chairs per table") end -<<<<<<< HEAD - -======= ->>>>>>> 30c2cb636253a2c64a94821b02f96ff1c5e231be + self.chairs = chairs end -return centreon_classroom \ No newline at end of file +return centreon_classroom From 2d9e6f1ad74a873a4dc4d9458828bcb655b56ac5 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 9 Dec 2021 17:28:31 +0100 Subject: [PATCH 100/219] Update exercises.md --- stream-connectors/training_course/exercises.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/training_course/exercises.md b/stream-connectors/training_course/exercises.md index 8db3f51710b..1af610d6fea 100644 --- a/stream-connectors/training_course/exercises.md +++ b/stream-connectors/training_course/exercises.md @@ -133,10 +133,10 @@ There is an old legend saying that people must eat and drink in order to survive local centreon_cafeteria = require("centreon_cafeteria") local cafeteria = centreon_cafeteria.new(cook, menu) -print(tostring(cook.nickname)) +print(tostring(cafeteria.cook.nickname)) --> must print the nickname of your cook -print(tostring(menu.starters["duck soup"].name)) +print(tostring(cafeteria.menu.starters["duck soup"].name)) --> must print the name of the dish "duck soup" ``` From 04ba41726408324b46f77626122d704c30df82ca Mon Sep 17 00:00:00 2001 From: tcharles Date: Fri, 10 Dec 2021 10:15:20 +0100 Subject: [PATCH 101/219] exercise 3 answer --- .../training_course/exercices_answers.md | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/stream-connectors/training_course/exercices_answers.md b/stream-connectors/training_course/exercices_answers.md index d871549829c..d3a360cbe02 100644 --- a/stream-connectors/training_course/exercices_answers.md +++ b/stream-connectors/training_course/exercices_answers.md @@ -55,3 +55,24 @@ classroom:put_chairs(15) print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs)) --> will print "tables: 10, chairs: 15" ``` + +## Exercise 3 + +you need to add a "security" layer in the centreon_school module. The table parameter must be a number so we are going to make sure people call the put_tables method with a number and nothing else. + +```lua +function CentreonClassroom:put_tables(tables) + if not tables or type(tables) ~= "number" then + math.randomseed(os.time()) + self.tables = math.random(1,20) + elseif tables > 20 then + print(tables .. " tables is a bit much, it is a classroom not a stadium") + math.randomseed(os.time()) + self.tables = math.random(1,20) + else + self.tables = tables + end +end +``` + +In the above example, we've added a check that says if the type of the "tables" variables is not a number, then we are going to ignore it and add a random number of tables in the classroom. 
From 732e624364f6519d574895b8a88246d8c03fc6d1 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Fri, 10 Dec 2021 10:17:49 +0100
Subject: [PATCH 102/219] exercises 4 and 5 answers

---
 .../answers/centreon_cafeteria.lua | 21 ++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/stream-connectors/training_course/answers/centreon_cafeteria.lua b/stream-connectors/training_course/answers/centreon_cafeteria.lua
index a340bea536b..119c81ff8c4 100644
--- a/stream-connectors/training_course/answers/centreon_cafeteria.lua
+++ b/stream-connectors/training_course/answers/centreon_cafeteria.lua
@@ -1,3 +1,8 @@
+--[[
+----------------- EXERCISE 4 BEGINNING --------------------
+-- do not forget the return at the end of the file
+--]]
+
 -- initiate centreon_cafeteria object
 local centreon_cafeteria = {}
 local CentreonCafeteria = {}
@@ -65,6 +70,14 @@ function centreon_cafeteria.new(menu, cook)
   return self
 end
 
+--[[
+----------------- EXERCISE 4 ENDING --------------------
+--]]
+
+
+--[[
+----------------- EXERCISE 5 BEGINNING --------------------
+--]]
 
 function CentreonCafeteria:check_alergy(dish, alergies)
   -- find dish
@@ -93,4 +106,10 @@ function CentreonCafeteria:check_alergy(dish, alergies)
   return true, "Here is your: " .. tostring(dish)
 end
 
-return centreon_cafeteria
\ No newline at end of file
+
+--[[
+----------------- EXERCISE 5 ENDING --------------------
+--]]
+
+-- below is the return that is part of exercise 4
+return centreon_cafeteria

From 1814add4b65bf58dc084d4bfe4e5f3744ca49828 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Fri, 10 Dec 2021 11:19:11 +0100
Subject: [PATCH 103/219] exercise 4 and 5 answers

---
 .../training_course/exercices_answers.md | 34 +++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/stream-connectors/training_course/exercices_answers.md b/stream-connectors/training_course/exercices_answers.md
index d3a360cbe02..241ce11a879 100644
--- a/stream-connectors/training_course/exercices_answers.md
+++ b/stream-connectors/training_course/exercices_answers.md
@@ -76,3 +76,37 @@ end
 ```
 
 In the above example, we've added a check that says if the type of the "tables" variable is not a number, then we are going to ignore it and add a random number of tables in the classroom.
+
+## Exercise 4
+
+In this exercise, you must create your first lua module and its constructor. There is an example in the [centreon_cafeteria.lua module](answers/centreon_cafeteria.lua) file
+
+you can test your constructor using the following code in a lua script
+
+```lua
+local centreon_cafeteria = require("centreon_cafeteria")
+local cafeteria = centreon_cafeteria.new(cook, menu)
+
+print(tostring(cafeteria.cook.nickname))
+--> must print the nickname of your cook
+
+print(tostring(cafeteria.menu.starters["duck soup"].name))
+--> must print the name of the dish "duck soup"
+```
+
+## Exercise 5
+
+In this exercise, you must check if a kid has an allergy to an ingredient that is in the dish that he wants. 
There is an example of a method that checks allergies in the [centreon_cafeteria.lua module](answers/centreon_cafeteria.lua) file
+
+you can check your code using the following lua script
+
+```lua
+local centreon_cafeteria = require("centreon_cafeteria")
+local cafeteria = centreon_cafeteria.new(cook, menu)
+
+local return_code, return_message = cafeteria.check_alergy("duck soup", {"duck", "salt"})
+
+if not return_code then
+  print(return_message)
+end
+```

From 53c86290551d8bc22ee418d511ed13eac91afc07 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Fri, 10 Dec 2021 12:02:59 +0100
Subject: [PATCH 104/219] Update exercices_answers.md

---
 stream-connectors/training_course/exercices_answers.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stream-connectors/training_course/exercices_answers.md b/stream-connectors/training_course/exercices_answers.md
index 241ce11a879..6a3ee4994e2 100644
--- a/stream-connectors/training_course/exercices_answers.md
+++ b/stream-connectors/training_course/exercices_answers.md
@@ -104,7 +104,7 @@ you can check your code using the following lua script
 local centreon_cafeteria = require("centreon_cafeteria")
 local cafeteria = centreon_cafeteria.new(cook, menu)
 
-local return_code, return_message = cafeteria.check_alergy("duck soup", {"duck", "salt"})
+local return_code, return_message = cafeteria:check_alergy("duck soup", {"duck", "salt"})
 
 if not return_code then
   print(return_message)

From ed3e63cb5fa83f064a5ff5991020990226d96af7 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Fri, 10 Dec 2021 16:11:57 +0100
Subject: [PATCH 105/219] add summary

---
 stream-connectors/training_course/exercices_answers.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/stream-connectors/training_course/exercices_answers.md b/stream-connectors/training_course/exercices_answers.md
index 6a3ee4994e2..4e9bc174bbe 100644
--- a/stream-connectors/training_course/exercices_answers.md
+++ b/stream-connectors/training_course/exercices_answers.md
@@ -3,6 +3,8 @@
 - [Exercices Answers](#exercices-answers)
   - [Exercise 1](#exercise-1)
   - [Exercise 2](#exercise-2)
+  - [Exercise 3](#exercise-3)
+  - [Exercise 5](#exercise-5)

From dabd47e954cbfadfd73268baf2169746bec41f24 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Fri, 10 Dec 2021 16:12:20 +0100
Subject: [PATCH 106/219] Update exercices_answers.md

---
 stream-connectors/training_course/exercices_answers.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/stream-connectors/training_course/exercices_answers.md b/stream-connectors/training_course/exercices_answers.md
index 4e9bc174bbe..e5fe50ae995 100644
--- a/stream-connectors/training_course/exercices_answers.md
+++ b/stream-connectors/training_course/exercices_answers.md
@@ -4,6 +4,7 @@
 - [Exercise 1](#exercise-1)
 - [Exercise 2](#exercise-2)
 - [Exercise 3](#exercise-3)
+- [Exercise 4](#exercise-4)
 - [Exercise 5](#exercise-5)

From cdaeea56eda920bdab860bb3ef0695b60335b109 Mon Sep 17 00:00:00 2001
From: Simon Bomm
Date: Mon, 13 Dec 2021 15:31:01 +0100
Subject: [PATCH 107/219] + add Signl4 doc link

---
 stream-connectors/README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 9fa96240c2f..62589178b3b 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -26,6 +26,7 @@ Here is a list of the Centreon powered scripts:
 | PagerDuty | [PagerDuty Stream 
Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/pagerduty) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-pagerduty-events.html) |
 | Prometheus | [Prometheus Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/prometheus) | WIP |
 | ServiceNow | [ServiceNow Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/servicenow) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-service-now-events.html) |
+| Signl4 | [Signl4 Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/signl4) | [Events Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-signl4-events.html) |
 | Splunk | [Splunk Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/splunk) | [Events Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-events.html), [Metrics Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-metrics.html) |
 | Warp10 | [Warp10 Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/warp10) | [Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-warp10.html) |

@@ -49,4 +50,4 @@ If you wish to help us improve this project, feel free to read the [Contribute.m
 [contributors-url]: https://github.com/centreon/centreon-stream-connector-scripts/graphs/contributors
 [forks-url]: https://github.com/centreon/centreon-stream-connector-scripts/network/members
 [stars-url]: https://github.com/centreon/centreon-stream-connector-scripts/stargazers
-[issues-url]: https://github.com/centreon/centreon-stream-connector-scripts/issues
\ No newline at end of file
+[issues-url]: https://github.com/centreon/centreon-stream-connector-scripts/issues

From db020df1bb7c7143630534f860a0706516d4033a Mon Sep 17 00:00:00 2001
From: psame <44295022+psamecentreon@users.noreply.github.com>
Date: Thu, 16 Dec 2021 09:55:44 +0100
Subject: [PATCH 108/219] Refacto streamconnector BSM v2

---
 .../bsm/bsm_connector-apiv2.lua | 300 ++++++++++++++++++
 1 file changed, 300 insertions(+)
 create mode 100644 stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua

diff --git a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua
new file mode 100644
index 00000000000..358d662107b
--- /dev/null
+++ b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua
@@ -0,0 +1,300 @@
+--
+-- Copyright © 2021 Centreon
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- For more information : contact@centreon.com
+--
+-- To work you need to provide to this script a Broker stream connector output configuration
+-- with the following information:
+--
+-- source_ci (string): Name of the transmitter, usually Centreon server name
+-- http_server_url (string): the full HTTP URL. Default: https://my.bsm.server:30005/bsmc/rest/events/ws-centreon/.
+-- http_proxy_string (string): the full proxy URL if needed to reach the BSM server. Default: empty.
+-- log_path (string): the log file to use
+-- log_level (number): the log level (0, 1, 2, 3) where 3 is the maximum level. 0 logs almost nothing. 1 logs only the beginning of the script and errors. 2 logs a reasonable amount of verbose. 3 logs almost everything possible, to be used only for debug. Recommended value in production: 1.
+-- max_buffer_size (number): how many events to store before sending them to the server.
+-- max_buffer_age (number): flush the events when the specified time (in seconds) is reached (even if max_size is not reached).

+-- Libraries
+local curl = require "cURL"
+require("LuaXML")
+
+-- Centreon lua core libraries
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+
+-- workaround https://github.com/centreon/centreon-broker/issues/201
+local previous_event = ""
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+-- Constructor
+-- @param conf The table given by the init() function and returned from the GUI
+-- @return the new EventQueue
+--------------------------------------------------------------------------------
+
+function EventQueue.new(params)
+  local self = {}
+  self.fail = false
+
+  local mandatory_parameters = {
+    "http_server_url"
+  }
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/bsm_connector-apiv2.log"
+  local log_level = params.log_level or 1
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs
+  self.sc_params.params.accepted_categories = params.accepted_categories or "neb"
+  self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status"
+  self.sc_params.params.source_ci = params.source_ci or "Centreon"
+  self.sc_params.params.max_output_length = params.max_output_length or 1024
+
+  -- apply users params and check syntax of standard ones
+  self.sc_params:param_override(params)
+ 
self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + self.send_data_method = { + [1] = function (data, element) return self:send_data(data, element) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-- Format XML file with host infoamtion +function EventQueue:format_event_host() + local xml_host_severity = "" .. self.sc_common:ifnil_or_empty(self.sc_broker:get_severity(self.sc_event.event.host_id) , '0') .. "" + local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.service.action_url, 'no action url for this host') + local xml_notes = "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.cache.service.notes, 'OS not set') .. "" + + self.sc_event.event.formated_event = { + "" + .. "" .. hostname .. "" + .. xml_host_severity .. xml_notes + .. "" .. xml_url .. "" + .. "" .. ifnil_or_empty(self.source_ci, 'Centreon') .. "" + .. "" .. ifnil_or_empty(self.sc_event.event.host_id, '0') .. "" + .. "" .. ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, '0') .. "" + .. "" + } +end + +-- Format XML file with service infoamtion +function EventQueue:format_event_service() + local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.service.notes_url, 'no notes url for this service') + local xml_service_severity = "" .. self.sc_common:ifnil_or_empty(self.sc_broker:get_severity(self.sc_event.event.host_id, self.sc_event.event.service_id) , '0') .. "" + + self.sc_event.event.formated_event = { + "" + .. "" .. hostname .. "" + .. "" .. service_description .. "" + .. "" ..self.sc_event.event.state .. "" + .. "" ..self.sc_event.event.last_update .. "" + .. "" .. string.match(e.output, "^(.*)\n") .. "" + .. xml_service_severity + .. "" .. xml_url .. "" + .. "" .. ifnil_or_empty(self.sc_event.event.host_id, '0') .. "" + .."" .. ifnil_or_empty(self.sc_event.event.service_id, '0') .. "" + .. "" .. ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, '0') .. "" + .."" + } +end + +-------------------------------------------------------------------------------- +-- EventQueue:add method +-- @param e An event +-------------------------------------------------------------------------------- + +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. 
tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +function EventQueue:send_data(data, element) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + for _, xml_str in ipairs(data) do + http_post_data = http_post_data .. tostring(xml.eval(xml_str)) + end + + self.sc_logger:info(http_post_data) + return true + end + + local http_post_data = "" + + for _, xml_str in ipairs(data) do + http_post_data = http_post_data .. tostring(xml.eval(xml_str)) + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: BSM Http Server URL is: \"" .. tostring(self.sc_params.params.http_server_url .. "\"")) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.sc_params.params.http_server_url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt( + curl.OPT_HTTPHEADER, + { + "Content-Type: text/xml", + "content-length: " .. string.len(http_post_data) + } + ) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + -- performing the HTTP request + http_request:perform() + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 202 or http_response_code == 200 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. 
tostring(http_response_body)) + end + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.sc_event.event.new(conf) +end + +-- Fonction write() +function write(event) + -- First, flush all queues if needed (too old or size too big) + Queue.sc_event.event.sc_flush:flush_all_queues(Queue.sc_event.event.send_data_method[1]) + + -- skip event if a mandatory parameter is missing + if Queue.sc_event.event.fail then + Queue.sc_event.event.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + Queue.sc_event.event.sc_event = sc_event.new(event, Queue.sc_event.event.sc_params.params, Queue.sc_event.event.sc_common, Queue.sc_event.event.sc_logger, Queue.sc_event.event.sc_broker) + + -- drop event if wrong category + if not Queue.sc_event.event.sc_event:is_valid_category() then + Queue.sc_event.event.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(Queue.sc_event.event.sc_params.params.reverse_category_mapping[Queue.sc_event.event.sc_event.event.category])) + return true + end + + -- drop event if wrong element + if not Queue.sc_event.event.sc_event:is_valid_element() then + Queue.sc_event.event.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(Queue.sc_event.event.sc_params.params.reverse_element_mapping[Queue.sc_event.event.sc_event.event.category][Queue.sc_event.event.sc_event.event.element])) + return true + end + + -- drop event if it is not validated + if Queue.sc_event.event.sc_event:is_valid_event() then + queue:format_accepted_event() + else + return true + end + + -- Since we've added an event to a specific queue, flush it if queue is full + Queue.sc_event.event.sc_flush:flush_queue(Queue.sc_event.event.send_data_method[1], Queue.sc_event.event.sc_event.event.category, Queue.sc_event.event.sc_event.event.element) + return true +end From b4b9b31760f63092d995e46756e85039035eb45d Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 16 Dec 2021 11:35:16 +0100 Subject: [PATCH 109/219] Add spec 1 5 4 (#92) --- .../splunk/splunk-events-apiv2.lua | 2 +- ...eon-stream-connectors-lib-1.5.4-1.rockspec | 39 +++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.4-1.rockspec diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index 263beb78570..b1cfc0fcc28 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -201,7 +201,7 @@ function EventQueue:send_data(data, element) "content-length:" .. string.len(http_post_data), "authorization: Splunk " .. 
self.sc_params.params.splunk_token, } - ) + ) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.4-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.4-1.rockspec new file mode 100644 index 00000000000..9986d5cf5ac --- /dev/null +++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.4-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.5.4-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.5.4-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 5e8eb9e5db6fc2d9d3d8e579c63c9e92d0876677 Mon Sep 17 00:00:00 2001 From: psame <44295022+psamecentreon@users.noreply.github.com> Date: Mon, 20 Dec 2021 10:39:04 +0100 Subject: [PATCH 110/219] Review code after firts recommandations --- .../bsm/bsm_connector-apiv2.lua | 55 ++++++++++--------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua index 358d662107b..192f4a17e99 100644 --- a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua +++ b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua @@ -114,26 +114,27 @@ end -- Format XML file with host infoamtion function 
EventQueue:format_event_host() - local xml_host_severity = "" .. self.sc_common:ifnil_or_empty(self.sc_broker:get_severity(self.sc_event.event.host_id) , '0') .. "" - local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.service.action_url, 'no action url for this host') - local xml_notes = "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.cache.service.notes, 'OS not set') .. "" + local xml_host_severity = "" .. self.sc_common:ifnil_or_empty(self.sc_broker:get_severity(self.sc_event.event.host_id) , 0) .. "" + local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.action_url, 'no action url for this host') + local xml_notes = "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes, 'no notes found on host') .. "" self.sc_event.event.formated_event = { "" .. "" .. hostname .. "" - .. xml_host_severity .. xml_notes + .. "" .. xml_host_severity .. "" + .. "" .. xml_notes .. "" .. "" .. xml_url .. "" .. "" .. ifnil_or_empty(self.source_ci, 'Centreon') .. "" - .. "" .. ifnil_or_empty(self.sc_event.event.host_id, '0') .. "" - .. "" .. ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, '0') .. "" + .. "" .. ifnil_or_empty(self.sc_event.event.host_id, 0) .. "" + .. "" .. ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" .. "" } end -- Format XML file with service infoamtion function EventQueue:format_event_service() - local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.service.notes_url, 'no notes url for this service') - local xml_service_severity = "" .. self.sc_common:ifnil_or_empty(self.sc_broker:get_severity(self.sc_event.event.host_id, self.sc_event.event.service_id) , '0') .. "" + local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes_url, 'no url for this service') + local xml_service_severity = "" .. self.sc_common:ifnil_or_empty(self.sc_broker:get_severity(self.sc_event.event.host_id, self.sc_event.event.service_id) , 0) .. "" self.sc_event.event.formated_event = { "" @@ -141,13 +142,13 @@ function EventQueue:format_event_service() .. "" .. service_description .. "" .. "" ..self.sc_event.event.state .. "" .. "" ..self.sc_event.event.last_update .. "" - .. "" .. string.match(e.output, "^(.*)\n") .. "" - .. xml_service_severity + .. "" .. string.match(self.sc_event.event.output, "^(.*)\n") .. "" + .. "" .. xml_service_severity .. "" .. "" .. xml_url .. "" - .. "" .. ifnil_or_empty(self.sc_event.event.host_id, '0') .. "" - .."" .. ifnil_or_empty(self.sc_event.event.service_id, '0') .. "" - .. "" .. ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, '0') .. "" - .."" + .. "" .. ifnil_or_empty(self.sc_event.event.host_id, 0) .. "" + .. "" .. ifnil_or_empty(self.sc_event.event.service_id, 0) .. "" + .. "" .. ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" + .. 
"" } end @@ -256,45 +257,45 @@ local queue -- Fonction init() function init(conf) - queue = EventQueue.sc_event.event.new(conf) + queue = EventQueue.new(conf) end -- Fonction write() function write(event) -- First, flush all queues if needed (too old or size too big) - Queue.sc_event.event.sc_flush:flush_all_queues(Queue.sc_event.event.send_data_method[1]) + queue.sc_flush:flush_all_queues(queue.send_data_method[1]) -- skip event if a mandatory parameter is missing - if Queue.sc_event.event.fail then - Queue.sc_event.event.sc_logger:error("Skipping event because a mandatory parameter is not set") + if queue.event.fail then + queue.event.sc_logger:error("Skipping event because a mandatory parameter is not set") return true end -- initiate event object - Queue.sc_event.event.sc_event = sc_event.new(event, Queue.sc_event.event.sc_params.params, Queue.sc_event.event.sc_common, Queue.sc_event.event.sc_logger, Queue.sc_event.event.sc_broker) + queue.event.sc_event = sc_event.new(event, queue.event.sc_params.params, queue.event.sc_common, queue.event.sc_logger, queue.event.sc_broker) -- drop event if wrong category - if not Queue.sc_event.event.sc_event:is_valid_category() then - Queue.sc_event.event.sc_logger:debug("dropping event because category is not valid. Event category is: " - .. tostring(Queue.sc_event.event.sc_params.params.reverse_category_mapping[Queue.sc_event.event.sc_event.event.category])) + if not queue.event.sc_event:is_valid_category() then + queue.event.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.event.sc_params.params.reverse_category_mapping[queue.event.sc_event.event.category])) return true end -- drop event if wrong element - if not Queue.sc_event.event.sc_event:is_valid_element() then - Queue.sc_event.event.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. tostring(Queue.sc_event.event.sc_params.params.reverse_element_mapping[Queue.sc_event.event.sc_event.event.category][Queue.sc_event.event.sc_event.event.element])) + if not queue.event.sc_event:is_valid_element() then + queue.event.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. 
tostring(queue.event.sc_params.params.reverse_element_mapping[queue.event.sc_event.event.category][queue.event.sc_event.event.element])) return true end -- drop event if it is not validated - if Queue.sc_event.event.sc_event:is_valid_event() then + if queue.event.sc_event:is_valid_event() then queue:format_accepted_event() else return true end -- Since we've added an event to a specific queue, flush it if queue is full - Queue.sc_event.event.sc_flush:flush_queue(Queue.sc_event.event.send_data_method[1], Queue.sc_event.event.sc_event.event.category, Queue.sc_event.event.sc_event.event.element) + queue.event.sc_flush:flush_queue(queue.event.send_data_method[1], queue.event.sc_event.event.category, queue.event.sc_event.event.element) return true end From 49aa4dc40532a7f1db96b552e088a79ba9c157b9 Mon Sep 17 00:00:00 2001 From: psame <44295022+psamecentreon@users.noreply.github.com> Date: Mon, 20 Dec 2021 10:40:56 +0100 Subject: [PATCH 111/219] Change name file --- .../bsm/{bsm_connector-apiv2.lua => bsm-events-apiv2.lua} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename stream-connectors/centreon-certified/bsm/{bsm_connector-apiv2.lua => bsm-events-apiv2.lua} (100%) diff --git a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua similarity index 100% rename from stream-connectors/centreon-certified/bsm/bsm_connector-apiv2.lua rename to stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua From 235b4949a2d2f05dd561b720ddc5cee75f5029d7 Mon Sep 17 00:00:00 2001 From: psame <44295022+psamecentreon@users.noreply.github.com> Date: Mon, 20 Dec 2021 16:20:00 +0100 Subject: [PATCH 112/219] Review function write --- .../bsm/bsm-events-apiv2.lua | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua index 192f4a17e99..9bbbb9e0148 100644 --- a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua +++ b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua @@ -266,36 +266,36 @@ function write(event) queue.sc_flush:flush_all_queues(queue.send_data_method[1]) -- skip event if a mandatory parameter is missing - if queue.event.fail then - queue.event.sc_logger:error("Skipping event because a mandatory parameter is not set") + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") return true end -- initiate event object - queue.event.sc_event = sc_event.new(event, queue.event.sc_params.params, queue.event.sc_common, queue.event.sc_logger, queue.event.sc_broker) + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) -- drop event if wrong category - if not queue.event.sc_event:is_valid_category() then - queue.event.sc_logger:debug("dropping event because category is not valid. Event category is: " - .. tostring(queue.event.sc_params.params.reverse_category_mapping[queue.event.sc_event.event.category])) + if not queue.sc_event:is_valid_category() then + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) return true end -- drop event if wrong element - if not queue.event.sc_event:is_valid_element() then - queue.event.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. 
tostring(queue.event.sc_params.params.reverse_element_mapping[queue.event.sc_event.event.category][queue.event.sc_event.event.element])) + if not queue.sc_event:is_valid_element() then + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) return true end -- drop event if it is not validated - if queue.event.sc_event:is_valid_event() then + if queue.sc_event:is_valid_event() then queue:format_accepted_event() else return true end -- Since we've added an event to a specific queue, flush it if queue is full - queue.event.sc_flush:flush_queue(queue.event.send_data_method[1], queue.event.sc_event.event.category, queue.event.sc_event.event.element) + queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) return true end From 5b74f36994da2a36e10e24e92caa59df8a31ea80 Mon Sep 17 00:00:00 2001 From: psame <44295022+psamecentreon@users.noreply.github.com> Date: Tue, 28 Dec 2021 13:47:35 +0100 Subject: [PATCH 113/219] Refacto event treatment for when send_data_test=1 --- .../bsm/bsm-events-apiv2.lua | 82 +++++++++++++------ 1 file changed, 58 insertions(+), 24 deletions(-) diff --git a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua index 9bbbb9e0148..85732feab9f 100644 --- a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua +++ b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua @@ -112,21 +112,54 @@ function EventQueue.new(params) return self end +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +--------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + for index, value in pairs(template) do + self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) + end + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + -- Format XML file with host infoamtion function EventQueue:format_event_host() - local xml_host_severity = "" .. self.sc_common:ifnil_or_empty(self.sc_broker:get_severity(self.sc_event.event.host_id) , 0) .. 
"" + local xml_host_severity = self.sc_broker:get_severity(self.sc_event.event.host_id) local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.action_url, 'no action url for this host') - local xml_notes = "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes, 'no notes found on host') .. "" + local xml_notes = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes, 'no notes found on host') + if xml_host_severity == false then + xml_host_severity = 0 + end self.sc_event.event.formated_event = { "" - .. "" .. hostname .. "" + .. "" .. self.sc_event.event.cache.host.name .. "" .. "" .. xml_host_severity .. "" - .. "" .. xml_notes .. "" + .. "" .. xml_notes .. "" .. "" .. xml_url .. "" - .. "" .. ifnil_or_empty(self.source_ci, 'Centreon') .. "" - .. "" .. ifnil_or_empty(self.sc_event.event.host_id, 0) .. "" - .. "" .. ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" + .. "" .. self.sc_common:ifnil_or_empty(self.source_ci, 'Centreon') .. "" + .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.host_id, 0) .. "" + .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" .. "" } end @@ -134,20 +167,24 @@ end -- Format XML file with service infoamtion function EventQueue:format_event_service() local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes_url, 'no url for this service') - local xml_service_severity = "" .. self.sc_common:ifnil_or_empty(self.sc_broker:get_severity(self.sc_event.event.host_id, self.sc_event.event.service_id) , 0) .. "" - + local xml_service_severity = self.sc_broker:get_severity(self.sc_event.event.host_id, self.sc_event.event.service_id) + + if xml_service_severity == false then + xml_service_severity = 0 + end + self.sc_event.event.formated_event = { "" - .. "" .. hostname .. "" - .. "" .. service_description .. "" + .. "" .. self.sc_event.event.cache.host.name .. "" + .. "" .. self.sc_event.event.cache.service.description .. "" .. "" ..self.sc_event.event.state .. "" .. "" ..self.sc_event.event.last_update .. "" .. "" .. string.match(self.sc_event.event.output, "^(.*)\n") .. "" - .. "" .. xml_service_severity .. "" + .. "" .. xml_service_severity .. "" .. "" .. xml_url .. "" - .. "" .. ifnil_or_empty(self.sc_event.event.host_id, 0) .. "" - .. "" .. ifnil_or_empty(self.sc_event.event.service_id, 0) .. "" - .. "" .. ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" + .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.host_id, 0) .. "" + .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.service_id, 0) .. "" + .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" .. "" } end @@ -177,21 +214,18 @@ function EventQueue:send_data(data, element) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then - for _, xml_str in ipairs(data) do - http_post_data = http_post_data .. tostring(xml.eval(xml_str)) + for _, xml_str in pairs(data) do + self.sc_logger:info( " value is: " .. xml.str(xml.new(xml_str,0,'<event_data>'))) end - - self.sc_logger:info(http_post_data) - return true + return true end - local http_post_data = "" - for _, xml_str in ipairs(data) do - http_post_data = http_post_data .. tostring(xml.eval(xml_str)) + for _, xml_str in pairs(data) do + http_post_data = http_post_data .. xml.str(xml.new(xml_str,0,'<event_data>')) end - self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. 
tostring(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following xml " .. xml.str(http_post_data)) self.sc_logger:info("[EventQueue:send_data]: BSM Http Server URL is: \"" .. tostring(self.sc_params.params.http_server_url .. "\"")) local http_response_body = "" From a31602f03b48b08b9adcc08de5f630f4fb689f3c Mon Sep 17 00:00:00 2001 From: psamecentreon Date: Fri, 14 Jan 2022 19:33:05 +0000 Subject: [PATCH 114/219] Review code with of formated data --- .../bsm/bsm-events-apiv2.lua | 55 ++++++++++++++----- 1 file changed, 42 insertions(+), 13 deletions(-) diff --git a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua index 85732feab9f..b2424c95329 100644 --- a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua +++ b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua @@ -65,8 +65,8 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/bsm_connector-apiv2.log" - local log_level = params.log_level or 1 - + local log_level = params.log_level or 1 + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) @@ -106,7 +106,7 @@ function EventQueue.new(params) self.send_data_method = { [1] = function (data, element) return self:send_data(data, element) end } - + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self @@ -125,7 +125,7 @@ function EventQueue:format_accepted_event() if self.format_template and template ~= nil and template ~= "" then for index, value in pairs(template) do self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) - end + end else -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file if not self.format_event[category][element] then @@ -147,11 +147,11 @@ function EventQueue:format_event_host() local xml_host_severity = self.sc_broker:get_severity(self.sc_event.event.host_id) local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.action_url, 'no action url for this host') local xml_notes = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes, 'no notes found on host') - + if xml_host_severity == false then xml_host_severity = 0 end - self.sc_event.event.formated_event = { + --[[.sc_event.event.formated_event = { "" .. "" .. self.sc_event.event.cache.host.name .. "" .. "" .. xml_host_severity .. "" @@ -161,6 +161,15 @@ function EventQueue:format_event_host() .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.host_id, 0) .. "" .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" .. "" + }--]] + self.sc_event.event.formated_event = { + hostname = self.sc_event.event.cache.host.name, + host_severity = xml_host_severity, + host_notes = xml_notes, + url = xml_url, + source_ci = self.sc_common:ifnil_or_empty(self.source_ci, 'Centreon'), + source_host_id = self.sc_common:ifnil_or_empty(self.sc_event.event.host_id, 0), + scheduled_downtime_depth = self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) } end @@ -173,7 +182,7 @@ function EventQueue:format_event_service() xml_service_severity = 0 end - self.sc_event.event.formated_event = { + --[[self.sc_event.event.formated_event = { "" .. "" .. self.sc_event.event.cache.host.name .. "" .. "" .. 
self.sc_event.event.cache.service.description .. "" @@ -186,7 +195,20 @@ function EventQueue:format_event_service() .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.service_id, 0) .. "" .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" .. "" - } + }--]] + + self.sc_event.event.formated_event = { + hostname = self.sc_event.event.cache.host.name, + svc_desc = self.sc_event.event.cache.service.description, + state = self.sc_event.event.state, + last_update = self.sc_event.event.last_update, + output = string.match(self.sc_event.event.output, "^(.*)\n"), + service_severity = xml_service_severity, + url = xml_url, + source_host_id = self.sc_common:ifnil_or_empty(self.sc_event.event.host_id, 0), + source_svc_id = self.sc_common:ifnil_or_empty(self.sc_event.event.service_id, 0), + scheduled_downtime_depth = self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) + } end -------------------------------------------------------------------------------- @@ -211,19 +233,26 @@ end function EventQueue:send_data(data, element) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") - + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then + local data_formated = "" for _, xml_str in pairs(data) do - self.sc_logger:info( " value is: " .. xml.str(xml.new(xml_str,0,'<event_data>'))) + for index, http_post_data in pairs(xml_str) do + data_formated = data_formated .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(http_post_data)) .. "" + end end + self.sc_logger:notice(tostring(data_formated) .. "") return true end - local http_post_data = "" + local http_post_data = "" for _, xml_str in pairs(data) do - http_post_data = http_post_data .. xml.str(xml.new(xml_str,0,'<event_data>')) + for index, data_formated in pairs(xml_str) do + http_post_data = http_post_data .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(data_formated)) .. "" + end end + http_post_data = http_post_data .. "" self.sc_logger:info("[EventQueue:send_data]: Going to send the following xml " .. xml.str(http_post_data)) self.sc_logger:info("[EventQueue:send_data]: BSM Http Server URL is: \"" .. tostring(self.sc_params.params.http_server_url .. "\"")) @@ -332,4 +361,4 @@ function write(event) -- Since we've added an event to a specific queue, flush it if queue is full queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) return true -end +end \ No newline at end of file From 378c6debb50c276c0997b84139736e251e0300d1 Mon Sep 17 00:00:00 2001 From: psamecentreon Date: Fri, 14 Jan 2022 19:55:01 +0000 Subject: [PATCH 115/219] delete comment block --- .../bsm/bsm-events-apiv2.lua | 29 ++----------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua index b2424c95329..542dc927823 100644 --- a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua +++ b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua @@ -151,17 +151,7 @@ function EventQueue:format_event_host() if xml_host_severity == false then xml_host_severity = 0 end - --[[.sc_event.event.formated_event = { - "" - .. "" .. self.sc_event.event.cache.host.name .. "" - .. "" .. xml_host_severity .. "" - .. "" .. xml_notes .. "" - .. "" .. xml_url .. "" - .. "" .. 
self.sc_common:ifnil_or_empty(self.source_ci, 'Centreon') .. "" - .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.host_id, 0) .. "" - .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" - .. "" - }--]] + self.sc_event.event.formated_event = { hostname = self.sc_event.event.cache.host.name, host_severity = xml_host_severity, @@ -182,21 +172,6 @@ function EventQueue:format_event_service() xml_service_severity = 0 end - --[[self.sc_event.event.formated_event = { - "" - .. "" .. self.sc_event.event.cache.host.name .. "" - .. "" .. self.sc_event.event.cache.service.description .. "" - .. "" ..self.sc_event.event.state .. "" - .. "" ..self.sc_event.event.last_update .. "" - .. "" .. string.match(self.sc_event.event.output, "^(.*)\n") .. "" - .. "" .. xml_service_severity .. "" - .. "" .. xml_url .. "" - .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.host_id, 0) .. "" - .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.service_id, 0) .. "" - .. "" .. self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0) .. "" - .. "" - }--]] - self.sc_event.event.formated_event = { hostname = self.sc_event.event.cache.host.name, svc_desc = self.sc_event.event.cache.service.description, @@ -361,4 +336,4 @@ function write(event) -- Since we've added an event to a specific queue, flush it if queue is full queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) return true -end \ No newline at end of file +end From 8b25677f58a11898dd4afa18d7b5cf3ce6d1eb67 Mon Sep 17 00:00:00 2001 From: psame <44295022+psamecentreon@users.noreply.github.com> Date: Fri, 14 Jan 2022 20:57:22 +0100 Subject: [PATCH 116/219] Create function xml_escape for bsm-event SC --- .../sc_common.lua | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index 0d42b7a318e..9d9ba13fe76 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -247,4 +247,31 @@ function ScCommon:json_escape(string) return string end +--- xml_escape: escape xml special characters in a string +-- @param string (string) the string that must be escaped +-- @return string (string) the string with escaped characters +function ScCommon:xml_escape(string) + local type = type(string) + + -- check that param is a valid string + if string == nil or type == "table" then + self.sc_logger:error("[sc_common:escape_string]: the input parameter is not valid, it is either nil or a table. Sent value: " .. 
tostring(string)) + return string + end + + -- nothing to escape in a boolean or number value + if type ~= "string" then + return string + end + + -- escape all characters + string = string.gsub(string, '<', '<') + string = string.gsub(string, '>', '>') + string = string.gsub(string, '&', '&') + string = string.gsub(string, ''', "'") + string = string.gsub(string, '"', '"') + + return string +end + return sc_common From b5d80952b73a3a69ab5153a83c08385496bdafe5 Mon Sep 17 00:00:00 2001 From: psamecentreon Date: Fri, 21 Jan 2022 16:16:26 +0000 Subject: [PATCH 117/219] refacto to v3 --- .../bsm/bsm-events-apiv2.lua | 112 ++++++++++++------ 1 file changed, 74 insertions(+), 38 deletions(-) diff --git a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua index 542dc927823..d60ad368603 100644 --- a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua +++ b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua @@ -104,7 +104,11 @@ function EventQueue.new(params) [categories.bam.id] = {} } self.send_data_method = { - [1] = function (data, element) return self:send_data(data, element) end + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end } -- return EventQueue object @@ -151,7 +155,7 @@ function EventQueue:format_event_host() if xml_host_severity == false then xml_host_severity = 0 end - + self.sc_event.event.formated_event = { hostname = self.sc_event.event.cache.host.name, host_severity = xml_host_severity, @@ -206,30 +210,41 @@ function EventQueue:add() .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -function EventQueue:send_data(data, element) +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = "[send_data]: " .. "" + for index, xml_str in pairs(event) do + payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "" + end + payload = payload .. "" + + else + payload = payload .. "[send_data]: " .. "" + for index, xml_str in pairs(event) do + payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "" + end + payload = payload .. "" + end + + return payload +end + +function EventQueue:send_data(payload) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then - local data_formated = "" - for _, xml_str in pairs(data) do - for index, http_post_data in pairs(xml_str) do - data_formated = data_formated .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(http_post_data)) .. "" - end - end - self.sc_logger:notice(tostring(data_formated) .. "") + self.sc_logger:notice("[send_data]: " .. tostring(payload)) return true end - local http_post_data = "" - for _, xml_str in pairs(data) do - for index, data_formated in pairs(xml_str) do - http_post_data = http_post_data .. "<" .. tostring(index) .. ">" .. 
tostring(self.sc_common:xml_escape(data_formated)) .. "" - end - end - http_post_data = http_post_data .. "" - - self.sc_logger:info("[EventQueue:send_data]: Going to send the following xml " .. xml.str(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following xml " .. tostring(payload)) self.sc_logger:info("[EventQueue:send_data]: BSM Http Server URL is: \"" .. tostring(self.sc_params.params.http_server_url .. "\"")) local http_response_body = "" @@ -246,7 +261,7 @@ function EventQueue:send_data(data, element) curl.OPT_HTTPHEADER, { "Content-Type: text/xml", - "content-length: " .. string.len(http_post_data) + "content-length: " .. string.len(payload) } ) @@ -269,7 +284,7 @@ function EventQueue:send_data(data, element) end -- adding the HTTP POST data - http_request:setopt_postfields(http_post_data) + http_request:setopt_postfields(payload) -- performing the HTTP request http_request:perform() -- collecting results @@ -300,40 +315,61 @@ end -- Fonction write() function write(event) - -- First, flush all queues if needed (too old or size too big) - queue.sc_flush:flush_all_queues(queue.send_data_method[1]) - -- skip event if a mandatory parameter is missing if queue.fail then queue.sc_logger:error("Skipping event because a mandatory parameter is not set") - return true + return false end -- initiate event object queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) - -- drop event if wrong category - if not queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then return true end - -- drop event if wrong element - if not queue.sc_event:is_valid_element() then - queue.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- drop event if it is not validated - if queue.sc_event:is_valid_event() then - queue:format_accepted_event() - else + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- Since we've added an event to a specific queue, flush it if queue is full - queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) - return true + -- there are events in the queue but they were not ready to be send + return false end From f10f0495f8a58020bad3b425472ec880e728d48a Mon Sep 17 00:00:00 2001 From: psame <44295022+psamecentreon@users.noreply.github.com> Date: Fri, 21 Jan 2022 17:20:13 +0100 Subject: [PATCH 118/219] Delete file because already in others PR --- .../sc_common.lua | 277 ------------------ 1 file changed, 277 deletions(-) delete mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua deleted file mode 100644 index 9d9ba13fe76..00000000000 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/lua - ---- --- Module with common methods for Centreon Stream Connectors --- @module sc_common --- @alias sc_common - -local sc_common = {} - -local sc_logger = require("centreon-stream-connectors-lib.sc_logger") - ---- ifnil_or_empty: change a nil or empty variable for a specified value --- @param var (string|number) the variable that needs to be checked --- @param alt (string|number|table) the alternate value if "var" is nil or empty --- @return var or alt (string|number|table) the variable or the alternate value -local function ifnil_or_empty(var, alt) - if var == nil or var == "" then - return alt - else - return var - end -end - -local ScCommon = {} - -function sc_common.new(sc_logger) - local self = {} - - self.sc_logger = sc_logger - if not self.sc_logger then - self.sc_logger = sc_logger.new() - end - - setmetatable(self, { __index = ScCommon }) - - return self -end - ---- ifnil_or_empty: change a nil or empty variable for a specified value --- @param var (string|number) the variable that needs to be checked --- @param alt (string|number|table) the alternate value if "var" is nil or empty --- @return var or alt (string|number|table) the variable or the alternate value -function ScCommon:ifnil_or_empty(var, alt) - return ifnil_or_empty(var, alt) -end - ---- if_wrong_type: change a wrong type variable with a default value --- @param var (any) the variable that needs to be checked --- @param type (string) the expected type of the variable --- @param default (any) the default value for the variable if type is wrong --- @return var or default (any) the variable if type is good or the default value -function ScCommon:if_wrong_type(var, var_type, default) - if type(var) == 
var_type then - return var - end - - return default -end - ---- boolean_to_number: convert boolean variable to number --- @param boolean (boolean) the boolean that will be converted --- @return (number) a number according to the boolean value -function ScCommon:boolean_to_number(boolean) - return boolean and 1 or 0 -end - ---- number_to_boolean: convert a 0, 1 number to its boolean counterpart --- @param number (number) the number to convert --- @return (boolean) true if param is 1, false if param is 0 -function ScCommon:number_to_boolean(number) - if number ~= 0 and number ~= 1 then - self.sc_logger:error("[sc_common:number_to_boolean]: number is not 1 or 0. Returning nil. Parameter value is: " .. tostring(number)) - return nil - end - - if number == 1 then - return true - end - - return false -end - - ---- check_boolean_number_option_syntax: make sure the number is either 1 or 0 --- @param number (number) the boolean number that must be validated --- @param default (number) the default value that is going to be return if the default number is not validated --- @return number (number) a boolean number -function ScCommon:check_boolean_number_option_syntax(number, default) - if number ~= 1 and number ~= 0 then - number = default - end - - return number -end - ---- split: convert a string into a table --- @param text (string) the string that is going to be splitted into a table --- @param [opt] separator (string) the separator character that will be used to split the string --- @return false (boolean) if text param is empty or nil --- @return table (table) a table of strings -function ScCommon:split (text, separator) - -- return false if text is nil or empty - if text == nil or text == "" then - self.sc_logger:error("[sc_common:split]: could not split text because it is nil or empty") - return false - end - - local hash = {} - - -- set default separator - separator = ifnil_or_empty(separator, ",") - - for value in string.gmatch(text, "([^" .. separator .. 
"]+)") do - table.insert(hash, value) - end - - return hash -end - ---- compare_numbers: compare two numbers, if comparison is valid, then return true --- @param firstNumber {number} --- @param secondNumber {number} --- @param operator {string} the mathematical operator that is used for the comparison --- @return {boolean} -function ScCommon:compare_numbers(firstNumber, secondNumber, operator) - if operator ~= "==" and operator ~= "~=" and operator ~= "<" and operator ~= ">" and operator ~= ">=" and operator ~= "<=" then - return nil - end - - if type(firstNumber) ~= "number" or type(secondNumber) ~= "number" then - return nil - end - - if operator == "<" then - if firstNumber < secondNumber then - return true - end - elseif operator == ">" then - if firstNumber > secondNumber then - return true - end - elseif operator == ">=" then - if firstNumber >= secondNumber then - return true - end - elseif operator == "<=" then - if firstNumber <= secondNumber then - return true - end - elseif operator == "==" then - if firstNumber == secondNumber then - return true - end - elseif operator == "~=" then - if firstNumber ~= secondNumber then - return true - end - end - - return false -end - ---- generate_postfield_param_string: convert a table of parameters into an url encoded url parameters string --- @param params (table) the table of all url string parameters to convert --- @return false (boolean) if params variable is not a table --- @return param_string (string) the url encoded parameters string -function ScCommon:generate_postfield_param_string(params) - -- return false because params type is wrong - if (type(params) ~= "table") then - self.sc_logger:error("[sc_common:generate_postfield_param_string]: parameters to convert aren't in a table") - return false - end - - local param_string = "" - - -- concatenate data in params table into a string - for field, value in pairs(params) do - if param_string == "" then - param_string = field .. "=" .. broker.url_encode(value) - else - param_string = param_string .. "&" .. field .. "=" .. broker.url_encode(value) - end - end - - -- return url encoded string - return param_string -end - ---- load_json_file: load a json file --- @param json_file (string) path to the json file --- @return true|false (boolean) if json file is valid or not --- @return content (table) the parsed json -function ScCommon:load_json_file(json_file) - local file = io.open(json_file, "r") - - -- return false if we can't open the file - if not file then - self.sc_logger:error("[sc_common:load_json_file]: couldn't open file " - .. tostring(json_file) .. ". Make sure your file is there and that it is readable by centreon-broker") - return false - end - - -- get content of the file - local file_content = file:read("*a") - io.close(file) - - -- parse it - local content, error = broker.json_decode(file_content) - - -- return false if json couldn't be parsed - if error then - self.sc_logger:error("[sc_common:load_json_file]: could not parse json file " - .. tostring(json_file) .. ". Error is: " .. 
tostring(error)) - return false - end - - return true, content -end - ---- json_escape: escape json special characters in a string --- @param string (string) the string that must be escaped --- @return string (string) the string with escaped characters -function ScCommon:json_escape(string) - local type = type(string) - - -- check that param is a valid string - if string == nil or type == "table" then - self.sc_logger:error("[sc_common:escape_string]: the input parameter is not valid, it is either nil or a table. Sent value: " .. tostring(string)) - return string - end - - -- nothing to escape in a boolean or number value - if type ~= "string" then - return string - end - - -- escape all characters - string = string.gsub(string, '\\', '\\\\') - string = string.gsub(string, '\t', '\\t') - string = string.gsub(string, '\n', '\\n') - string = string.gsub(string, '\b', '\\b') - string = string.gsub(string, '\r', '\\r') - string = string.gsub(string, '\f', '\\f') - string = string.gsub(string, '/', '\\/') - string = string.gsub(string, '"', '\\"') - - return string -end - ---- xml_escape: escape xml special characters in a string --- @param string (string) the string that must be escaped --- @return string (string) the string with escaped characters -function ScCommon:xml_escape(string) - local type = type(string) - - -- check that param is a valid string - if string == nil or type == "table" then - self.sc_logger:error("[sc_common:escape_string]: the input parameter is not valid, it is either nil or a table. Sent value: " .. tostring(string)) - return string - end - - -- nothing to escape in a boolean or number value - if type ~= "string" then - return string - end - - -- escape all characters - string = string.gsub(string, '<', '<') - string = string.gsub(string, '>', '>') - string = string.gsub(string, '&', '&') - string = string.gsub(string, ''', "'") - string = string.gsub(string, '"', '"') - - return string -end - -return sc_common From a98b5f12f3dc82c9b76384c11670d41e0e3d60f0 Mon Sep 17 00:00:00 2001 From: psame <44295022+psamecentreon@users.noreply.github.com> Date: Tue, 25 Jan 2022 11:01:09 +0100 Subject: [PATCH 119/219] Apply suggestions from code review Co-authored-by: tcharles --- stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua index d60ad368603..f61c1a65f0d 100644 --- a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua +++ b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua @@ -218,14 +218,14 @@ end -------------------------------------------------------------------------------- function EventQueue:build_payload(payload, event) if not payload then - payload = "[send_data]: " .. "" + payload = "" for index, xml_str in pairs(event) do payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "" end payload = payload .. "" else - payload = payload .. "[send_data]: " .. "" + payload = payload .. "" for index, xml_str in pairs(event) do payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. 
"" end From 843f553ed1dbaab22c6502c0289838fed0e2ca81 Mon Sep 17 00:00:00 2001 From: psame <44295022+psamecentreon@users.noreply.github.com> Date: Tue, 25 Jan 2022 15:15:49 +0100 Subject: [PATCH 120/219] Create sc_common.lua --- .../sc_common.lua | 250 ++++++++++++++++++ 1 file changed, 250 insertions(+) create mode 100644 stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua new file mode 100644 index 00000000000..0d42b7a318e --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -0,0 +1,250 @@ +#!/usr/bin/lua + +--- +-- Module with common methods for Centreon Stream Connectors +-- @module sc_common +-- @alias sc_common + +local sc_common = {} + +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") + +--- ifnil_or_empty: change a nil or empty variable for a specified value +-- @param var (string|number) the variable that needs to be checked +-- @param alt (string|number|table) the alternate value if "var" is nil or empty +-- @return var or alt (string|number|table) the variable or the alternate value +local function ifnil_or_empty(var, alt) + if var == nil or var == "" then + return alt + else + return var + end +end + +local ScCommon = {} + +function sc_common.new(sc_logger) + local self = {} + + self.sc_logger = sc_logger + if not self.sc_logger then + self.sc_logger = sc_logger.new() + end + + setmetatable(self, { __index = ScCommon }) + + return self +end + +--- ifnil_or_empty: change a nil or empty variable for a specified value +-- @param var (string|number) the variable that needs to be checked +-- @param alt (string|number|table) the alternate value if "var" is nil or empty +-- @return var or alt (string|number|table) the variable or the alternate value +function ScCommon:ifnil_or_empty(var, alt) + return ifnil_or_empty(var, alt) +end + +--- if_wrong_type: change a wrong type variable with a default value +-- @param var (any) the variable that needs to be checked +-- @param type (string) the expected type of the variable +-- @param default (any) the default value for the variable if type is wrong +-- @return var or default (any) the variable if type is good or the default value +function ScCommon:if_wrong_type(var, var_type, default) + if type(var) == var_type then + return var + end + + return default +end + +--- boolean_to_number: convert boolean variable to number +-- @param boolean (boolean) the boolean that will be converted +-- @return (number) a number according to the boolean value +function ScCommon:boolean_to_number(boolean) + return boolean and 1 or 0 +end + +--- number_to_boolean: convert a 0, 1 number to its boolean counterpart +-- @param number (number) the number to convert +-- @return (boolean) true if param is 1, false if param is 0 +function ScCommon:number_to_boolean(number) + if number ~= 0 and number ~= 1 then + self.sc_logger:error("[sc_common:number_to_boolean]: number is not 1 or 0. Returning nil. Parameter value is: " .. 
tostring(number)) + return nil + end + + if number == 1 then + return true + end + + return false +end + + +--- check_boolean_number_option_syntax: make sure the number is either 1 or 0 +-- @param number (number) the boolean number that must be validated +-- @param default (number) the default value that is going to be return if the default number is not validated +-- @return number (number) a boolean number +function ScCommon:check_boolean_number_option_syntax(number, default) + if number ~= 1 and number ~= 0 then + number = default + end + + return number +end + +--- split: convert a string into a table +-- @param text (string) the string that is going to be splitted into a table +-- @param [opt] separator (string) the separator character that will be used to split the string +-- @return false (boolean) if text param is empty or nil +-- @return table (table) a table of strings +function ScCommon:split (text, separator) + -- return false if text is nil or empty + if text == nil or text == "" then + self.sc_logger:error("[sc_common:split]: could not split text because it is nil or empty") + return false + end + + local hash = {} + + -- set default separator + separator = ifnil_or_empty(separator, ",") + + for value in string.gmatch(text, "([^" .. separator .. "]+)") do + table.insert(hash, value) + end + + return hash +end + +--- compare_numbers: compare two numbers, if comparison is valid, then return true +-- @param firstNumber {number} +-- @param secondNumber {number} +-- @param operator {string} the mathematical operator that is used for the comparison +-- @return {boolean} +function ScCommon:compare_numbers(firstNumber, secondNumber, operator) + if operator ~= "==" and operator ~= "~=" and operator ~= "<" and operator ~= ">" and operator ~= ">=" and operator ~= "<=" then + return nil + end + + if type(firstNumber) ~= "number" or type(secondNumber) ~= "number" then + return nil + end + + if operator == "<" then + if firstNumber < secondNumber then + return true + end + elseif operator == ">" then + if firstNumber > secondNumber then + return true + end + elseif operator == ">=" then + if firstNumber >= secondNumber then + return true + end + elseif operator == "<=" then + if firstNumber <= secondNumber then + return true + end + elseif operator == "==" then + if firstNumber == secondNumber then + return true + end + elseif operator == "~=" then + if firstNumber ~= secondNumber then + return true + end + end + + return false +end + +--- generate_postfield_param_string: convert a table of parameters into an url encoded url parameters string +-- @param params (table) the table of all url string parameters to convert +-- @return false (boolean) if params variable is not a table +-- @return param_string (string) the url encoded parameters string +function ScCommon:generate_postfield_param_string(params) + -- return false because params type is wrong + if (type(params) ~= "table") then + self.sc_logger:error("[sc_common:generate_postfield_param_string]: parameters to convert aren't in a table") + return false + end + + local param_string = "" + + -- concatenate data in params table into a string + for field, value in pairs(params) do + if param_string == "" then + param_string = field .. "=" .. broker.url_encode(value) + else + param_string = param_string .. "&" .. field .. "=" .. 
broker.url_encode(value) + end + end + + -- return url encoded string + return param_string +end + +--- load_json_file: load a json file +-- @param json_file (string) path to the json file +-- @return true|false (boolean) if json file is valid or not +-- @return content (table) the parsed json +function ScCommon:load_json_file(json_file) + local file = io.open(json_file, "r") + + -- return false if we can't open the file + if not file then + self.sc_logger:error("[sc_common:load_json_file]: couldn't open file " + .. tostring(json_file) .. ". Make sure your file is there and that it is readable by centreon-broker") + return false + end + + -- get content of the file + local file_content = file:read("*a") + io.close(file) + + -- parse it + local content, error = broker.json_decode(file_content) + + -- return false if json couldn't be parsed + if error then + self.sc_logger:error("[sc_common:load_json_file]: could not parse json file " + .. tostring(json_file) .. ". Error is: " .. tostring(error)) + return false + end + + return true, content +end + +--- json_escape: escape json special characters in a string +-- @param string (string) the string that must be escaped +-- @return string (string) the string with escaped characters +function ScCommon:json_escape(string) + local type = type(string) + + -- check that param is a valid string + if string == nil or type == "table" then + self.sc_logger:error("[sc_common:escape_string]: the input parameter is not valid, it is either nil or a table. Sent value: " .. tostring(string)) + return string + end + + -- nothing to escape in a boolean or number value + if type ~= "string" then + return string + end + + -- escape all characters + string = string.gsub(string, '\\', '\\\\') + string = string.gsub(string, '\t', '\\t') + string = string.gsub(string, '\n', '\\n') + string = string.gsub(string, '\b', '\\b') + string = string.gsub(string, '\r', '\\r') + string = string.gsub(string, '\f', '\\f') + string = string.gsub(string, '/', '\\/') + string = string.gsub(string, '"', '\\"') + + return string +end + +return sc_common From 192f244061b35c7925f2f825d568e011ccf714f7 Mon Sep 17 00:00:00 2001 From: psame <44295022+psamecentreon@users.noreply.github.com> Date: Tue, 25 Jan 2022 16:50:39 +0100 Subject: [PATCH 121/219] Last review --- stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua index f61c1a65f0d..a9169de2ed6 100644 --- a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua +++ b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua @@ -28,7 +28,6 @@ -- Libraries local curl = require "cURL" -require("LuaXML") -- Centreon lua core libraries local sc_common = require("centreon-stream-connectors-lib.sc_common") @@ -103,6 +102,7 @@ function EventQueue.new(params) }, [categories.bam.id] = {} } + self.send_data_method = { [1] = function (payload) return self:send_data(payload) end } @@ -330,6 +330,7 @@ function write(event) if queue.sc_event:is_valid_event() then queue:format_accepted_event() end + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. 
Event element is: "
From 20fff7de62a3bc076518a2ff0e23485822bd8dbd Mon Sep 17 00:00:00 2001
From: psamecentreon <psamecentreon@users.noreply.github.com>
Date: Thu, 27 Jan 2022 14:00:44 +0000
Subject: [PATCH 122/219] refacto hp omi to apiv2

---
 .../omi/omi_events-apiv2.lua                  | 362 ++++++++++++++++++
 1 file changed, 362 insertions(+)
 create mode 100644 stream-connectors/centreon-certified/omi/omi_events-apiv2.lua

diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua
new file mode 100644
index 00000000000..d047012940b
--- /dev/null
+++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua
@@ -0,0 +1,362 @@
+--
+-- Copyright 2022 Centreon
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- For more information : contact@centreon.com
+--
+-- To work you need to provide to this script a Broker stream connector output configuration
+-- with the following information:
+--
+-- source_ci (string): name of the transmitter, usually the Centreon server name
+-- ipaddr (string): the IP address of the operation connector server
+-- url (string): url of the operation connector endpoint
+-- logfile (string): the log file to use
+-- loglevel (number): the log level (0, 1, 2, 3) where 3 is the maximum level
+-- port (number): the operation connector server port
+-- max_size (number): how many events to store before sending them to the server.
+-- max_age (number): flush the events when the specified time (in seconds) is reached (even if max_size is not reached).
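+--
+-- For illustration only, such an output configuration could hand init() a
+-- parameter table like the one below; the ipaddr value is a placeholder, and
+-- note that the code that follows actually reads log_level, max_buffer_size
+-- and max_buffer_age rather than loglevel, max_size and max_age:
+--
+--   init({
+--     source_ci = "Centreon",
+--     ipaddr = "10.0.0.1",
+--     url = "/bsmc/rest/events/opscx-sdk/v1/",
+--     port = 30005,
+--     logfile = "/var/log/centreon-broker/omi_event.log",
+--     log_level = 2,
+--     max_buffer_size = 5,
+--     max_buffer_age = 60
+--   })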
+ +-- Libraries +local http = require("socket.http") +local ltn12 = require("ltn12") + +-- Centreon lua core libraries +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-- workaround https://github.com/centreon/centreon-broker/issues/201 +local previous_event = "" + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- +function EventQueue.new(params) + local self = {} + self.fail = false + + local mandatory_parameters = { + "ipaddr", + "url", + "port" + } + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/omi_event.log" + local log_level = params.log_level or 2 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "service_status" + self.sc_params.params.source_ci = params.source_ci or "Centreon" + self.sc_params.params.ipaddr = params.ipaddr or "192.168.56.15" + self.sc_params.params.url = params.url or "/bsmc/rest/events/opscx-sdk/v1/" + self.sc_params.params.port = params.port or 30005 + self.sc_params.params.max_output_length = params.max_output_length or 1024 + self.sc_params.params.max_buffer_size = params.max_buffer_size or 5 + self.sc_params.params.max_buffer_age = params.max_buffer_age or 60 + self.sc_params.params.flush_time = params.flush_time or os.time() + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload) return 
+
+  self.build_payload_method = {
+    [1] = function (payload, event) return self:build_payload(payload, event) end
+  }
+
+  -- return EventQueue object
+  setmetatable(self, { __index = EventQueue })
+  return self
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event method
+---------------------------------------------------------------------------------
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  local template = self.sc_params.params.format_template[category][element]
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+  self.sc_event.event.formated_event = {}
+
+  if self.format_template and template ~= nil and template ~= "" then
+    for index, value in pairs(template) do
+      self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event)
+    end
+  else
+    -- the event cannot be formatted when the stream connector does not handle this kind of event and no template file handles it either
+    if not self.format_event[category][element] then
+      self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+        .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+        .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+-- Format the XML event with service information
+function EventQueue:format_event_service()
+  local service_severity = self.sc_broker:get_severity(self.sc_event.event.host_id, self.sc_event.event.service_id)
+
+  if service_severity == false then
+    service_severity = 0
+  end
+
+  self.sc_event.event.formated_event = {
+    title = self.sc_event.event.cache.service.description,
+    description = string.match(self.sc_event.event.output, "^(.*)\n"),
+    severity = self.sc_event.event.state,
+    time_created = self.sc_event.event.last_update,
+    node = self.sc_event.event.cache.host.name,
+    related_ci = self.sc_event.event.cache.host.name,
+    -- note: the source_ci parameter lives in self.sc_params.params, not on self
+    source_ci = self.sc_common:ifnil_or_empty(self.sc_params.params.source_ci, 'Centreon'),
+    source_event_id = self.sc_common:ifnil_or_empty(self.sc_event.event.service_id, 0)
+  }
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add method
+--------------------------------------------------------------------------------
+
+function EventQueue:add()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
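+
+-- For illustration, build_payload below serializes each queued event as one
+-- tab-separated XML fragment. Assuming an <event_data> root element and the
+-- example field values shown here (both are assumptions, not real data), a
+-- single-event payload looks like:
+--
+--   <event_data>\t<title>Ping</title>\t<severity>2</severity>\t<node>srv-01</node>\t...\t</event_data>
+--
+-- where every key of the formated_event table becomes one XML element.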
"max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = "\t" + for index, xml_str in pairs(event) do + payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "\t" + end + payload = payload .. "" + + else + payload = payload .. "\t" + for index, xml_str in pairs(event) do + payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "\t" + end + payload = payload .. "" + end + + return payload +end + +function EventQueue:send_data(payload) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following xml " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: BSM Http Server URL is: \"" .. tostring(self.sc_params.params.http_server_url .. "\"")) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.sc_params.params.http_server_url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt( + curl.OPT_HTTPHEADER, + { + "Content-Type: text/xml", + "content-length: " .. string.len(payload) + } + ) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 202 or http_response_code == 200 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. 
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+-- Function write()
+function write(event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_event:is_valid_element() then
+      -- format event if it is validated
+      if queue.sc_event:is_valid_event() then
+        queue:format_accepted_event()
+      end
+
+    --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because the last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
\ No newline at end of file

From a83459ecd8b2e496145b85a8542f9f76cf34e594 Mon Sep 17 00:00:00 2001
From: psame <44295022+psamecentreon@users.noreply.github.com>
Date: Thu, 27 Jan 2022 16:06:44 +0100
Subject: [PATCH 123/219] Delete bsm-events-apiv2.lua

---
 .../bsm/bsm-events-apiv2.lua                  | 376 ------------------
 1 file changed, 376 deletions(-)
 delete mode 100644 stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua

diff --git a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua b/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua
deleted file mode 100644
index a9169de2ed6..00000000000
--- a/stream-connectors/centreon-certified/bsm/bsm-events-apiv2.lua
+++ /dev/null
@@ -1,376 +0,0 @@
---
--- Copyright © 2021 Centreon
---
--- Licensed under the Apache License, Version 2.0 (the "License");
--- you may not use this file except in compliance with the License.
--- You may obtain a copy of the License at
---
--- http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
--- For more information : contact@centreon.com
---
--- To work, this script needs a Broker stream connector output configuration
--- with the following information:
---
--- source_ci (string): name of the transmitter, usually the Centreon server name
--- http_server_url (string): the full HTTP URL. Default: https://my.bsm.server:30005/bsmc/rest/events/ws-centreon/.
--- http_proxy_string (string): the full proxy URL if needed to reach the BSM server. Default: empty.
--- log_path (string): the log file to use
--- log_level (number): the log level (0, 1, 2, 3) where 3 is the maximum level. 0 logs almost nothing. 1 logs only the beginning of the script and errors. 2 logs a reasonable amount of verbosity. 3 logs almost everything possible, to be used only for debug. Recommended value in production: 1.
--- max_buffer_size (number): how many events to store before sending them to the server.
--- max_buffer_age (number): flush the events when the specified time (in seconds) is reached (even if max_size is not reached).
-
--- Libraries
-local curl = require "cURL"
-
--- Centreon lua core libraries
-local sc_common = require("centreon-stream-connectors-lib.sc_common")
-local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
-local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
-local sc_event = require("centreon-stream-connectors-lib.sc_event")
-local sc_params = require("centreon-stream-connectors-lib.sc_params")
-local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
-local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
-
--- workaround https://github.com/centreon/centreon-broker/issues/201
-local previous_event = ""
-
---------------------------------------------------------------------------------
--- EventQueue class
---------------------------------------------------------------------------------
-
-local EventQueue = {}
-EventQueue.__index = EventQueue
-
---------------------------------------------------------------------------------
--- Constructor
--- @param conf The table given by the init() function and returned from the GUI
--- @return the new EventQueue
---------------------------------------------------------------------------------
-
-function EventQueue.new(params)
-  local self = {}
-  self.fail = false
-
-  local mandatory_parameters = {
-    "http_server_url"
-  }
-
-  -- set up log configuration
-  local logfile = params.logfile or "/var/log/centreon-broker/bsm_connector-apiv2.log"
-  local log_level = params.log_level or 1
-
-  -- initiate mandatory objects
-  self.sc_logger = sc_logger.new(logfile, log_level)
-  self.sc_common = sc_common.new(self.sc_logger)
-  self.sc_broker = sc_broker.new(self.sc_logger)
-  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
-
-  -- checking mandatory parameters and setting a fail flag
-  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
-    self.fail = true
-  end
-
-  -- override default parameters for this stream connector if the default values don't suit the basic needs
-  self.sc_params.params.accepted_categories = params.accepted_categories or "neb"
-  self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status"
-  self.sc_params.params.source_ci = params.source_ci or "Centreon"
-  self.sc_params.params.max_output_length = params.max_output_length or 1024
-
-  -- apply user params and check the syntax of standard ones
-  self.sc_params:param_override(params)
-  self.sc_params:check_params()
-
-  self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger)
-  self.format_template = self.sc_params:load_event_format_file(true)
-  self.sc_params:build_accepted_elements_info()
-  self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger)
-
-  local categories = self.sc_params.params.bbdo.categories
-  local elements = self.sc_params.params.bbdo.elements
-
-  self.format_event = {
-    [categories.neb.id] = {
-      [elements.host_status.id] = function () return self:format_event_host() end,
-      [elements.service_status.id] = function () return self:format_event_service() end
-    },
-    [categories.bam.id] = {}
-  }
-
-  self.send_data_method = {
-    [1] = function (payload) return self:send_data(payload) end
-  }
-
-  self.build_payload_method = {
-    [1] = function (payload, event) return self:build_payload(payload, event) end
-  }
-
-  -- return EventQueue object
-  setmetatable(self, { __index = EventQueue })
-  return self
-end
-
---------------------------------------------------------------------------------
----- EventQueue:format_event method
---------------------------------------------------------------------------------
-function EventQueue:format_accepted_event()
-  local category = self.sc_event.event.category
-  local element = self.sc_event.event.element
-  local template = self.sc_params.params.format_template[category][element]
-  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
-  self.sc_event.event.formated_event = {}
-
-  if self.format_template and template ~= nil and template ~= "" then
-    for index, value in pairs(template) do
-      self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event)
-    end
-  else
-    -- the event cannot be formatted when the stream connector does not handle this kind of event and no template file handles it either
-    if not self.format_event[category][element] then
-      self.sc_logger:error("[format_event]: You are trying to format an event with category: "
-        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
-        .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
-        .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
-    else
-      self.format_event[category][element]()
-    end
-  end
-
-  self:add()
-  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
-end
-
--- Format the XML event with host information
-function EventQueue:format_event_host()
-  local xml_host_severity = self.sc_broker:get_severity(self.sc_event.event.host_id)
-  local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.action_url, 'no action url for this host')
-  local xml_notes = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes, 'no notes found on host')
-
-  if xml_host_severity == false then
-    xml_host_severity = 0
-  end
-
-  self.sc_event.event.formated_event = {
-    hostname = self.sc_event.event.cache.host.name,
-    host_severity = xml_host_severity,
-    host_notes = xml_notes,
-    url = xml_url,
-    source_ci = self.sc_common:ifnil_or_empty(self.source_ci, 'Centreon'),
-    source_host_id = self.sc_common:ifnil_or_empty(self.sc_event.event.host_id, 0),
-    scheduled_downtime_depth = self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0)
-  }
-end
-
--- Format the XML event with service information
-function EventQueue:format_event_service()
-  local xml_url = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes_url, 'no url for this service')
-  local xml_service_severity = self.sc_broker:get_severity(self.sc_event.event.host_id, self.sc_event.event.service_id)
-
-  if xml_service_severity == false then
-    xml_service_severity = 0
-  end
-
-  self.sc_event.event.formated_event = {
-    hostname = self.sc_event.event.cache.host.name,
-    svc_desc = self.sc_event.event.cache.service.description,
-    state = self.sc_event.event.state,
-    last_update = self.sc_event.event.last_update,
-    output = string.match(self.sc_event.event.output, "^(.*)\n"),
-    service_severity = xml_service_severity,
-    url = xml_url,
-    source_host_id = self.sc_common:ifnil_or_empty(self.sc_event.event.host_id, 0),
-    source_svc_id = self.sc_common:ifnil_or_empty(self.sc_event.event.service_id, 0),
-    scheduled_downtime_depth = self.sc_common:ifnil_or_empty(self.sc_event.event.scheduled_downtime_depth, 0)
-  }
-end
-
---------------------------------------------------------------------------------
--- EventQueue:add method
---------------------------------------------------------------------------------
-
-function EventQueue:add()
-  -- store event in self.events lists
-  local category = self.sc_event.event.category
-  local element = self.sc_event.event.element
-
-  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
-    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
-
-  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
-  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
-
-  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
-    .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size))
-end
-
---------------------------------------------------------------------------------
--- EventQueue:build_payload, concatenate data so it is ready to be sent
--- @param payload {string} xml encoded string
--- @param event {table} the event that is going to be added to the payload
--- @return payload {string} xml encoded string
---------------------------------------------------------------------------------
-function EventQueue:build_payload(payload, event)
-  if not payload then
-    payload = "<event_data>"
-    for index, xml_str in pairs(event) do
-      payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "</" .. tostring(index) .. ">"
-    end
-    payload = payload .. "</event_data>"
-  else
-    payload = payload .. "<event_data>"
-    for index, xml_str in pairs(event) do
-      payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "</" .. tostring(index) .. ">"
-    end
-    payload = payload .. "</event_data>"
-  end
-
-  return payload
-end
-
-function EventQueue:send_data(payload)
-  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
-
-  -- write payload in the logfile for test purpose
-  if self.sc_params.params.send_data_test == 1 then
-    self.sc_logger:notice("[send_data]: " .. tostring(payload))
-    return true
-  end
-
-  self.sc_logger:info("[EventQueue:send_data]: Going to send the following xml " .. tostring(payload))
-  self.sc_logger:info("[EventQueue:send_data]: BSM Http Server URL is: \"" .. tostring(self.sc_params.params.http_server_url) .. "\"")
-
-  local http_response_body = ""
-  local http_request = curl.easy()
-    :setopt_url(self.sc_params.params.http_server_url)
-    :setopt_writefunction(
-      function (response)
-        http_response_body = http_response_body .. tostring(response)
-      end
-    )
-    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
-    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
-    :setopt(
-      curl.OPT_HTTPHEADER,
-      {
-        "Content-Type: text/xml",
-        "content-length: " .. string.len(payload)
-      }
-    )
-
-  -- set proxy address configuration
-  if (self.sc_params.params.proxy_address ~= '') then
-    if (self.sc_params.params.proxy_port ~= '') then
-      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
-    else
-      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
-    end
-  end
-
-  -- set proxy user configuration
-  if (self.sc_params.params.proxy_username ~= '') then
-    if (self.sc_params.params.proxy_password ~= '') then
-      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
-    else
-      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
-    end
-  end
-
-  -- adding the HTTP POST data
-  http_request:setopt_postfields(payload)
-  -- performing the HTTP request
-  http_request:perform()
-  -- collecting results
-  http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
-  http_request:close()
-
-  -- Handling the return code
-  local retval = false
-  if http_response_code == 202 or http_response_code == 200 then
-    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
-    retval = true
-  else
-    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
-  end
-
-  return retval
-end
-
---------------------------------------------------------------------------------
--- Required functions for Broker StreamConnector
---------------------------------------------------------------------------------
-
-local queue
-
--- Function init()
-function init(conf)
-  queue = EventQueue.new(conf)
-end
-
--- Function write()
-function write(event)
-  -- skip event if a mandatory parameter is missing
-  if queue.fail then
-    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
-    return false
-  end
-
-  -- initiate event object
-  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
-
-  if queue.sc_event:is_valid_category() then
-    if queue.sc_event:is_valid_element() then
-      -- format event if it is validated
-      if queue.sc_event:is_valid_event() then
-        queue:format_accepted_event()
-      end
-
-    --- log why the event has been dropped
-    else
-      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
-        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
-    end
-  else
-    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
-      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
-  end
-
-  return flush()
-end
-
--- flush method is called by broker every now and then (more often when broker has nothing else to do)
-function flush()
-  local queues_size = queue.sc_flush:get_queues_size()
-
-  -- nothing to flush
-  if queues_size == 0 then
-    return true
-  end
-
-  -- flush all queues because the last global flush is too old
-  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
-    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
-      return false
-    end
-
-    return true
-  end
-
-  -- flush queues because too many events are stored in them
-  if queues_size > queue.sc_params.params.max_buffer_size then
-    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
-      return false
-    end
-
-    return true
-  end
-
-  -- there are events in the queue but they were not ready to be sent
-  return false
-end

From f79ca6b44f647329c4f9c09a1e6a776e7e6d01b8 Mon Sep 17 00:00:00 2001
From: psame <44295022+psamecentreon@users.noreply.github.com>
Date: Thu, 27 Jan 2022 16:50:59 +0100
Subject: [PATCH 124/219] Update omi_events-apiv2.lua

---
 stream-connectors/centreon-certified/omi/omi_events-apiv2.lua | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua
index d047012940b..bcd9fb9c833 100644
--- a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua
+++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua
@@ -28,6 +28,7 @@
 -- max_age (number): flush the events when the specified time (in seconds) is reached (even if max_size is not reached).
-- Libraries +local curl = require("cURL") local http = require("socket.http") local ltn12 = require("ltn12") @@ -359,4 +360,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false -end \ No newline at end of file +end From 444ffdf20f8d0b0c62d5c7d56bdf8150a5d6c4bd Mon Sep 17 00:00:00 2001 From: psamecentreon Date: Tue, 1 Feb 2022 18:06:38 +0000 Subject: [PATCH 125/219] Delete max_buffer_size params because not correctly event format --- stream-connectors/centreon-certified/omi/omi_events-apiv2.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua index bcd9fb9c833..ceee4613f88 100644 --- a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua +++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua @@ -197,9 +197,9 @@ end -------------------------------------------------------------------------------- -- EventQueue:build_payload, concatenate data so it is ready to be sent --- @param payload {string} json encoded string +-- @param payload {string} xml encoded string -- @param event {table} the event that is going to be added to the payload --- @return payload {string} json encoded string +-- @return payload {string} xml encoded string -------------------------------------------------------------------------------- function EventQueue:build_payload(payload, event) if not payload then From 5e1e08c9125f60a8ea2ce47c929ffec7e446c536 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 17 Feb 2022 10:36:12 +0100 Subject: [PATCH 126/219] (stream/lib): sc_flush - refacto queue system and compatibility patch for apiv2 stream connectors (#93) * fix requires in test_kafka script * fix rockspec again * update kafka and fix readme * typos * (stream/lib) sc_flush: new queuing system * (stream/connectors) compat patch for queue system * (stream/documentation) add doc for new params * (stream/lib): sc_flush - fix bad param name * shaping flush methods * naming + missing parameter * better useless event management with flush * avoid calling same method twice * update all apiv2 sc * add xml_escape method * add rockspec file * add doc for xml escape method * prettier markdown * fix host_status event dedup --- .../elasticsearch/elastic-events-apiv2.lua | 137 ++++++++++------ .../kafka/kafka-events-apiv2.lua | 130 ++++++++------- .../pagerduty/pagerduty-events-apiv2.lua | 100 +++++++---- .../servicenow/servicenow-events-apiv2.lua | 93 +++++++---- .../signl4/signl4-events-apiv2.lua | 101 ++++++++---- .../splunk/splunk-events-apiv2.lua | 105 ++++++++---- .../splunk/splunk-metrics-apiv2.lua | 116 ++++++++----- .../sc_common.lua | 26 ++- .../sc_event.lua | 4 +- .../sc_flush.lua | 155 +++++++++++++----- .../sc_params.lua | 5 +- stream-connectors/modules/docs/README.md | 1 + stream-connectors/modules/docs/sc_common.md | 30 ++++ stream-connectors/modules/docs/sc_flush.md | 5 +- stream-connectors/modules/docs/sc_param.md | 2 + ...eon-stream-connectors-lib-2.0.0-1.rockspec | 39 +++++ 16 files changed, 737 insertions(+), 312 deletions(-) create mode 100644 stream-connectors/modules/specs/2.0.x/centreon-stream-connectors-lib-2.0.0-1.rockspec diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua index f5fa6965ab5..1d5f6b0cfe2 100644 --- 
a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -82,9 +82,21 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (data, element) return self:send_data(data, element) end + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end } + local http_post_metadata = { + ["index"] = { + ["_index"] = tostring((self.sc_params.params.elastic_index_status)) + } + } + + self.http_post_metadata = broker.json_encode(http_post_metadata) + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self @@ -143,47 +155,52 @@ function EventQueue:format_accepted_event() } end --------------------------------------------------------------------------------- --- EventQueue:add, add an event to the sending queue --------------------------------------------------------------------------------- - -function EventQueue:add() + -------------------------------------------------------------------------------- + -- EventQueue:add, add an event to the sending queue + -------------------------------------------------------------------------------- + + function EventQueue:add() -- store event in self.events lists local category = self.sc_event.event.category local element = self.sc_event.event.element - + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) - .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) - + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + end + + -------------------------------------------------------------------------------- + -- EventQueue:build_payload, concatenate data so it is ready to be sent + -- @param payload {string} json encoded string + -- @param event {table} the event that is going to be added to the payload + -- @return payload {string} json encoded string + -------------------------------------------------------------------------------- + function EventQueue:build_payload(payload, event) + if not payload then + payload = self.http_post_metadata .. '\n' .. broker.json_encode(event) .. '\n' + else + payload = payload .. self.http_post_metadata .. '\n' .. broker.json_encode(event) .. '\n' + end + + return payload end - function EventQueue:send_data(data, element) + function EventQueue:send_data(payload) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then - self.sc_logger:info("[send_data]: " .. broker.json_encode(data)) + self.sc_logger:info("[send_data]: " .. 
tostring(payload)) return true end - local http_post_metadata = { - ["index"] = { - ["_index"] = tostring((self.sc_params.params.elastic_index_status)) - } - } - - local http_post_data = broker.json_encode(http_post_metadata) - for _, raw_event in ipairs(data) do - http_post_data = http_post_data .. broker.json_encode(http_post_metadata) .. "\n" .. broker.json_encode(raw_event) .. "\n" - end - - self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) self.sc_logger:info("[EventQueue:send_data]: Elastic URL is: " .. tostring(self.sc_params.params.elastic_url) .. "/_bulk") local http_response_body = "" @@ -200,7 +217,7 @@ function EventQueue:add() curl.OPT_HTTPHEADER, { "content-type: application/json;charset=UTF-8", - "content-length: " .. string.len(http_post_data), + "content-length: " .. string.len(payload), "Authorization: Basic " .. (mime.b64(self.sc_params.params.elastic_username .. ":" .. self.sc_params.params.elastic_password)) } ) @@ -223,7 +240,7 @@ function EventQueue:add() end -- adding the HTTP POST data - http_request:setopt_postfields(http_post_data) + http_request:setopt_postfields(payload) -- performing the HTTP request http_request:perform() @@ -256,42 +273,68 @@ function init(conf) queue = EventQueue.new(conf) end --- Fonction write() -function write(event) - -- First, flush all queues if needed (too old or size too big) - queue.sc_flush:flush_all_queues(queue.send_data_method[1]) - +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) -- skip event if a mandatory parameter is missing if queue.fail then queue.sc_logger:error("Skipping event because a mandatory parameter is not set") - return true + return false end -- initiate event object queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) - -- drop event if wrong category - if not queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then return true end - -- drop event if wrong element - if not queue.sc_event:is_valid_element() then - queue.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- drop event if it is not validated - if queue.sc_event:is_valid_event() then - queue:format_accepted_event() - else + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- Since we've added an event to a specific queue, flush it if queue is full - queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) - return true -end + -- there are events in the queue but they were not ready to be send + return false +end \ No newline at end of file diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 9c069ead2c4..27b2fc1002f 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -90,6 +90,14 @@ function EventQueue.new(params) [categories.bam.id] = function () return self:format_ba_status() end } + self.send_data_method = { + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self @@ -162,47 +170,37 @@ function EventQueue:add () end -------------------------------------------------------------------------------- --- EventQueue:flush, flush stored events --- Called when the max number of events or the max age are reached --- @return (boolean) +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string -------------------------------------------------------------------------------- -function EventQueue:flush () - self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string") - - -- send stored events - retval = self:send_data() - - -- reset stored events list - self.events = {} +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. ',' .. broker.json_encode(event) + end - -- and update the timestamp - self.sc_params.params.__internal_ts_last_flush = os.time() - - return retval + return payload end -------------------------------------------------------------------------------- -- EventQueue:send_data, send data to external tool -- @return (boolean) -------------------------------------------------------------------------------- -function EventQueue:send_data () - local data = "" - local counter = 0 - - -- concatenate all stored event in the data variable - for _, formated_event in ipairs(self.events) do - if counter == 0 then - data = broker.json_encode(formated_event) - counter = counter + 1 - else - data = data .. "," .. 
broker.json_encode(formated_event) - end +function EventQueue:send_data (payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true end - self.sc_logger:debug("EventQueue:send_data: creating json: " .. tostring(data)) + self.sc_logger:info("EventQueue:send_data: creating json: " .. tostring(payload)) -- output data to the tool we want - if self:call(data) then + if self:call(payload) then return true end @@ -226,50 +224,68 @@ function init(params) queue = EventQueue.new(params) end -function write(event) +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) -- skip event if a mandatory parameter is missing if queue.fail then queue.sc_logger:error("Skipping event because a mandatory parameter is not set") - return true + return false end - + -- initiate event object queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) - -- drop event if wrong category - if not queue.sc_event:is_valid_category() then - return true + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end + + return flush() +end - -- drop event if wrong element - if not queue.sc_event:is_valid_element() then - return true - end - -- First, are there some old events waiting in the flush queue ? - if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then - queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data") - queue:flush() +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true end - -- Then we check that the event queue is not already full - if (#queue.events >= queue.sc_params.params.max_buffer_size) then - queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. 
") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") - queue:flush() - end + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end - -- drop event if it is not validated - if queue.sc_event:is_valid_event() then - queue:format_accepted_event() - else return true end - -- Then we check whether it is time to send the events to the receiver and flush - if (#queue.events >= queue.sc_params.params.max_buffer_size) then - queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached, flushing data") - queue:flush() + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true end - return true + -- there are events in the queue but they were not ready to be send + return false end \ No newline at end of file diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index 6b80e40dd97..4409e22f520 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -88,7 +88,11 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (data, element) return self:send_data(data, element) end + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end } self.state_to_severity_mapping = { @@ -301,22 +305,32 @@ function EventQueue:add() .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -function EventQueue:send_data(data, element) - self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. broker.json_encode(event) + end - local http_post_data = "" + return payload +end - for _, raw_event in ipairs(data) do - http_post_data = http_post_data .. broker.json_encode(raw_event) - end +function EventQueue:send_data(payload) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then - self.sc_logger:notice("[send_data]: " .. tostring(http_post_data)) + self.sc_logger:notice("[send_data]: " .. tostring(payload)) return true end - self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. 
tostring(payload)) self.sc_logger:info("[EventQueue:send_data]: Pagerduty address is: " .. tostring(self.sc_params.params.http_server_url)) local http_response_body = "" @@ -333,7 +347,7 @@ function EventQueue:send_data(data, element) curl.OPT_HTTPHEADER, { "content-type: application/json", - "content-length:" .. string.len(http_post_data), + "content-length:" .. string.len(payload), } ) @@ -356,7 +370,7 @@ function EventQueue:send_data(data, element) end -- adding the HTTP POST data - http_request:setopt_postfields(http_post_data) + http_request:setopt_postfields(payload) -- performing the HTTP request http_request:perform() @@ -390,42 +404,68 @@ function init(conf) queue = EventQueue.new(conf) end --- Fonction write() -function write(event) - -- First, flush all queues if needed (too old or size too big) - queue.sc_flush:flush_all_queues(queue.send_data_method[1]) - +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) -- skip event if a mandatory parameter is missing if queue.fail then queue.sc_logger:error("Skipping event because a mandatory parameter is not set") - return true + return false end -- initiate event object queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) - -- drop event if wrong category - if not queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then return true end - -- drop event if wrong element - if not queue.sc_event:is_valid_element() then - queue.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- drop event if it is not validated - if queue.sc_event:is_valid_event() then - queue:format_accepted_event() - else + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- Since we've added an event to a specific queue, flush it if queue is full - queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) - return true + -- there are events in the queue but they were not ready to be send + return false end diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua index a7b5747fa6b..6eefecca634 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua @@ -91,7 +91,11 @@ function EventQueue.new (params) } self.send_data_method = { - [1] = function (data, element) return self:send_data(data, element) end + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end } setmetatable(self, { __index = EventQueue }) @@ -382,14 +386,29 @@ function EventQueue:add() .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) end +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. ',' .. broker.json_encode(event) + end + + return payload +end + -------------------------------------------------------------------------------- -- EventQueue:send_data, send data to external tool -- @return {boolean} -------------------------------------------------------------------------------- -function EventQueue:send_data(data, element) +function EventQueue:send_data(payload) local authToken local counter = 0 - local http_post_data -- generate a fake token for test purpose or use a real one if not testing if self.sc_params.params.send_data_test == 1 then @@ -398,16 +417,7 @@ function EventQueue:send_data(data, element) authToken = self:getAuthToken() end - for _, raw_event in ipairs(data) do - if counter == 0 then - http_post_data = broker.json_encode(raw_event) - counter = counter + 1 - else - http_post_data = http_post_data .. ',' .. broker.json_encode(raw_event) - end - end - - http_post_data = '{"records":[' .. http_post_data .. ']}' + local http_post_data = '{"records":[' .. payload .. 
']}' self.sc_logger:info('EventQueue:send_data: creating json: ' .. http_post_data) if self:call( @@ -424,45 +434,66 @@ end -------------------------------------------------------------------------------- -- write, --- @param {array} event, the event from broker +-- @param {table} event, the event from broker -- @return {boolean} -------------------------------------------------------------------------------- function write (event) - -- First, flush all queues if needed (too old or size too big) - queue.sc_flush:flush_all_queues(queue.send_data_method[1]) - -- skip event if a mandatory parameter is missing if queue.fail then queue.sc_logger:error("Skipping event because a mandatory parameter is not set") - return true + return false end -- initiate event object queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) - -- drop event if wrong category - if not queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then return true end - -- drop event if wrong element - if not queue.sc_event:is_valid_element() then - queue.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- drop event if it is not validated - if queue.sc_event:is_valid_event() then - queue:format_accepted_event() - else + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- Since we've added an event to a specific queue, flush it if queue is full - queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) - return true + -- there are events in the queue but they were not ready to be send + return false end diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua index 64afeb89fc2..eedfe4b3d58 100644 --- a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua @@ -83,7 +83,11 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (data, element) return self:send_data(data, element) end + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end } self.state_to_signlstatus_mapping = { @@ -174,21 +178,32 @@ function EventQueue:add() .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -function EventQueue:send_data(data, element) +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) .. '\n' + else + payload = payload .. broker.json_encode(event) .. '\n' + end + + return payload +end + +function EventQueue:send_data(payload) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then - self.sc_logger:info("[send_data]: " .. broker.json_encode(data)) + self.sc_logger:notice("[send_data]: " .. tostring(payload)) return true end - local http_post_data = "" - for _, raw_event in ipairs(data) do - http_post_data = broker.json_encode(raw_event) .. "\n" - end - - self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) self.sc_logger:info("[EventQueue:send_data]: Signl4 Server URL is: " .. tostring(self.sc_params.params.server_address) .. "/webhook/" .. 
tostring(self.sc_params.params.team_secret)) local http_response_body = "" @@ -224,7 +239,7 @@ function EventQueue:send_data(data, element) end end -- adding the HTTP POST data - http_request:setopt_postfields(http_post_data) + http_request:setopt_postfields(payload) -- performing the HTTP request http_request:perform() -- collecting results @@ -252,42 +267,68 @@ function init(conf) queue = EventQueue.new(conf) end --- Fonction write() -function write(event) - -- First, flush all queues if needed (too old or size too big) - queue.sc_flush:flush_all_queues(queue.send_data_method[1]) - +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) -- skip event if a mandatory parameter is missing if queue.fail then queue.sc_logger:error("Skipping event because a mandatory parameter is not set") - return true + return false end -- initiate event object queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) - -- drop event if wrong category - if not queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then return true end - -- drop event if wrong element - if not queue.sc_event:is_valid_element() then - queue.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- drop event if it is not validated - if queue.sc_event:is_valid_event() then - queue:format_accepted_event() - else + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- Since we've added an event to a specific queue, flush it if queue is full - queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) - return true -end + -- there are events in the queue but they were not ready to be sent + return false +end \ No newline at end of file diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index b1cfc0fcc28..b997b5b9938 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -81,7 +81,11 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (data, element) return self:send_data(data, element) end + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end } -- return EventQueue object @@ -165,23 +169,33 @@ function EventQueue:add() .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -function EventQueue:send_data(data, element) +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. broker.json_encode(event) + end + + return payload +end + + +function EventQueue:send_data(payload) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then - self.sc_logger:notice("[send_data]: " .. tostring(broker.json_encode(data))) + self.sc_logger:notice("[send_data]: " .. tostring(payload)) return true end - local http_post_data = "" - - - for _, raw_event in ipairs(data) do - http_post_data = http_post_data .. broker.json_encode(raw_event) - end - - self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " ..
tostring(self.sc_params.params.http_server_url)) local http_response_body = "" @@ -198,7 +212,7 @@ function EventQueue:send_data(data, element) curl.OPT_HTTPHEADER, { "content-type: application/json", - "content-length:" .. string.len(http_post_data), + "content-length:" .. string.len(payload), "authorization: Splunk " .. self.sc_params.params.splunk_token, } ) @@ -222,7 +236,7 @@ function EventQueue:send_data(data, element) end -- adding the HTTP POST data - http_request:setopt_postfields(http_post_data) + http_request:setopt_postfields(payload) -- performing the HTTP request http_request:perform() @@ -255,42 +269,67 @@ function init(conf) queue = EventQueue.new(conf) end --- Fonction write() -function write(event) - -- First, flush all queues if needed (too old or size too big) - queue.sc_flush:flush_all_queues(queue.send_data_method[1]) - +-------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) -- skip event if a mandatory parameter is missing if queue.fail then queue.sc_logger:error("Skipping event because a mandatory parameter is not set") - return true + return false end -- initiate event object queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) - -- drop event if wrong category - if not queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then return true end - -- drop event if wrong element - if not queue.sc_event:is_valid_element() then - queue.sc_logger:debug("dropping event because element is not valid. Event element is: " - .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- drop event if it is not validated - if queue.sc_event:is_valid_event() then - queue:format_accepted_event() - else + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- Since we've added an event to a specific queue, flush it if queue is full - queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) - return true -end + -- there are events in the queue but they were not ready to be sent + return false +end \ No newline at end of file diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua index 45ce416fc1b..f6202d686c5 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -80,7 +80,11 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (data, element) return self:send_data(data, element) end + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end } -- return EventQueue object @@ -163,23 +167,32 @@ function EventQueue:add() .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -function EventQueue:send_data(data, element) +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. broker.json_encode(event) + end + + return payload +end + +function EventQueue:send_data(payload) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then - self.sc_logger:notice("[send_data]: " .. tostring(broker.json_encode(data))) + self.sc_logger:notice("[send_data]: " .. tostring(payload)) return true end - local http_post_data = "" - - - for _, raw_event in ipairs(data) do - http_post_data = http_post_data .. broker.json_encode(raw_event) - end - - self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(http_post_data)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " ..
tostring(self.sc_params.params.http_server_url)) local http_response_body = "" @@ -196,7 +209,7 @@ function EventQueue:send_data(data, element) curl.OPT_HTTPHEADER, { "content-type: application/json", - "content-length:" .. string.len(http_post_data), + "content-length:" .. string.len(payload), "authorization: Splunk " .. self.sc_params.params.splunk_token, } ) @@ -220,7 +233,7 @@ function EventQueue:send_data(data, element) end -- adding the HTTP POST data - http_request:setopt_postfields(http_post_data) + http_request:setopt_postfields(payload) -- performing the HTTP request http_request:perform() @@ -253,39 +266,68 @@ function init(conf) queue = EventQueue.new(conf) end --- Fonction write() -function write(event) - -- First, flush all queues if needed (too old or size too big) - queue.sc_flush:flush_all_queues(queue.send_data_method[1]) - +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) -- skip event if a mandatory parameter is missing if queue.fail then queue.sc_logger:error("Skipping event because a mandatory parameter is not set") - return true + return false end -- initiate event object - queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger) - queue.sc_event = queue.sc_metrics.sc_event - - -- drop event if wrong category - if not queue.sc_metrics:is_valid_bbdo_element() then - queue.sc_logger:debug("dropping event because category or element is not valid. Event category is: " - .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]) - .. ". Event element is: " .. queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]) + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then return true end - -- drop event if its perfdatas aren't valid - if queue.sc_metrics:is_valid_metric_event() then - queue.sc_logger:debug("valid Perfdata?: " .. tostring(queue.sc_event.event.perfdata)) - queue:format_accepted_event() - else - queue.sc_logger:debug("dropping event because metric event wasn't valid. Perfdata: " .. 
tostring(queue.sc_event.event.perf_data)) + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + return true end - -- Since we've added an event to a specific queue, flush it if queue is full - queue.sc_flush:flush_queue(queue.send_data_method[1], queue.sc_event.event.category, queue.sc_event.event.element) - return true -end + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be sent + return false +end \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index 0d42b7a318e..3af30a814a5 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -225,7 +225,7 @@ function ScCommon:json_escape(string) -- check that param is a valid string if string == nil or type == "table" then - self.sc_logger:error("[sc_common:escape_string]: the input parameter is not valid, it is either nil or a table. Sent value: " .. tostring(string)) + self.sc_logger:error("[sc_common:json_escape]: the input parameter is not valid, it is either nil or a table. Sent value: " .. tostring(string)) return string end @@ -247,4 +247,28 @@ function ScCommon:json_escape(string) return string end
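+--- xml_escape: escape the XML special characters (& < > " ') contained in a string
+-- @param string (string) the string that must be escaped
+-- @return string (string) the escaped string (or the raw parameter if it was nil or not a string)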
+function ScCommon:xml_escape(string) + local type = type(string) + + -- check that param is a valid string + if string == nil or type == "table" then + self.sc_logger:error("[sc_common:xml_escape]: the input parameter is not valid, it is either nil or a table. Sent value: " .. tostring(string)) + return string + end + + -- nothing to escape in a boolean or number value + if type ~= "string" then + return string + end + + -- escape all characters + string = string.gsub(string, '&', '&amp;') + string = string.gsub(string, '<', '&lt;') + string = string.gsub(string, '>', '&gt;') + string = string.gsub(string, '"', '&quot;') + string = string.gsub(string, "'", "&apos;") + + return string +end + return sc_common diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index 40b6bafcac8..f1f1fd44895 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -984,7 +984,7 @@ function ScEvent:is_service_status_event_duplicated() end -- if last check is the same than last_hard_state_change, it means the event just change its status so it cannot be a duplicated event - if self.event.last_hard_state_change == self.event.last_check then + if self.event.last_hard_state_change == self.event.last_check or self.event.last_hard_state_change == self.event.last_update then return false end @@ -1023,7 +1023,7 @@ function ScEvent:is_host_status_event_duplicated() end -- if last check is the same than last_hard_state_change, it means the event just change its status so it cannot be a duplicated event - if self.event.last_hard_state_change == self.event.last_check then + if self.event.last_hard_state_change == self.event.last_check or self.event.last_hard_state_change == self.event.last_update then return false end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua index e7b89f30afc..39d7218aac5 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua @@ -23,8 +23,8 @@ function sc_flush.new(params, logger) end self.params = params + self.last_global_flush = os.time() - local os_time = os.time() local categories = self.params.bbdo.categories local elements = self.params.bbdo.elements @@ -34,10 +34,9 @@ function sc_flush.new(params, logger) [categories.bam.id] = {} } - -- link queue flush info to their respective categories and elements + -- link events queues to their respective categories and elements for element_name, element_info in pairs(self.params.accepted_elements_info) do self.queues[element_info.category_id][element_info.element_id] = { - flush_date = os_time, events = {} } end @@ -47,58 +46,132 @@ end --- flush_all_queues: tries to flush all queues according to accepted elements +-- @param build_payload_method (function) the function from the stream connector that will concatenate events in the payload -- @param send_method (function) the function from the stream connector that will send the data to the wanted tool -function ScFlush:flush_all_queues(send_method) - self.sc_logger:debug("[sc_flush:flush_all_queues]: Starting to flush all queues") - - -- flush and reset queues of accepted elements - for element_name, element_info in pairs(self.params.accepted_elements_info) do - self:flush_queue(send_method, element_info.category_id, element_info.element_id) +-- @return boolean (boolean) if flush failed or not +function ScFlush:flush_all_queues(build_payload_method, send_method) + if self.params.send_mixed_events == 1 then
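+    -- send_mixed_events == 1: events from every queue are concatenated into shared payloads
+    -- (hosts and services may share one); otherwise each queue is sent in its own payloads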
+ if not self:flush_mixed_payload(build_payload_method, send_method) then + return false + end + else + if not self:flush_homogeneous_payload(build_payload_method, send_method) then + return false + end end - - self.sc_logger:debug("[sc_flush:flush_all_queues]: All queues have been flushed") + + self:reset_all_queues() + return true end +--- reset_all_queues: put all queues back to their initial state after flushing their events +function ScFlush:reset_all_queues() + for _, element_info in pairs(self.params.accepted_elements_info) do + self.queues[element_info.category_id][element_info.element_id].events = {} + end ---- flush_queue: flush a queue if requirements are met --- @param send_method (function) the function from the stream connector that will send the data to the wanted tool --- @param category (number) the category related to the queue --- @param element (number) the element related to the queue --- @return true|false (boolean) true if the queue is not flushed and true or false depending the send_method result -function ScFlush:flush_queue(send_method, category, element) - -- no events are stored in the queue - if (#self.queues[category][element].events == 0) then - self.sc_logger:debug("[sc_flush:flush_queue]: queue with category: " .. tostring(category) .. " and element: " - .. tostring(element) .. " won't be flushed because there is no event stored in it.") - return true + self.last_global_flush = os.time() +end + +--- get_queues_size: get the number of events stored in all the queues +-- @return queues_size (number) the number of events stored in all queues +function ScFlush:get_queues_size() + local queues_size = 0 + + for _, element_info in pairs(self.params.accepted_elements_info) do + queues_size = queues_size + #self.queues[element_info.category_id][element_info.element_id].events + self.sc_logger:debug("[sc_flush:get_queues_size]: size of queue for category " .. tostring(element_info.category_name) + .. " and element: " .. tostring(element_info.element_name) + .. " is: " ..
tostring(#self.queues[element_info.category_id][element_info.element_id].events)) + end + + return queues_size +end + +--- flush_mixed_payload: flush a payload that contains various type of events (services mixed hosts for example) +-- @return boolean (boolean) true or false depending on the success of the operation +function ScFlush:flush_mixed_payload(build_payload_method, send_method) + local payload = nil + local counter = 0 + + -- get all queues + for _, element_info in pairs(self.params.accepted_elements_info) do + -- get events from queues + for _, event in ipairs(self.queues[element_info.category_id][element_info.element_id].events) do + -- add event to the payload + payload = build_payload_method(payload, event) + counter = counter + 1 + + -- send events if max buffer size is reached + if counter >= self.params.max_buffer_size then + if not self:flush_payload(send_method, payload) then + return false + end + + -- reset payload and counter because events have been sent + payload = nil + counter = 0 + end + end end - local rem = self.params.reverse_element_mapping; + -- we need to empty all queues to not mess with broker retention + if not self:flush_payload(send_method, payload) then + return false + end - -- flush if events in the queue are too old or if the queue is full - if (os.time() > self.queues[category][element].flush_date + self.params.max_buffer_age) - or (#self.queues[category][element].events > self.params.max_buffer_size) - then - self.sc_logger:debug("[sc_flush:flush_queue]: flushing all the " .. rem[category][element] .. " events. Last flush date was: " - .. tostring(self.queues[category][element].flush_date) .. ". Buffer size is: " .. tostring(#self.queues[category][element].events)) - local retval = send_method(self.queues[category][element].events, rem[category][element]) + -- all events have been sent + return true +end - if retval then - self:reset_queue(category, element) +--- flush_homogeneous_payload: flush a payload that contains a single type of events (services with services only and hosts with hosts only for example) +-- @return boolean (boolean) true or false depending on the success of the operation +function ScFlush:flush_homogeneous_payload(build_payload_method, send_method) + local counter = 0 + local payload = nil + + -- get all queues + for _, element_info in pairs(self.params.accepted_elements_info) do + -- get events from queues + for _, event in ipairs(self.queues[element_info.category_id][element_info.element_id].events) do + -- add event to the payload + payload = build_payload_method(payload, event) + counter = counter + 1 + + -- send events if max buffer size is reached + if counter >= self.params.max_buffer_size then + if not self:flush_payload(send_method, payload) then + return false + end + + -- reset payload and counter because events have been sent + counter = 0 + payload = nil + end end - else - return true + + -- make sure there are no events left inside a specific queue + if not self:flush_payload(send_method, payload) then + return false + end + + -- reset payload to not mix events from different queues + payload = nil end - return retval + return true end ---- reset_queue: put a queue back to its initial state after flushing its events --- @param category (number) the category related to the queue --- @param element (number) the element related to the queue -function ScFlush:reset_queue(category, element) - self.queues[category][element].flush_date = os.time() - self.queues[category][element].events = {} +--- flush_payload: flush 
the given payload using the provided send_method (nothing is sent when the payload is empty) +-- @return boolean (boolean) true or false depending on the success of the operation +function ScFlush:flush_payload(send_method, payload) + if payload then + if not send_method(payload) then + return false + end + end + + return true +end + return sc_flush \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 94dbfce35dc..d437ff8a16d 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -66,7 +66,9 @@ function sc_params.new(common, logger) -- communication parameters max_buffer_size = 1, - max_buffer_age = 5, + max_buffer_age = 5, --deprecated + max_all_queues_age = 60, + send_mixed_events = 1, -- connection parameters connection_timeout = 60, @@ -87,6 +89,7 @@ -- internal parameters __internal_ts_last_flush = os.time(), + __internal_last_global_flush_date = os.time(), -- testing parameters send_data_test = 0, diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md index bfbe56b47d5..5dfd6c1d2c1 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -42,6 +42,7 @@ | generate_postfield_param_string | convert a table of parameters into an url encoded parameters string | [Documentation](sc_common.md#generate_postfield_param_string-method) | | load_json_file | method loads a json file and parse it | [Documentation](sc_common.md#load_json_file-method) | | json_escape | escape json characters in a string | [Documentation](sc_common.md#json_escape-method) | +| xml_escape | escape xml characters in a string | [Documentation](sc_common.md#xml_escape-method) | ## sc_logger methods diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md index 10ca97c6e0c..0f877f83c44 100644 --- a/stream-connectors/modules/docs/sc_common.md +++ b/stream-connectors/modules/docs/sc_common.md @@ -45,6 +45,10 @@ - [json_escape: parameters](#json_escape-parameters) - [json_escape: returns](#json_escape-returns) - [json_escape: example](#json_escape-example) + - [xml_escape method](#xml_escape-method) + - [xml_escape: parameters](#xml_escape-parameters) + - [xml_escape: returns](#xml_escape-returns) + - [xml_escape: example](#xml_escape-example) ## Introduction @@ -399,3 +403,29 @@ local string = 'string with " and backslashes \\ and tab:\tend tab' local result = test_common:json_escape(string) --> result is 'string with \" and backslashes \\ and tab:\tend tab' ``` + +## xml_escape method + +The **xml_escape** method escapes XML special characters.
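+
+A minimal usage sketch (it reuses the `test_common` instance from the json_escape example above; the XML layout is purely illustrative):
+
+```lua
+local escaped = test_common:xml_escape('disk > 90% & "critical"')
+--> escaped is 'disk &gt; 90% &amp; &quot;critical&quot;'
+local xml_payload = "<output>" .. escaped .. "</output>"
+```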
+ +### xml_escape: parameters + +| parameter | type | optional | default value | +| ----------------------------- | ------ | -------- | ------------- | +| a string that must be escaped | string | no | | + +### xml_escape: returns + +| return | type | always | condition | +| ---------------------------------------------------------------------- | -------------------------------- | ------ | --------- | +| an escaped string (or the raw parameter if it was nil or not a string) | string (or input parameter type) | yes | | + +### xml_escape: example + +```lua +local string = 'string with " and < and >' +--> string is 'string with " and < and >' + +local result = test_common:xml_escape(string) +--> result is 'string with &quot; and &lt; and &gt;' +``` diff --git a/stream-connectors/modules/docs/sc_flush.md b/stream-connectors/modules/docs/sc_flush.md index 266f8a2f486..ce00f760e1c 100644 --- a/stream-connectors/modules/docs/sc_flush.md +++ b/stream-connectors/modules/docs/sc_flush.md @@ -98,6 +98,7 @@ head over the following chapters for more information | the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | | the category of the queue that we need to flush | number | no | | | the element of the queue that we need to flush | number | no | | +| force a flush ignoring max_buffer_age and max_buffer_size | boolean | yes | false | ### flush_queue: returns @@ -119,13 +120,13 @@ test_flush.queues[1][14].events = { [2] = "second event" } -local result = test_flush:flush_queue(send_data, 1, 14) +local result = test_flush:flush_queue(send_data, 1, 14, false) --> result is true -- initiate a empty queue for service_status events test_.queues[1][24].events = {} -result = test_flush:flush_queue(send_data, 1, 24) +result = test_flush:flush_queue(send_data, 1, 24, false) --> result is false because buffer size is 0 ``` diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index 9a4cc24c45e..e56dd27a4f5 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -49,6 +49,8 @@ The sc_param module provides methods to help you handle parameters for your stre | skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | | max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). | | | | max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | +| max_all_queues_age | number | 300 | if last global flush date was 300 seconds ago, it will force a flush of each queue | | | +| send_mixed_events | number | 1 | when sending data, it will mix all sorts of events in every payload. It means that you can have events about hosts mixed with events about services when set to 1. Performance wise, it is **better** to set it to **1**. **Only** set it to **0** if the tool that you are sending events to **doesn't handle a payload with mixed events**.
| | | | service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. it must be used with service_severity_operator option | service_status(neb), acknowledgement(neb) | | | service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb), acknowledgement(neb) | | | host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. it must be used with host_severity_operator option | host_status(neb), service_status(neb) , acknowledgement(neb) | | diff --git a/stream-connectors/modules/specs/2.0.x/centreon-stream-connectors-lib-2.0.0-1.rockspec b/stream-connectors/modules/specs/2.0.x/centreon-stream-connectors-lib-2.0.0-1.rockspec new file mode 100644 index 00000000000..d2087ffb63f --- /dev/null +++ b/stream-connectors/modules/specs/2.0.x/centreon-stream-connectors-lib-2.0.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "2.0.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "2.0.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 0e5fc41aa1bd8cc1ad551d574e7ccabc558a9a31 Mon Sep 17 00:00:00 2001 From: psamecentreon Date: Tue, 22 Feb 2022 16:57:15 +0000 Subject: [PATCH 127/219] Enhancement event format to multi event --- 
stream-connectors/centreon-certified/omi/omi_events-apiv2.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua index ceee4613f88..16c23d1a6ee 100644 --- a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua +++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua @@ -210,7 +210,7 @@ function EventQueue:build_payload(payload, event) payload = payload .. "" else - payload = payload .. "\t" + payload = payload .. "\n\t" for index, xml_str in pairs(event) do payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "\t" end @@ -360,4 +360,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false -end +end \ No newline at end of file From bc0360acdeb24a8e6f5b0aa3e36f38c8400f10a6 Mon Sep 17 00:00:00 2001 From: psamecentreon Date: Tue, 22 Feb 2022 17:10:02 +0000 Subject: [PATCH 128/219] enhencament format event for event in payload From 64cc9d9ab65c83e2a31e6da545572be000348732 Mon Sep 17 00:00:00 2001 From: tcharles Date: Wed, 16 Mar 2022 14:05:00 +0100 Subject: [PATCH 129/219] (stream/lib) sc_event: add new event outputs options (#97) * add new output management options * better pagerduty event format --- .../elasticsearch/elastic-events-apiv2.lua | 4 +- .../kafka/kafka-events-apiv2.lua | 4 +- .../pagerduty/pagerduty-events-apiv2.lua | 7 +- .../signl4/signl4-events-apiv2.lua | 4 +- .../splunk/splunk-events-apiv2.lua | 4 +- .../sc_event.lua | 26 ++++++ .../sc_params.lua | 6 ++ stream-connectors/modules/docs/sc_event.md | 12 +++ stream-connectors/modules/docs/sc_param.md | 85 ++++++++++--------- 9 files changed, 101 insertions(+), 51 deletions(-) diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua index 1d5f6b0cfe2..10b3a5e6278 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -135,7 +135,7 @@ function EventQueue:format_accepted_event() event_type = "host", timestamp = self.sc_event.event.last_check, host = self.sc_event.event.cache.host.name, - output = string.gsub(self.sc_event.event.output, "\n", " "), + output = self.sc_event.event.output, status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], state = self.sc_event.event.state, state_type = self.sc_event.event.state_type @@ -151,7 +151,7 @@ function EventQueue:format_accepted_event() status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], state = self.sc_event.event.state, state_type = self.sc_event.event.state_type, - output = string.gsub(self.sc_event.event.output, "\n", " "), + output = self.sc_event.event.output, } end diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 27b2fc1002f..0bc1bace3fc 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -140,7 +140,7 @@ function EventQueue:format_host_status() self.sc_event.event.formated_event = { host = tostring(self.sc_event.event.cache.host.name), state = 
self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - output = self.sc_common:ifnil_or_empty(string.match(string.gsub(self.sc_event.event.output, '\\', "_"), "^(.*)\n"), "no output"), + output = self.sc_common:ifnil_or_empty(string.gsub(self.sc_event.event.output, '\\', "_"), "no output"), } end @@ -149,7 +149,7 @@ function EventQueue:format_service_status() host = tostring(self.sc_event.event.cache.host.name), service = tostring(self.sc_event.event.cache.service.description), state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], - output = self.sc_common:ifnil_or_empty(string.match(string.gsub(self.sc_event.event.output, '\\', "_"), "^(.*)\n"), "no output") + output = self.sc_common:ifnil_or_empty(string.gsub(self.sc_event.event.output, '\\', "_"), "no output") } end diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index 4409e22f520..5a1ba6e314c 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -178,10 +178,11 @@ function EventQueue:format_event_host() pdy_custom_details['Hostseverity'] = host_severity end + pdy_custom_details["Output"] = self.sc_common:ifnil_or_empty(event.output, "no output") self.sc_event.event.formated_event = { payload = { - summary = tostring(event.cache.host.name) .. ": " .. self.sc_common:ifnil_or_empty(string.match(event.output, "^(.*)\n"), 'no output'), + summary = tostring(event.cache.host.name) .. ": " .. self.sc_params.params.status_mapping[event.category][event.element][event.state], timestamp = new_from_timestamp(event.last_update):rfc_3339(), severity = self.state_to_severity_mapping[event.state].severity, source = self.sc_params.params.pdy_source or tostring(event.cache.host.name), @@ -261,9 +262,11 @@ function EventQueue:format_event_service() pdy_custom_details["Serviceseverity"] = service_severity end + pdy_custom_details["Output"] = self.sc_common:ifnil_or_empty(event.output, "no output") + self.sc_event.event.formated_event = { payload = { - summary = tostring(event.cache.host.name) .. "/" .. tostring(event.cache.service.description) .. ": " .. self.sc_common:ifnil_or_empty(string.match(event.output, "^(.*)\n"), 'no output'), + summary = tostring(event.cache.host.name) .. "/" .. tostring(event.cache.service.description) .. ": " .. 
self.sc_params.params.status_mapping[event.category][event.element][event.state], timestamp = new_from_timestamp(event.last_update):rfc_3339(), severity = self.state_to_severity_mapping[event.state].severity, source = self.sc_params.params.pdy_source or tostring(event.cache.host.name), diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua index eedfe4b3d58..4bde0a224b0 100644 --- a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua @@ -135,7 +135,7 @@ function EventQueue:format_event_host() EventType = "HOST", Date = self.sc_macros:transform_date(self.sc_event.event.last_check), Host = self.sc_event.event.cache.host.name, - Message = string.gsub(self.sc_event.event.output, "\n", " "), + Message = self.sc_event.event.output, Status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], Title = "HOST ALERT:" .. self.sc_event.event.cache.host.name .. " is " .. self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], ["X-S4-SourceSystem"] = self.sc_params.params.x_s4_source_system, @@ -150,7 +150,7 @@ function EventQueue:format_event_service() Date = self.sc_macros:transform_date(self.sc_event.event.last_check), Host = self.sc_event.event.cache.host.name, Service = self.sc_event.event.cache.service.description, - Message = string.gsub(self.sc_event.event.output, "\n", " "), + Message = self.sc_event.event.output, Status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], Title = "SERVICE ALERT:" .. self.sc_event.event.cache.host.name .. "/" .. self.sc_event.event.cache.service.description .. " is " .. 
self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], ["X-S4-SourceSystem"] = self.sc_params.params.x_s4_source_system, diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index b997b5b9938..b5dc8a4e0ea 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -129,7 +129,7 @@ function EventQueue:format_event_host() state = self.sc_event.event.state, state_type = self.sc_event.event.state_type, hostname = self.sc_event.event.cache.host.name, - output = string.gsub(self.sc_event.event.output, "\n", ""), + output = self.sc_event.event.output, } end @@ -140,7 +140,7 @@ function EventQueue:format_event_service() state_type = self.sc_event.event.state_type, hostname = self.sc_event.event.cache.host.name, service_description = self.sc_event.event.cache.service.description, - output = string.gsub(self.sc_event.event.output, "\n", ""), + output = self.sc_event.event.output, } end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index f1f1fd44895..c7d5dc562d7 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -144,6 +144,8 @@ function ScEvent:is_valid_host_status_event() return false end + self:build_outputs() + return true end @@ -217,6 +219,8 @@ function ScEvent:is_valid_service_status_event() return false end + self:build_outputs() + return true end @@ -1099,6 +1103,28 @@ function ScEvent:is_valid_downtime_event_end() return false end +--- build_outputs: adds short_output and long_output entries in the event table. 
output entry will be equal to one or another depending on the use_long_output param +function ScEvent:build_outputs() + self.event.long_output = self.event.output + + -- no short output if there is no line break + local short_output = string.match(self.event.output, "^(.*)\n") + if short_output then + self.event.short_output = short_output + end + + -- use short output if it exists + if self.params.use_long_output == 0 and short_output then + self.event.output = short_output + + -- replace line break if asked to and we are not already using a short output + elseif not short_output and self.params.remove_line_break_in_output == 1 then + self.event.output = string.gsub(self.event.output, "\n", self.params.output_line_break_replacement_character) + end + +end + --- is_valid_storage: DEPRECATED method, use NEB category to get metric data instead -- @return true (boolean) function ScEvent:is_valid_storage_event() diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index d437ff8a16d..9b626c63710 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -82,6 +82,9 @@ function sc_params.new(common, logger) -- event formatting parameters format_file = "", + use_long_output = 1, + remove_line_break_in_output = 1, + output_line_break_replacement_character = " ", -- time parameters local_time_diff_from_utc = os.difftime(os.time(), os.time(os.date("!*t", os.time()))), @@ -659,6 +662,9 @@ function ScParams:check_params() self.params.allow_insecure_connection = self.common:number_to_boolean(self.common:check_boolean_number_option_syntax(self.params.allow_insecure_connection, 0)) self.params.logfile = self.common:ifnil_or_empty(self.params.logfile, "/var/log/centreon-broker/stream-connector.log") self.params.log_level = self.common:ifnil_or_empty(self.params.log_level, 1) + self.params.use_long_output = self.common:check_boolean_number_option_syntax(self.params.use_long_output, 1) + self.params.remove_line_break_in_output = self.common:check_boolean_number_option_syntax(self.params.remove_line_break_in_output, 1) + self.params.output_line_break_replacement_character = self.common:if_wrong_type(self.params.output_line_break_replacement_character, "string", " ") end --- get_kafka_params: retrieve the kafka parameters and store them the self.params.kafka table diff --git a/stream-connectors/modules/docs/sc_event.md b/stream-connectors/modules/docs/sc_event.md index 1df2f57b2c8..514027a19de 100644 --- a/stream-connectors/modules/docs/sc_event.md +++ b/stream-connectors/modules/docs/sc_event.md @@ -120,6 +120,8 @@ - [is_valid_downtime_event_end method](#is_valid_downtime_event_end-method) - [is_valid_downtime_event_end: returns](#is_valid_downtime_event_end-returns) - [is_valid_downtime_event_end: example](#is_valid_downtime_event_end-example) + - [build_outputs method](#build_outputs-method) + - [build_outputs: example](#build_outputs-example) - [is_valid_storage_event method](#is_valid_storage_event-method) ## Introduction @@ -1072,6 +1074,16 @@ local result = test_event:is_valid_downtime_event_end() --> result is true or false ``` +## build_outputs method + +The **build_outputs** method adds short_output and long_output entries in the event table. The output entry is set to one or the other depending on the [**use_long_output** parameter](sc_param.md#default-parameters).
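+
+As an illustrative sketch (the event output below is invented, and the parameters are assumed to contain `use_long_output = 0`):
+
+```lua
+-- test_event.event.output is "CRITICAL: 2 errors\ndetail: error on item 1"
+test_event:build_outputs()
+--> test_event.event.long_output is the full two line output
+--> test_event.event.short_output is "CRITICAL: 2 errors"
+--> test_event.event.output is "CRITICAL: 2 errors" because use_long_output is set to 0
+```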
+ +### build_outputs: example + +```lua +local result = test_event:build_outputs() +``` + ## is_valid_storage_event method **DEPRECATED** does nothing diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index 9a4cc24c45e..e56dd27a4f5 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -31,47 +31,50 @@ The sc_param module provides methods to help you handle parameters for your stre ### Default parameters -| Parameter name | type | default value | description | default scope | additionnal information | -| --------------------------- | ------ | ----------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a coma separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) | -| accepted_elements | string | host_status,service_status,ba_status | | each event is linked to a broker element that we can use to filter events | it is a coma separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore.
"Host status" becomes "host_status") | -| host_status | string | 0,1,2 | coma separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | -| service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | -| ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | -| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | -| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | -| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | -| accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | -| accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | -| accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | -| accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | -| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | -| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | -| max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). | | | -| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | -| max_all_queues_age | number | 300 | if last global flush date was 300 seconds ago, it will force a flush of each queue | | | -| send_mixed_events | number | 1 | when sending data, it will mix all sorts of events in every payload. It means that you can have events about hosts mixed with events about services when set to 1. Performance wise, it is **better** to set it to **1**. **Only** set it to **0** if the tool that you are sending events to **doesn't handle a payload with mixed events**. | | | -| service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. it must be used with service_severity_operator option | service_status(neb), acknowledgement(neb) | | -| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb), acknowledgement(neb) | | -| host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. 
it must be used with host_severity_operator option | host_status(neb), service_status(neb) , acknowledgement(neb) | | -| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb), acknowledgement(neb) | | -| ack_host_status | string | | | coma separated list of accepted host status for an acknowledgement event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | acknowledgement(neb) | | -| ack_service_status | string | | | coma separated list of accepted service status for an acknowledgement event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | acknowledgement(neb) | | -| dt_host_status | string | | | coma separated list of accepted host status for a downtime event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | downtime(neb) | | -| dt_service_status | string | | | coma separated list of accepted service status for a downtime event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | downtime(neb) | | -| enable_host_status_dedup | number | 1 | | enable the deduplication of host status event when set to 1 | host_status(neb) | | -| enable_service_status_dedup | number | 1 | | enable the deduplication of service status event when set to 1 | service_status(neb) | | -| accepted_authors | string | | | coma separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | | -| local_time_diff_from_utc | number | default value is the time difference the centreon central server has from UTC | | the time difference from UTC in seconds | all | | -| timestamp_conversion_format | string | %Y-%m-%d %X | | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | [date format information](https://www.lua.org/pil/22.1.html) | -| send_data_test | number | 0 | | When set to 1, send data in the logfile of the stream connector instead of sending it where the stream connector was designed to | all | | -| format_file | string | | | Path to a file that will be used as a template to format events instead of using default format | only usable for events stream connectors (\*-events-apiv2.lua) and not metrics stream connectors (\*-metrics-apiv2.lua) you should put the file in /etc/centreon-broker to keep your broker configuration in a single place. 
-| proxy_address | string | | | address of the proxy | |
-| proxy_port | number | | | port of the proxy | |
-| proxy_username | string | | | user for the proxy | |
-| proxy_password | string | | | pasword of the proxy user | |
-| connection_timeout | number | 60 | | time to wait in second when opening connection | |
-| allow_insecure_connection | number | 0 | | check the certificate validity of the peer host (0 = needs to be a valid certificate), use 1 if you are using self signed certificates | |
+| Parameter name | type | default value | description | default scope | additional information |
+| --------------------------------------- | ------ | ----------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a comma separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) |
+| accepted_elements | string | host_status,service_status,ba_status | each event is linked to a broker element that we can use to filter events | | it is a comma separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore. "Host status" becomes "host_status") |
"Host status" becomes "host_status") | +| host_status | string | 0,1,2 | coma separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | +| service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | +| ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | +| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | +| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | +| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | +| accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | +| accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | +| accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | +| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | +| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | +| max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). | | | +| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | +| max_all_queues_age | number | 300 | if last global flush date was 300 seconds ago, it will force a flush of each queue | | | +| send_mixed_events | number | 1 | when sending data, it will mix all sorts of events in every payload. It means that you can have events about hosts mixed with events about services when set to 1. Performance wise, it is **better** to set it to **1**. **Only** set it to **0** if the tool that you are sending events to **doesn't handle a payload with mixed events**. | | | +| service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. it must be used with service_severity_operator option | service_status(neb), acknowledgement(neb) | | +| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb), acknowledgement(neb) | | +| host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. 
+| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| ack_host_status | string | | comma separated list of accepted host statuses for an acknowledgement event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | acknowledgement(neb) | |
+| ack_service_status | string | | comma separated list of accepted service statuses for an acknowledgement event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | acknowledgement(neb) | |
+| dt_host_status | string | | comma separated list of accepted host statuses for a downtime event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | downtime(neb) | |
+| dt_service_status | string | | comma separated list of accepted service statuses for a downtime event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | downtime(neb) | |
+| enable_host_status_dedup | number | 1 | enable the deduplication of host status events when set to 1 | host_status(neb) | |
+| enable_service_status_dedup | number | 1 | enable the deduplication of service status events when set to 1 | service_status(neb) | |
+| accepted_authors | string | | comma separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | |
+| local_time_diff_from_utc | number | the time difference the Centreon central server has from UTC | the time difference from UTC in seconds | all | |
+| timestamp_conversion_format | string | %Y-%m-%d %X | the date format used to convert timestamps. The default value prints dates like this: 2021-06-11 10:43:38 | all | [date format information](https://www.lua.org/pil/22.1.html) |
+| send_data_test | number | 0 | when set to 1, write the payload in the logfile of the stream connector instead of sending it to its intended destination | all | |
+| format_file | string | | path to a file that will be used as a template to format events instead of using the default format | only usable for events stream connectors (\*-events-apiv2.lua), not metrics stream connectors (\*-metrics-apiv2.lua) | you should put the file in /etc/centreon-broker to keep your broker configuration in a single place. [**See documentation for more information**](templating.md) |
+| proxy_address | string | | address of the proxy | | |
+| proxy_port | number | | port of the proxy | | |
+| proxy_username | string | | user for the proxy | | |
+| proxy_password | string | | password of the proxy user | | |
+| connection_timeout | number | 60 | time in seconds to wait when opening the connection | | |
+| allow_insecure_connection | number | 0 | check the certificate validity of the peer host (0 = the certificate must be valid), use 1 if you are using self-signed certificates | | |
+| use_long_output | number | 1 | use the long output when sending an event (set to 0 to send the short output) | service_status(neb), host_status(neb) | |
+| remove_line_break_in_output | number | 1 | replace all line breaks (\n) in the output with the character set in the output_line_break_replacement_character parameter | service_status(neb), host_status(neb) | |
+| output_line_break_replacement_character | string | " " | the value used to replace line breaks in the output (default value is a blank space) | service_status(neb), host_status(neb) | |
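+
+As a quick illustration, here is a hypothetical `params` table, as Centreon Broker would hand it to a stream connector's `init()` function (the keys mirror the rows above; the values are only examples):
+
+```lua
+-- example configuration for a neb events connector (illustrative values)
+local params = {
+  accepted_categories = "neb",
+  accepted_elements = "host_status,service_status",
+  hard_only = 1,        -- HARD states only
+  acknowledged = 0,     -- drop acknowledged events
+  max_buffer_size = 10, -- send events in batches of 10
+  max_buffer_age = 30
+}
+```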
## Module initialization

From eba62fb5756feabb87ff66ef854d6cceb1be511a Mon Sep 17 00:00:00 2001
From: tcharles
Date: Thu, 17 Mar 2022 13:23:25 +0100
Subject: [PATCH 130/219] (streams/lib): add rockspec 2.1.0-1 (#102)

* add rockspec 2.1.0
---
 ...eon-stream-connectors-lib-2.1.0-1.rockspec | 39 +++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 stream-connectors/modules/specs/2.1.x/centreon-stream-connectors-lib-2.1.0-1.rockspec

diff --git a/stream-connectors/modules/specs/2.1.x/centreon-stream-connectors-lib-2.1.0-1.rockspec b/stream-connectors/modules/specs/2.1.x/centreon-stream-connectors-lib-2.1.0-1.rockspec
new file mode 100644
index 00000000000..eda06245b12
--- /dev/null
+++ b/stream-connectors/modules/specs/2.1.x/centreon-stream-connectors-lib-2.1.0-1.rockspec
@@ -0,0 +1,39 @@
+package = "centreon-stream-connectors-lib"
+version = "2.1.0-1"
+source = {
+  url = "git+https://github.com/centreon/centreon-stream-connector-scripts",
+  tag = "2.1.0-1"
+}
+description = {
+  summary = "Centreon stream connectors lua modules",
+  detailed = [[
+    These modules provide helpful methods to create
+    stream connectors for Centreon
+  ]],
+  license = ""
+}
+dependencies = {
+  "lua >= 5.1, < 5.4",
+  "luasocket >= 3.0rc1-2"
+}
+build = {
+  type = "builtin",
+  modules = {
+    ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua",
+    ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua",
+    ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua",
+    ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua",
+    ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua",
+    ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua",
+    ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua",
+    ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua",
+    ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua",
+    ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua",
+    ["centreon-stream-connectors-lib.rdkafka.librdkafka"] =
"modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 91afaf8a8a4e6e67a309fa1a93337b57ca32184c Mon Sep 17 00:00:00 2001 From: tcharles Date: Wed, 13 Apr 2022 13:17:47 +0200 Subject: [PATCH 131/219] forgot require sc_flush lib --- .../centreon-certified/kafka/kafka-events-apiv2.lua | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 0bc1bace3fc..3fc87dc7f2a 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -6,6 +6,7 @@ local sc_broker = require("centreon-stream-connectors-lib.sc_broker") local sc_event = require("centreon-stream-connectors-lib.sc_event") local sc_params = require("centreon-stream-connectors-lib.sc_params") local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") local kafka_config = require("centreon-stream-connectors-lib.rdkafka.config") local kafka_producer = require("centreon-stream-connectors-lib.rdkafka.producer") local kafka_topic_config = require("centreon-stream-connectors-lib.rdkafka.topic_config") @@ -288,4 +289,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false -end \ No newline at end of file +end From 1aa7b8b5ee829e83fc547e61d4886289ef26411a Mon Sep 17 00:00:00 2001 From: matoy Date: Thu, 14 Apr 2022 13:40:11 +0200 Subject: [PATCH 132/219] (stream/events) missing sc_flush new instance --- .../centreon-certified/kafka/kafka-events-apiv2.lua | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 3fc87dc7f2a..7f00f134527 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -79,7 +79,8 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file() self.sc_params:build_accepted_elements_info() - + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements From cd0378b15db0bc311decd1fe02642788e3b585e1 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 21 Apr 2022 16:51:54 +0200 Subject: [PATCH 133/219] (stream/event): fix add queue method (#106) this was still the old add() method so it wasn't adding the event to the right queue and nothing was flushed at all --- .../kafka/kafka-events-apiv2.lua | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git 
From 91afaf8a8a4e6e67a309fa1a93337b57ca32184c Mon Sep 17 00:00:00 2001
From: tcharles
Date: Wed, 13 Apr 2022 13:17:47 +0200
Subject: [PATCH 131/219] forgot require sc_flush lib

---
 .../centreon-certified/kafka/kafka-events-apiv2.lua | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
index 0bc1bace3fc..3fc87dc7f2a 100644
--- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
+++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
@@ -6,6 +6,7 @@
 local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
 local sc_event = require("centreon-stream-connectors-lib.sc_event")
 local sc_params = require("centreon-stream-connectors-lib.sc_params")
 local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
 local kafka_config = require("centreon-stream-connectors-lib.rdkafka.config")
 local kafka_producer = require("centreon-stream-connectors-lib.rdkafka.producer")
 local kafka_topic_config = require("centreon-stream-connectors-lib.rdkafka.topic_config")
@@ -288,4 +289,4 @@ function flush()
   -- there are events in the queue but they were not ready to be send
   return false
-end
\ No newline at end of file
+end

From 1aa7b8b5ee829e83fc547e61d4886289ef26411a Mon Sep 17 00:00:00 2001
From: matoy
Date: Thu, 14 Apr 2022 13:40:11 +0200
Subject: [PATCH 132/219] (stream/events) missing sc_flush new instance

---
 .../centreon-certified/kafka/kafka-events-apiv2.lua | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
index 3fc87dc7f2a..7f00f134527 100644
--- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
+++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
@@ -79,7 +79,8 @@ function EventQueue.new(params)
   self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger)
   self.format_template = self.sc_params:load_event_format_file()
   self.sc_params:build_accepted_elements_info()
-
+  self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger)
+
   local categories = self.sc_params.params.bbdo.categories
   local elements = self.sc_params.params.bbdo.elements

From cd0378b15db0bc311decd1fe02642788e3b585e1 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Thu, 21 Apr 2022 16:51:54 +0200
Subject: [PATCH 133/219] (stream/event): fix add queue method (#106)

this was still the old add() method so it wasn't adding the event to the
right queue and nothing was flushed at all
---
 .../kafka/kafka-events-apiv2.lua | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
index 7f00f134527..418b2539fc3 100644
--- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
+++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
@@ -166,9 +166,20 @@ end
 --------------------------------------------------------------------------------
 -- EventQueue:add, add an event to the sending queue
 --------------------------------------------------------------------------------
-function EventQueue:add ()
-  -- store event in self.events list
-  self.events[#self.events + 1] = self.sc_event.event.formated_event
+function EventQueue:add()
+  -- store the formatted event in the matching sc_flush queue
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. " max is: " .. tostring(self.sc_params.params.max_buffer_size))
 end
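For context, the new add() above relies on the per-category, per-element queue layout that sc_flush sets up. Roughly (a sketch; the numeric ids shown follow the usual BBDO mapping but are resolved from params.bbdo at runtime, so treat them as assumptions):

```lua
-- sketch of sc_flush's queues table as EventQueue:add() uses it:
-- one events array per accepted (category, element) pair
local queues = {
  [1] = {                   -- neb category (assumed id)
    [14] = { events = {} }, -- host_status (assumed id)
    [24] = { events = {} }  -- service_status (assumed id)
  }
}
-- the old add() pushed into a flat self.events list instead, which is why
-- sc_flush:get_queues_size() saw nothing and flush() never sent anything
```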
require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "api_key" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/datadog-events.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + --params.max_buffer_size = 1 + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.api_key = params.api_key + self.sc_params.params.datadog_centreon_url = params.datadog_centreon_url or "http://yourcentreonaddress.local" + self.sc_params.params.datadog_event_endpoint = params.datadog_event_endpoint or "/api/v1/events" + self.sc_params.params.http_server_url = params.http_server_url or "https://api.datadoghq.com" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.state_to_alert_type_mapping = { + [categories.neb.id] = { + [elements.host_status.id] = { + [0] = "info", + [1] = "error", + [2] = "warning" + }, + [elements.service_status.id] = { + [0] = "info", + [1] = "warning", + [2] = "error", + [3] = "warning" + } + } + } + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload) return 
+    [1] = function (payload) return self:send_data(payload) end
+  }
+
+  self.build_payload_method = {
+    [1] = function (payload, event) return self:build_payload(payload, event) end
+  }
+
+  -- return EventQueue object
+  setmetatable(self, { __index = EventQueue })
+  return self
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_accepted_event method
+----------------------------------------------------------------------------------
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  local template = self.sc_params.params.format_template[category][element]
+
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+  self.sc_event.event.formated_event = {}
+
+  if self.format_template and template ~= nil and template ~= "" then
+    self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true)
+  else
+    -- can't format the event when the stream connector doesn't handle this kind of event and no template file covers it
+    if not self.format_event[category][element] then
+      self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+        .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+        .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    title = tostring(self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. event.cache.host.name),
+    text = event.output,
+    aggregation_key = "host_" .. tostring(event.host_id),
+    alert_type = self.state_to_alert_type_mapping[event.category][event.element][event.state],
+    host = tostring(event.cache.host.name),
+    date_happened = event.last_check
+  }
+end
+
+function EventQueue:format_event_service()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    title = tostring(self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. event.cache.host.name .. ": " .. event.cache.service.description),
+    text = event.output,
+    aggregation_key = "service_" .. tostring(event.host_id) .. "_" .. tostring(event.service_id),
+    alert_type = self.state_to_alert_type_mapping[event.category][event.element][event.state],
+    host = tostring(event.cache.host.name),
+    date_happened = event.last_check
+  }
+end
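+
+-- Illustration (hypothetical values): for a CRITICAL "Cpu" service on host
+-- "srv-1" (host_id 26, service_id 42) last checked at 1652000000,
+-- format_event_service() builds:
+-- {
+--   title = "CRITICAL srv-1: Cpu",
+--   text = "CPU usage is too high",
+--   aggregation_key = "service_26_42",
+--   alert_type = "error",
+--   host = "srv-1",
+--   date_happened = 1652000000
+-- }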
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store the formatted event in the matching sc_flush queue
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. " max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {string} json encoded string
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {string} json encoded string
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = broker.json_encode(event)
+  else
+    payload = payload .. broker.json_encode(event)
+  end
+
+  return payload
+end
+
+function EventQueue:send_data(payload)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+
+  local url = self.sc_params.params.http_server_url .. self.sc_params.params.datadog_event_endpoint
+
+  -- write payload in the logfile for test purposes
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(payload))
+    return true
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload))
+  self.sc_logger:info("[EventQueue:send_data]: Datadog address is: " .. tostring(url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "content-type: application/json",
+        "DD-API-KEY:" .. self.sc_params.params.api_key
+      }
+    )
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(payload)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  -- https://docs.datadoghq.com/fr/api/latest/events/ anything other than 202 is not good
+  if http_response_code == 202 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_event:is_valid_element() then
+      -- format event if it is validated
+      if queue.sc_event:is_valid_event() then
+        queue:format_accepted_event()
+      end
+    --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because the last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
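A quick illustration of how this Datadog events connector is configured: only api_key is mandatory; everything else falls back to the defaults set in EventQueue.new() above. A hypothetical conf table, as Broker would pass it to init() (values are placeholders):

```lua
local conf = {
  api_key = "0000000000000000",                            -- placeholder key
  datadog_centreon_url = "http://centreon.example.local",  -- optional override
  logfile = "/var/log/centreon-broker/datadog-events.log",
  log_level = 1
}
-- init(conf) then builds the EventQueue with these values
```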
From 164165f89060fc37bf0dd548f0619044f7d0de87 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Tue, 10 May 2022 09:43:49 +0200
Subject: [PATCH 135/219] (stream/lib) Metric v2 (#103)

* fix requires in test_kafka script
* fix rockspec again
* update kafka and fix readme
* typos
* (stream/lib) sc_flush: new queuing system
* (stream/connectors) compat patch for queue system
* (stream/documentation) add doc for new params
* (stream/lib): sc_flush - fix bad param name
* shaping flush methods
* naming + missing parameter
* better useless event management with flush
* avoid calling same method twice
* update all apiv2 sc
* add xml_escape method
* add rockspec file
* add doc for xml escape method
* prettier markdown
* fix host_status event dedup
* add new output management options
* better pagerduty event format
* add a dumper method
* add some metric stuff
* metrics first tests
* fixes datadog metrics bugs
* fix log message sc_event
* add new params doc + fix md
* improve doc + more doc
* simplify sc_metrics
* add new standard parameters
* better dumper display
* new snow stream connector
* improve snow sc
* metric v2 splunk metrics
* debug + params for datadog metrics
* finishing metrics sc
* forgot replacement character
* "better" splunk metrics event format
* revert change
* add default buffer size
* fix datadog metric name regex
---
 stream-connectors/CONTRIBUTE.md               |  10 +-
 stream-connectors/README.md                   |  41 +-
 .../datadog/datadog-metrics-apiv2.lua         | 405 ++++++++++++++
 ...iv2.lua => servicenow-em-events-apiv2.lua} |   2 +-
 .../servicenow-incident-events-apiv2.lua      | 500 ++++++++++++++++++
 .../splunk/splunk-metrics-apiv2.lua           | 110 +++-
 .../sc_common.lua                             |  52 ++
 .../sc_event.lua                              |   6 +-
 .../sc_metrics.lua                            |  48 +-
 .../sc_params.lua                             |  16 +-
 stream-connectors/modules/docs/README.md      |   2 +
 stream-connectors/modules/docs/sc_common.md   |  45 ++
 stream-connectors/modules/docs/sc_metrics.md  |  26 +-
 stream-connectors/modules/docs/sc_param.md    |  92 ++--
 14 files changed, 1242 insertions(+), 113 deletions(-)
 create mode 100644 stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua
 rename stream-connectors/centreon-certified/servicenow/{servicenow-events-apiv2.lua => servicenow-em-events-apiv2.lua} (99%)
 create mode 100644 stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua

diff --git a/stream-connectors/CONTRIBUTE.md b/stream-connectors/CONTRIBUTE.md
index 523558243b9..22abb20d32d 100644
--- a/stream-connectors/CONTRIBUTE.md
+++ b/stream-connectors/CONTRIBUTE.md
@@ -12,7 +12,7 @@ You can work on Stream Connectors
 - Update an existing stream connector
 - [Fix issues](https://github.com/centreon/centreon-stream-connector-scripts/issues)
 
-You can improve our Lua modules 
+You can improve our Lua modules
 
 - Add a new module
 - Comment it
@@ -22,7 +22,7 @@ You can improve our Lua modules
 - Update the documentation (if it changes the input and/or output of a method)
 - Update usage examples if there are any and if they are impacted by the change
 
-### For everybody 
+### For everybody
 
 Since we are not all fond of code, there are still ways to be part of this project
 
@@ -35,11 +35,11 @@ If you want to work on our LUA modules, you must follow the coding style provide
 [Coding style guidelines](https://github.com/luarocks/lua-style-guide)
 
 While it is mandatory to follow those guidelines for modules, they will not be enforced on community powered Stream Connectors scripts.
-It is however recommened to follow them as much as possible. 
+It is however recommended to follow them as much as possible.
 
 ## Documentations
 
-When creating a module you must comment your methods as follow 
+When creating a module you must comment your methods as follows
 
 ```lua
 --- This is a local function that does things
 -- @param first_name {string} the first name
 -- @param last_name {string} the last name
 -- @return age {number} the age of the person
 local function get_age(first_name, last_name)
 end
 ```
 
-You should comment complicated or long code blocks to help people review your code. 
+You should comment complicated or long code blocks to help people review your code.
 It is also required to create or update the module documentation for a more casual reading to help people use your module in their Stream Connector

diff --git a/stream-connectors/README.md b/stream-connectors/README.md
index 62589178b3b..0bfd9219ab8 100644
--- a/stream-connectors/README.md
+++ b/stream-connectors/README.md
@@ -1,46 +1,45 @@
+# Centreon Stream Connectors
+
 [![Contributors][contributors-shield]][contributors-url]
 [![Stars][stars-shield]][stars-url]
 [![Forks][forks-shield]][forks-url]
 [![Issues][issues-shield]][issues-url]
-
-# Centreon Stream Connectors #
-
 Centreon stream connectors are Lua scripts that help you send your Centreon monitoring data to your favorite tools
 
-# Stream connectors
+## Stream connectors
 
 Available scripts
 
 Here is a list of the Centreon powered scripts:
 
-| Software | Connectors | Documentations |
-| -------- | ---------- | -------------- |
+| Software | Connectors | Documentations |
+| ------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| BSM | [BSM Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/bsm) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-hp-bsm.html) |
Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/servicenow) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-service-now-events.html) | -| Signl4 | [Signl4 Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/signl4) | [Events Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-signl4-events.html) | -| Splunk | [Splunk Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/splunk) | [Events Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-events.html), [Metrics Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-metrics.html) | -| Warp10 | [Warp10 Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/warp10) | [Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-warp10.html) | +| InfluxDB | [InfluxDB Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/influxdb) | WIP | +| NDO | [NDO Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/ndo) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/ndo.html) | +| OMI | [OMI Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/omi) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-hp-omi.html) | +| Opsgenie | [Opsgenie Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/opsgenie) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-opsgenie.html) | +| PagerDuty | [PagerDuty Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/pagerduty) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-pagerduty-events.html) | +| Prometheus | [Prometheus Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/prometheus) | WIP | +| ServiceNow | [ServiceNow Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/servicenow) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-service-now-events.html) | +| Signl4 | [Signl4 Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/signl4) | [Events Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-signl4-events.html) | +| Splunk | [Splunk Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/splunk) | [Events Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-events.html), [Metrics Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-metrics.html) | +| Warp10 | [Warp10 Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/warp10) | [Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-warp10.html) | +| Kafka | [Kafka stream 
connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/kafka) | [Documentation](https://docs.centreon.com/docs/integrations/data-analytics/sc-kafka-events/) | Here is a list of the Community powered scripts -| Software | Connectors | Documentations | Contributors | Organizations | -| -------- | ---------- | -------------- | ------------ | ------------- | +| Software | Connectors | Documentations | Contributors | Organizations | +| -------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------- | --------------------------------------- | | Canopsis | [Canopsis Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/community-powered/canopsis) | [Documentation](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/community-powered/canopsis/README.md) | [ppremont-capensis](https://github.com/ppremont-capensis) | [Capensis](https://www.capensis.fr/en/) | -# Contribute +## Contribute If you wish to help us improve this project, feel free to read the [Contribute.md](https://github.com/centreon/centreon-stream-connector-scripts/blob/master/CONTRIBUTE.md) file. - [contributors-shield]: https://img.shields.io/github/contributors/centreon/centreon-stream-connector-scripts?color=%2384BD00&label=CONTRIBUTORS&style=for-the-badge [stars-shield]: https://img.shields.io/github/stars/centreon/centreon-stream-connector-scripts?color=%23433b02a&label=STARS&style=for-the-badge diff --git a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua new file mode 100644 index 00000000000..44c6a781499 --- /dev/null +++ b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -0,0 +1,405 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Datadog Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") +local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new 
+----------------------------------------------------------------------------------
+
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    "api_key"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/datadog-metrics.log"
+  local log_level = params.log_level or 3
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  --params.max_buffer_size = 1
+
+  -- overriding default parameters for this stream connector if the default values don't suit the basic needs
+  self.sc_params.params.api_key = params.api_key
+  self.sc_params.params.datadog_centreon_url = params.datadog_centreon_url or "http://yourcentreonaddress.local"
+  self.sc_params.params.datadog_metric_endpoint = params.datadog_metric_endpoint or "/api/v1/series"
+  self.sc_params.params.http_server_url = params.http_server_url or "https://api.datadoghq.com"
+  self.sc_params.params.accepted_categories = params.accepted_categories or "neb"
+  self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status"
+  self.sc_params.params.max_buffer_size = params.max_buffer_size or 30
+  self.sc_params.params.hard_only = params.hard_only or 0
+  self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0
+  self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0
+  self.sc_params.params.metric_name_regex = params.metric_name_regex or "[^a-zA-Z0-9_%.]"
+  self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "_"
+
+  -- apply user params and check syntax of standard ones
+  self.sc_params:param_override(params)
+  self.sc_params:check_params()
+
+  self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger)
+  self.format_template = self.sc_params:load_event_format_file(true)
+  self.sc_params:build_accepted_elements_info()
+  self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger)
+
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.format_event = {
+    [categories.neb.id] = {
+      [elements.host_status.id] = function () return self:format_event_host() end,
+      [elements.service_status.id] = function () return self:format_event_service() end
+    }
+  }
+
+  self.format_metric = {
+    [categories.neb.id] = {
+      [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end,
+      [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end
+    }
+  }
+
+  self.send_data_method = {
+    [1] = function (payload) return self:send_data(payload) end
+  }
+
+  self.build_payload_method = {
+    [1] = function (payload, event) return self:build_payload(payload, event) end
+  }
+
+  -- return EventQueue object
+  setmetatable(self, { __index = EventQueue })
+  return self
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_accepted_event method
+--------------------------------------------------------------------------------
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+
+  -- can't format the event when the stream connector doesn't handle this kind of event and no template file covers it
+  if not self.format_event[category][element] then
+    self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+      .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+      .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+      .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+  else
+    self.format_event[category][element]()
+  end
+
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_host method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+  self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric")
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_service method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_service()
+  self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric")
+  local event = self.sc_event.event
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_host method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_host(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_host]: call format_metric")
+  self:format_metric_event(metric)
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_service method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_service(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_service]: call format_metric")
+  self:format_metric_event(metric)
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_event method
+-- @param metric {table} a single metric data
+-------------------------------------------------------------------------------
+function EventQueue:format_metric_event(metric)
+  self.sc_logger:debug("[EventQueue:format_metric]: start formatting metric")
+  local event = self.sc_event.event
+  self.sc_event.event.formated_event = {
+    host = tostring(event.cache.host.name),
+    metric = metric.metric_name,
+    points = {{event.last_check, metric.value}},
+    tags = self:build_metadata(metric)
+  }
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_metric]: end formatting metric")
+end
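+
+-- Illustration (hypothetical values): a "rta" metric worth 0.025 on host
+-- "srv-1" for service "Ping", checked at 1652000000, yields:
+-- {
+--   host = "srv-1",
+--   metric = "rta",
+--   points = {{1652000000, 0.025}},
+--   tags = {"service:Ping"}
+-- }
+-- build_payload() below then wraps these entries in a {series = {...}} table,
+-- the shape expected by the Datadog /api/v1/series endpoint configured above.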
+--------------------------------------------------------------------------------
+---- EventQueue:build_metadata method
+-- @param metric {table} a single metric data
+-- @return tags {table} a table with formatted metadata
+--------------------------------------------------------------------------------
+function EventQueue:build_metadata(metric)
+  local tags = {}
+
+  -- add service name in tags
+  if self.sc_event.event.cache.service.description then
+    table.insert(tags, "service:" .. self.sc_event.event.cache.service.description)
+  end
+
+  -- add metric instance in tags
+  if metric.instance ~= "" then
+    table.insert(tags, "instance:" .. metric.instance)
+  end
+
+  -- add metric subinstances in tags
+  if metric.subinstance[1] then
+    for _, subinstance in ipairs(metric.subinstance) do
+      table.insert(tags, "subinstance:" .. subinstance)
+    end
+  end
+
+  return tags
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store the formatted event in the matching sc_flush queue
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. " max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {table} the payload under construction (nil on the first call)
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {table} the updated payload
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = {
+      series = {event}
+    }
+  else
+    table.insert(payload.series, event)
+  end
+
+  return payload
+end
+
+function EventQueue:send_data(payload)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+
+  local url = self.sc_params.params.http_server_url .. tostring(self.sc_params.params.datadog_metric_endpoint)
+  local payload_json = broker.json_encode(payload)
+
+  -- write payload in the logfile for test purposes
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(payload_json))
+    return true
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload_json))
+  self.sc_logger:info("[EventQueue:send_data]: Datadog address is: " .. tostring(url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "content-type: application/json",
+        "DD-API-KEY:" .. self.sc_params.params.api_key
+
+function EventQueue:send_data(payload)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+
+  local url = self.sc_params.params.http_server_url .. tostring(self.sc_params.params.datadog_metric_endpoint)
+  local payload_json = broker.json_encode(payload)
+
+  -- write payload in the logfile for test purpose
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(payload_json))
+    return true
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload_json))
+  self.sc_logger:info("[EventQueue:send_data]: Datadog address is: " .. tostring(url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "content-type: application/json",
+        "DD-API-KEY:" .. self.sc_params.params.api_key
+      }
+    )
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(payload_json)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  -- https://docs.datadoghq.com/fr/api/latest/events/ anything other than 202 is an error
+  if http_response_code == 202 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger)
+  queue.sc_event = queue.sc_metrics.sc_event
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_metrics:is_valid_bbdo_element() then
+      -- format event if it is validated
+      if queue.sc_metrics:is_valid_metric_event() then
+        queue:format_accepted_event()
+      end
+    --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because the last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua
similarity index 99%
rename from stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua
rename to stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua
index 6eefecca634..9ba33d61405 100644
--- a/stream-connectors/centreon-certified/servicenow/servicenow-events-apiv2.lua
+++ b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua
@@ -47,7 +47,7 @@ function EventQueue.new (params)
   self.events = {}
   self.fail = false
 
-  local logfile = params.logfile or "/var/log/centreon-broker/servicenow-stream-connector.log"
+  local logfile = params.logfile or "/var/log/centreon-broker/servicenow-em-stream-connector.log"
   local log_level = params.log_level or 1
 
   -- initiate mandatory objects
diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua
new file mode 100644
index 00000000000..d5873edb5b6
--- /dev/null
+++ b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua
@@ -0,0 +1,500 @@
+#!/usr/bin/lua
+
+--------------------------------------------------------------------------------
+-- Centreon Broker Service Now connector
+-- documentation: https://docs.centreon.com/current/en/integrations/stream-connectors/servicenow.html
+--------------------------------------------------------------------------------
+
+
+-- libraries
+local curl = require "cURL"
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+-- Constructor
+-- @param conf 
The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue.new (params) + local self = {} + local mandatory_parameters = { + [1] = "instance", + [2] = "client_id", + [3] = "client_secret", + [4] = "username", + [5] = "password" + } + + self.tokens = {} + self.tokens.authToken = nil + self.tokens.refreshToken = nil + + + self.events = {} + self.fail = false + + local logfile = params.logfile or "/var/log/centreon-broker/servicenow-incident-stream-connector.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + self.sc_params.params.instance = params.instance + self.sc_params.params.client_id = params.client_id + self.sc_params.params.client_secret = params.client_secret + self.sc_params.params.username = params.username + self.sc_params.params.password = params.password + self.sc_params.params.http_server_url = params.http_server_url or "service-now.com" + self.sc_params.params.incident_table = params.incident_table or "incident" + self.sc_params.params.source = params.source or "centreon" + + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + -- this is an automatic ticketing stream connector, by default we only open ticket on warning/critical/unknown/down/unreachable states + self.sc_params.params.host_status = params.host_status or "1,2" + self.sc_params.params.service_status = params.service_status or "1,2,3" + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- force max_buffer_size to 1, we can't send bulk events + params.max_buffer_size = 1 + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_params.params.http_server_url = self.sc_common:if_wrong_type(self.sc_params.params.http_server_url, "string", "service-now.com") + self.sc_params.params.incident_table = self.sc_common:if_wrong_type(self.sc_params.params.incident_table, "string", "incident") + self.sc_params.params.source = self.sc_common:if_wrong_type(self.sc_params.params.source, "string", "centreon") + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + setmetatable(self, { __index = EventQueue }) + return self +end + 
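+
+-- Illustrative sketch (not part of the connector): a broker output configuration
+-- only has to provide the mandatory parameters checked above; everything else
+-- falls back to the defaults set in this constructor. Values are made-up examples.
+--[[
+  local example_params = {
+    instance = "mycompany",        -- targets https://mycompany.service-now.com
+    client_id = "xxx",
+    client_secret = "xxx",
+    username = "centreon",
+    password = "xxx",
+    incident_table = "incident"    -- optional, this is the default
+  }
+]]--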
+--------------------------------------------------------------------------------
+-- getAuthToken: obtain an auth token
+-- @return {string} self.tokens.authToken.token, the auth token
+--------------------------------------------------------------------------------
+function EventQueue:getAuthToken ()
+  if not self:refreshTokenIsValid() then
+    self:authToken()
+  end
+
+  if not self:accessTokenIsValid() then
+    self:refreshToken(self.tokens.refreshToken.token)
+  end
+
+  return self.tokens.authToken.token
+end
+
+--------------------------------------------------------------------------------
+-- authToken: obtain an auth token and a refresh token
+--------------------------------------------------------------------------------
+function EventQueue:authToken ()
+  local data = "grant_type=password&client_id=" .. self.sc_params.params.client_id .. "&client_secret=" .. self.sc_params.params.client_secret .. "&username=" .. self.sc_params.params.username .. "&password=" .. self.sc_params.params.password
+
+  local res = self:call(
+    "oauth_token.do",
+    "POST",
+    data
+  )
+
+  if not res.access_token then
+    self.sc_logger:error("EventQueue:authToken: Authentication failed, couldn't get tokens")
+    return false
+  end
+
+  self.tokens.authToken = {
+    token = res.access_token,
+    expTime = os.time(os.date("!*t")) + 1700
+  }
+
+  self.tokens.refreshToken = {
+    token = res.refresh_token,
+    expTime = os.time(os.date("!*t")) + 360000
+  }
+end
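+
+-- Illustrative sketch (not part of the connector): a successful POST to
+-- oauth_token.do decodes to a standard OAuth2 response, roughly the table below;
+-- authToken above deliberately keeps its own conservative local expiry (1700 s)
+-- instead of trusting the expires_in value returned by the instance.
+--[[
+  {
+    access_token = "xxxxxxxx",
+    refresh_token = "yyyyyyyy",
+    token_type = "Bearer",
+    expires_in = 1800
+  }
+]]--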
+
+--------------------------------------------------------------------------------
+-- refreshToken: refresh the auth token using the refresh token
+-- @param token {string} the refresh token
+--------------------------------------------------------------------------------
+function EventQueue:refreshToken (token)
+  local data = "grant_type=refresh_token&client_id=" .. self.sc_params.params.client_id .. "&client_secret=" .. self.sc_params.params.client_secret .. "&username=" .. self.sc_params.params.username .. "&password=" .. self.sc_params.params.password .. "&refresh_token=" .. token
+
+  local res = self:call(
+    "oauth_token.do",
+    "POST",
+    data
+  )
+
+  if not res.access_token then
+    self.sc_logger:error("EventQueue:refreshToken: Bad access token")
+    return false
+  end
+
+  self.tokens.authToken = {
+    token = res.access_token,
+    expTime = os.time(os.date("!*t")) + 1700
+  }
+end
+
+--------------------------------------------------------------------------------
+-- refreshTokenIsValid: check whether the stored refresh token is still valid
+--------------------------------------------------------------------------------
+function EventQueue:refreshTokenIsValid ()
+  if not self.tokens.refreshToken then
+    return false
+  end
+
+  if os.time(os.date("!*t")) > self.tokens.refreshToken.expTime then
+    self.tokens.refreshToken = nil
+    return false
+  end
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- accessTokenIsValid: check whether the stored access token is still valid
+--------------------------------------------------------------------------------
+function EventQueue:accessTokenIsValid ()
+  if not self.tokens.authToken then
+    return false
+  end
+
+  if os.time(os.date("!*t")) > self.tokens.authToken.expTime then
+    self.tokens.authToken = nil
+    return false
+  end
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:call run api call
+-- @param {string} url, the service now instance url
+-- @param {string} method, the HTTP method that is used
+-- @param {string} data, the data we want to send to service now
+-- @param {string} authToken, the api auth token
+-- @return {table|boolean} the decoded response, or false if the call fails or the response is empty
+--------------------------------------------------------------------------------
+function EventQueue:call (url, method, data, authToken)
+  method = method or "GET"
+  data = data or nil
+  authToken = authToken or nil
+
+  local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. "." .. self.sc_params.params.http_server_url .. "/" .. tostring(url)
+  self.sc_logger:debug("EventQueue:call: Prepare url " .. endpoint)
+
+  -- write payload in the logfile for test purpose
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(data) .. " to endpoint: " .. tostring(endpoint))
+    return true
+  end
+
+  local res = ""
+  local request = curl.easy()
+    :setopt_url(endpoint)
+    :setopt_writefunction(function (response)
+      res = res .. tostring(response)
+    end)
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+
+  self.sc_logger:debug("EventQueue:call: Request initialize")
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("EventQueue:call: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. 
self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("EventQueue:call: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  if not authToken then
+    if method ~= "GET" then
+      self.sc_logger:debug("EventQueue:call: Add form header")
+      request:setopt(curl.OPT_HTTPHEADER, { "Content-Type: application/x-www-form-urlencoded" })
+    end
+  else
+    self.sc_logger:debug("EventQueue:call: Add JSON header")
+    request:setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "Accept: application/json",
+        "Content-Type: application/json",
+        "Authorization: Bearer " .. authToken
+      }
+    )
+  end
+
+  if method ~= "GET" then
+    self.sc_logger:debug("EventQueue:call: Add post data")
+    request:setopt_postfields(data)
+  end
+
+  self.sc_logger:debug("EventQueue:call: request body " .. tostring(data))
+  self.sc_logger:debug("EventQueue:call: request header " .. tostring(authToken))
+  self.sc_logger:warning("EventQueue:call: Call url " .. endpoint)
+  request:perform()
+
+  local respCode = request:getinfo(curl.INFO_RESPONSE_CODE)
+  self.sc_logger:debug("EventQueue:call: HTTP Code : " .. respCode)
+  self.sc_logger:debug("EventQueue:call: Response body : " .. tostring(res))
+
+  request:close()
+
+  if respCode >= 300 then
+    self.sc_logger:error("EventQueue:call: HTTP Code : " .. respCode)
+    self.sc_logger:error("EventQueue:call: HTTP Error : " .. res)
+    return false
+  end
+
+  if res == "" then
+    self.sc_logger:warning("EventQueue:call: HTTP Error : empty response body")
+    return false
+  end
+
+  return broker.json_decode(res)
+end
+
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  local template = self.sc_params.params.format_template[category][element]
+
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+  self.sc_event.event.formated_event = {}
+
+  if self.format_template and template ~= nil and template ~= "" then
+    self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true)
+  else
+    -- can't format the event if the stream connector does not handle this kind of event and no template file handles it either
+    if not self.format_event[category][element] then
+      self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+        .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+        .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    source = self.sc_params.params.source,
+    short_description = self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. tostring(event.cache.host.name) .. " " .. tostring(event.short_output),
+    cmdb_ci = tostring(event.cache.host.name),
+    comments = "HOST: " .. tostring(event.cache.host.name) .. "\n"
+      .. "OUTPUT: " .. tostring(event.output) .. "\n"
+  }
+end
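+
+-- Illustrative sketch (not part of the connector): for a host "srv-01" that goes
+-- DOWN, format_event_host above produces a formated_event like the table below,
+-- which send_data later posts to the configured incident table. Values are
+-- made-up examples.
+--[[
+  {
+    source = "centreon",
+    short_description = "DOWN srv-01 CRITICAL - srv-01: rta nan",
+    cmdb_ci = "srv-01",
+    comments = "HOST: srv-01\nOUTPUT: CRITICAL - srv-01: rta nan, lost 100%\n"
+  }
+]]--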
+
+function EventQueue:format_event_service()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    source = self.sc_params.params.source,
+    short_description = self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. tostring(event.cache.host.name) .. " " .. tostring(event.cache.service.description) .. " " .. tostring(event.short_output),
+    cmdb_ci = tostring(event.cache.host.name),
+    comments = "HOST: " .. tostring(event.cache.host.name) .. "\n"
+      .. "SERVICE: " .. tostring(event.cache.service.description) .. "\n"
+      .. "OUTPUT: " .. tostring(event.output) .. "\n"
+  }
+end
+
+local queue
+
+-- Function init()
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store the formatted event in the sc_flush queues
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. " max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {string} json encoded string
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {string} json encoded string
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = broker.json_encode(event)
+  else
+    payload = payload .. ',' .. broker.json_encode(event)
+  end
+
+  return payload
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to external tool
+-- @param payload {string} json encoded string
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:send_data(payload)
+  local authToken
+
+  -- generate a fake token for test purpose or use a real one if not testing
+  if self.sc_params.params.send_data_test == 1 then
+    authToken = "fake_token"
+  else
+    authToken = self:getAuthToken()
+  end
+
+  local http_post_data = payload
+  self.sc_logger:info('EventQueue:send_data: creating json: ' .. http_post_data)
+
+  if self:call(
+    "api/now/table/" .. self.sc_params.params.incident_table,
+    "POST",
+    http_post_data,
+    authToken
+  ) then
+    return true
+  end
+
+  return false
+end
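+
+-- Illustrative sketch (not part of the connector): with instance = "mycompany" and
+-- the default incident_table, the call above amounts to an authenticated POST:
+--[[
+  POST https://mycompany.service-now.com/api/now/table/incident
+  Authorization: Bearer <access token from getAuthToken()>
+  Content-Type: application/json
+
+  {"source":"centreon","short_description":"...","cmdb_ci":"...","comments":"..."}
+]]--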
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_event:is_valid_element() then
+      -- format event if it is validated
+      if queue.sc_event:is_valid_event() then
+        queue:format_accepted_event()
+      end
+    --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because the last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
+
diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
index f6202d686c5..565b2b906e4 100644
--- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
+++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
@@ -37,7 +37,7 @@ function EventQueue.new(params)
 
   -- set up log configuration
   local logfile = params.logfile or "/var/log/centreon-broker/splunk-metrics.log"
-  local log_level = params.log_level or 1
+  local log_level = params.log_level or 3
 
   -- initiate mandatory objects
   self.sc_logger = sc_logger.new(logfile, log_level)
@@ -57,9 +57,12 @@ function EventQueue.new(params)
   self.sc_params.params.splunk_host = params.splunk_host or "Central"
   self.sc_params.params.accepted_categories = params.accepted_categories or "neb"
   self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status"
+  self.sc_params.params.max_buffer_size = params.max_buffer_size or 30
   self.sc_params.params.hard_only = params.hard_only or 0
   self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0
self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 + self.sc_params.params.metric_name_regex = params.metric_name_regex or "[^a-zA-Z0-9_]" + self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "_" -- apply users params and check syntax of standard ones self.sc_params:param_override(params) @@ -75,8 +78,14 @@ function EventQueue.new(params) [categories.neb.id] = { [elements.host_status.id] = function () return self:format_metrics_host() end, [elements.service_status.id] = function () return self:format_metrics_service() end - }, - [categories.bam.id] = {} + } + } + + self.format_metric = { + [categories.neb.id] = { + [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end, + [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end + } } self.send_data_method = { @@ -99,7 +108,6 @@ function EventQueue:format_accepted_event() local category = self.sc_event.event.category local element = self.sc_event.event.element self.sc_logger:debug("[EventQueue:format_event]: starting format event") - self.sc_event.event.formated_event = {} -- can't format event if stream connector is not handling this kind of event if not self.format_event[category][element] then @@ -108,38 +116,89 @@ function EventQueue:format_accepted_event() .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) .. ". If it is a not a misconfiguration, you can open an issue at https://github.com/centreon/centreon-stream-connector-scripts/issues") else + self.sc_logger:debug("[EventQueue:format_event]: going to format it") self.format_event[category][element]() - - -- add metrics in the formated event - for metric_name, metric_data in pairs(self.sc_metrics.metrics) do - metric_name = string.gsub(metric_name, "[^a-zA-Z0-9_]", "_") - self.sc_event.event.formated_event["metric_name:" .. 
tostring(metric_name)] = metric_data.value
-    end
   end
-  self:add()
   self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
 end
 
-function EventQueue:format_metrics_host()
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_host method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+
   self.sc_event.event.formated_event = {
     event_type = "host",
-    state = self.sc_event.event.state,
-    state_type = self.sc_event.event.state_type,
-    hostname = self.sc_event.event.cache.host.name,
-    ctime = self.sc_event.event.last_check
+    state = event.state,
+    state_type = event.state_type,
+    hostname = event.cache.host.name,
+    ctime = event.last_check
   }
+
+  self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric")
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
 end
 
-function EventQueue:format_metrics_service()
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_service method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_service()
+  local event = self.sc_event.event
+
   self.sc_event.event.formated_event = {
     event_type = "service",
-    state = self.sc_event.event.state,
-    state_type = self.sc_event.event.state_type,
-    hostname = self.sc_event.event.cache.host.name,
-    service_description = self.sc_event.event.cache.service.description,
-    ctime = self.sc_event.event.last_check
+    state = event.state,
+    state_type = event.state_type,
+    hostname = event.cache.host.name,
+    service_description = event.cache.service.description,
+    ctime = event.last_check
   }
+
+  self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric")
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_host method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_host(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_host]: call format_metric")
+  self:format_metric_event(metric)
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_service method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_service(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_service]: call format_metric")
+  self:format_metric_event(metric)
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_event method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_event(metric)
+  self.sc_logger:debug("[EventQueue:format_metric]: start real format metric")
+  self.sc_event.event.formated_event["metric_name:" .. tostring(metric.metric_name)] = metric.value
+
+  -- add metric instance as a field
+  if metric.instance ~= "" then
+    self.sc_event.event.formated_event["instance"] = metric.instance
+  end
+
+  if metric.subinstance[1] then
+    self.sc_event.event.formated_event["subinstances"] = metric.subinstance
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_metric]: end real format metric")
+end
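+
+-- Illustrative sketch (not part of the connector): after format_event_service and
+-- one format_metric_event pass for a metric "traffic_in", the event queued for
+-- Splunk looks like the table below. Values are made-up examples.
+--[[
+  {
+    event_type = "service",
+    state = 2,
+    state_type = 1,
+    hostname = "srv-01",
+    service_description = "Traffic",
+    ctime = 1634224800,
+    ["metric_name:traffic_in"] = 12.5,
+    instance = "eth0"
+  }
+]]--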
 
 --------------------------------------------------------------------------------
@@ -279,12 +338,13 @@ function write (event)
   end
 
   -- initiate event object
-  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+  queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger)
+  queue.sc_event = queue.sc_metrics.sc_event
 
   if queue.sc_event:is_valid_category() then
-    if queue.sc_event:is_valid_element() then
+    if queue.sc_metrics:is_valid_bbdo_element() then
       -- format event if it is validated
-      if queue.sc_event:is_valid_event() then
+      if queue.sc_metrics:is_valid_metric_event() then
         queue:format_accepted_event()
       end
       --- log why the event has been dropped
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
index 3af30a814a5..72c6c6ce5e3 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
@@ -247,6 +247,9 @@ function ScCommon:json_escape(string)
   return string
 end
 
+--- xml_escape: escape xml special characters in a string
+-- @param string (string) the string that must be escaped
+-- @return string (string) the string with escaped characters
 function ScCommon:xml_escape(string)
   local type = type(string)
 
@@ -271,4 +274,53 @@ function ScCommon:xml_escape(string)
   return string
 end
 
+--- dumper: dump a variable as a readable string for debugging purposes
+-- @param variable (any) the variable that must be dumped
+-- @param result (string) [opt] the string that contains the dumped variable. ONLY USED INTERNALLY FOR RECURSIVE PURPOSE
+-- @param tab_char (string) [opt] the string that contains the tab character. ONLY USED INTERNALLY FOR RECURSIVE PURPOSE (and design)
+-- @return result (string) the dumped variable
+function ScCommon:dumper(variable, result, tab_char)
+  -- tabulation handling
+  if not tab_char then
+    tab_char = ""
+  else
+    tab_char = tab_char .. "\t"
+  end
+
+  -- non table variables handling
+  if type(variable) ~= "table" then
+    if result then
+      result = result .. "\n" .. tab_char .. "[" .. type(variable) .. "]: " .. tostring(variable)
+    else
+      result = "\n[" .. type(variable) .. "]: " .. tostring(variable)
+    end
+  else
+    if not result then
+      result = "\n[table]"
+      tab_char = "\t"
+    end
+
+    -- recursively loop through every table nested in the table
+    for index, value in pairs(variable) do
+      if type(value) ~= "table" then
+        if result then
+          result = result .. "\n" .. tab_char .. "[" .. type(value) .. "] " .. tostring(index) .. ": " .. tostring(value)
+        else
+          result = "\n" .. tostring(index) .. " [" .. type(value) .. "]: " .. tostring(value)
+        end
+      else
+        if result then
+          result = result .. "\n" .. tab_char .. "[" .. type(value) .. "] " .. tostring(index) .. ": "
+        else
+          result = "\n[" .. type(value) .. "] " .. tostring(index) .. ": "
+        end
+        result = self:dumper(value, result, tab_char)
+      end
+    end
+  end
+
+  return result
+end
+
+
 return sc_common
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
index c7d5dc562d7..bf122625783 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
@@ -371,8 +371,8 @@ end
 -- @return true|false (boolean)
 function ScEvent:is_valid_event_downtime_state()
   if not self.sc_common:compare_numbers(self.params.in_downtime, self.event.scheduled_downtime_depth, ">=") then
-    self.sc_logger:warning("[sc_event:is_valid_event_downtime_state]: event is not in an valid ack state. Event ack state must be above or equal to " .. tostring(self.params.acknowledged)
-      .. ". Current ack state: " .. tostring(self.sc_common:boolean_to_number(self.event.acknowledged)))
+    self.sc_logger:warning("[sc_event:is_valid_event_downtime_state]: event is not in a valid downtime state. Event downtime state must be above or equal to " .. tostring(self.params.in_downtime)
+      .. ". Current downtime state: " .. tostring(self.sc_common:boolean_to_number(self.event.scheduled_downtime_depth)))
     return false
   end
 
@@ -1112,6 +1112,8 @@ function ScEvent:build_outputs()
   local short_output = string.match(self.event.output, "^(.*)\n")
   if short_output then
     self.event.short_output = short_output
+  else
+    self.event.short_output = self.event.output
   end
 
   -- use shortoutput if it exists
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua
index e6574c47367..453260f8c26 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua
@@ -49,6 +49,20 @@ function sc_metrics.new(event, params, common, broker, logger)
     }
   }
 
+-- open metric (prometheus): metric name = [a-zA-Z0-9_:], labels [a-zA-Z0-9_] https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#protocol-negotiation
+-- datadog: metric_name = [a-zA-Z0-9_.] https://docs.datadoghq.com/fr/metrics/custom_metrics/#naming-custom-metrics
+-- dynatrace: metric name [a-zA-Z0-9-_.] https://dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol#metric-key
+-- metric 2.0 (carbon/graphite/grafana): [a-zA-Z0-9-_./] http://metrics20.org/spec/ (see Data Model section)
+-- splunk: [^a-zA-Z0-9_]
+
+  if self.params.metrics_name_custom_regex and self.params.metrics_name_custom_regex ~= "" then
+    self.metrics_name_operations.custom.regex = self.params.metrics_name_custom_regex
+  end
+
+  if self.params.metrics_name_custom_replacement_character then
+    self.metrics_name_operations.custom.replacement_character = self.params.metrics_name_custom_replacement_character
+  end
+
   -- initiate metrics table
   self.metrics = {}
   -- initiate sc_event object
@@ -70,6 +84,7 @@ function ScMetrics:is_valid_bbdo_element()
 
   -- drop event if event category is not accepted
   if not self.sc_event:find_in_mapping(self.params.category_mapping, self.params.accepted_categories, event_category) then
+    self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element] event with category: " .. tostring(event_category) .. " is not an accepted category")
     return false
   else
     -- drop event if accepted category is not supposed to be used for a metric stream connector
@@ -80,17 +95,16 @@ function ScMetrics:is_valid_bbdo_element()
       else
         -- drop event if element is not accepted
         if not self.sc_event:find_in_mapping(self.params.element_mapping[event_category], self.params.accepted_elements, event_element) then
+          self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element] event with element: " .. tostring(event_element) .. " is not an accepted element")
           return false
         else
           -- drop event if element is not an element that carries perfdata
-          if event_element ~= elements.host.id
-            and event_element ~= elements.host_status.id
-            and event_element ~= elements.service.id
+          if event_element ~= elements.host_status.id
             and event_element ~= elements.service_status.id
             and event_element ~= elements.kpi_event.id
           then
             self.sc_logger:warning("[sc_metrics:is_valid_bbdo_element] Configuration error. accepted elements from paramters are: "
-              .. tostring(self.params.accepted_elements) .. ". Only host, host_status, service, service_status and kpi_event can be used for metrics")
+              .. tostring(self.params.accepted_elements) .. ". Only host_status, service_status and kpi_event can be used for metrics")
             return false
           end
         end
@@ -138,7 +152,7 @@ function ScMetrics:is_valid_host_metric_event()
     return false
   end
 
-  -- return false if there is no perfdata or they it can't be parsed
+  -- return false if there is no perfdata or it can't be parsed
   if not self:is_valid_perfdata(self.sc_event.event.perfdata) then
     self.sc_logger:warning("[sc_metrics:is_vaild_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id)
       .. " is not sending valid perfdata. Received perfdata: " .. tostring(self.sc_event.event.perf_data))
@@ -239,12 +253,28 @@ function ScMetrics:is_valid_perfdata(perfdata)
   end
 
   -- store data from parsed perfdata inside a metrics table
-  for metric_name, metric_data in pairs(metrics_info) do
-    self.metrics[metric_name] = metric_data
-    self.metrics[metric_name].name = metric_name
-  end
+  self.metrics_info = metrics_info
 
   return true
 end
 
+-- metric name character sets allowed by various backends, to name a few:
+-- open metric (prometheus): metric name = [a-zA-Z0-9_:], labels [a-zA-Z0-9_] https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#protocol-negotiation
+-- datadog: metric_name = [a-zA-Z0-9_.] https://docs.datadoghq.com/fr/metrics/custom_metrics/#naming-custom-metrics
+-- dynatrace: metric name [a-zA-Z0-9-_.] https://dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol#metric-key
+-- metric 2.0 (carbon/graphite/grafana): [a-zA-Z0-9-_./] http://metrics20.org/spec/ (see Data Model section)
+
+--- build_metric: use the stream connector format method to parse every metric in the event
+-- @param format_metric (function) the format method from the stream connector
+function ScMetrics:build_metric(format_metric)
+  local metrics_info = self.metrics_info
+  self.sc_logger:debug("perfdata: " .. self.sc_common:dumper(metrics_info))
+
+  for metric, metric_data in pairs(self.metrics_info) do
+    metrics_info[metric].metric_name = string.gsub(metric_data.metric_name, self.params.metric_name_regex, self.params.metric_replacement_character)
+    -- use stream connector method to format the metric event
+    format_metric(metrics_info[metric])
+  end
+end
+
 return sc_metrics
\ No newline at end of file
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
index 9b626c63710..87d6185f94d 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
@@ -101,6 +101,10 @@ function sc_params.new(common, logger)
     logfile = "",
     log_level = "",
 
+    -- metric
+    metric_name_regex = "",
+    metric_replacement_character = "_",
+
     -- initiate mappings
     element_mapping = {},
     status_mapping = {},
@@ -109,7 +113,7 @@ function sc_params.new(common, logger)
       [1] = "HARD"
     },
     validatedEvents = {},
-    
+
     -- FIX BROKER ISSUE
     max_stored_events = 10 -- do not use values above 100
   }
@@ -647,10 +651,10 @@ function ScParams:check_params()
   self.params.service_severity_threshold = self.common:if_wrong_type(self.params.service_severity_threshold, "number", nil)
   self.params.host_severity_operator = self.common:if_wrong_type(self.params.host_severity_operator, "string", ">=")
   self.params.service_severity_operator = self.common:if_wrong_type(self.params.service_severity_operator, "string", ">=")
-  self.params.ack_host_status = self.common:ifnil_or_empty(self.params.ack_host_status,self.params.host_status)
-  self.params.ack_service_status = self.common:ifnil_or_empty(self.params.ack_service_status,self.params.service_status)
-  self.params.dt_host_status = self.common:ifnil_or_empty(self.params.dt_host_status,self.params.host_status)
-  self.params.dt_service_status = self.common:ifnil_or_empty(self.params.dt_service_status,self.params.service_status)
+  self.params.ack_host_status = self.common:ifnil_or_empty(self.params.ack_host_status, self.params.host_status)
+  self.params.ack_service_status = self.common:ifnil_or_empty(self.params.ack_service_status, self.params.service_status)
+  self.params.dt_host_status = self.common:ifnil_or_empty(self.params.dt_host_status, self.params.host_status)
+  self.params.dt_service_status = self.common:ifnil_or_empty(self.params.dt_service_status, self.params.service_status)
   self.params.enable_host_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_host_status_dedup, 0)
   self.params.enable_service_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_service_status_dedup, 0)
   self.params.send_data_test = self.common:check_boolean_number_option_syntax(self.params.send_data_test, 0)
@@ -665,6 +669,8 @@ function ScParams:check_params()
   self.params.use_long_output = self.common:check_boolean_number_option_syntax(self.params.use_longoutput, 1)
   self.params.remove_line_break_in_output = self.common:check_boolean_number_option_syntax(self.params.remove_line_break_in_output, 1)
   self.params.output_line_break_replacement_character = self.common:if_wrong_type(self.params.output_line_break_replacement_character, "string", " ")
+  self.params.metric_name_regex = self.common:if_wrong_type(self.params.metric_name_regex, "string", "")
+  self.params.metric_replacement_character = self.common:ifnil_or_empty(self.params.metric_replacement_character, "_")
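+  -- Illustrative sketch (not part of the module): build_metric in sc_metrics.lua
+  -- applies these two parameters with string.gsub; with the splunk connector's
+  -- metric_name_regex = "[^a-zA-Z0-9_]" and the default replacement character "_":
+  --[[
+    local sanitized = string.gsub("traffic in.eth0", "[^a-zA-Z0-9_]", "_")
+    -- sanitized is now "traffic_in_eth0"
+  ]]--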
 end
 
 --- get_kafka_params: retrieve the kafka parameters and store them in the self.params.kafka table
diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md
index 5dfd6c1d2c1..4b2339b410f 100644
--- a/stream-connectors/modules/docs/README.md
+++ b/stream-connectors/modules/docs/README.md
@@ -43,6 +43,7 @@
 | load_json_file | method loads a json file and parse it | [Documentation](sc_common.md#load_json_file-method) |
 | json_escape | escape json characters in a string | [Documentation](sc_common.md#json_escape-method) |
 | xml_escape | escape xml characters in a string | [Documentation](sc_common.md#xml_escape-method) |
+| dumper | dump any variable for debugging purposes | [Documentation](sc_common.md#dumper-method) |
 
 ## sc_logger methods
 
@@ -155,6 +156,7 @@
 | is_valid_service_metric_event | makes sure that the metric event is valid service metric event | [Documentation](sc_metrics.md#is_valid_service_metric_event-method) |
 | is_valid_kpi_metric_event | makes sure that the metric event is valid KPI metric event | [Documentation](sc_metrics.md#is_valid_kpi_metric_event-method) |
 | is_valid_perfdata | makes sure that the performance data is valid | [Documentation](sc_metrics.md#is_valid_perfdata-method) |
+| build_metric | use the stream connector format method to parse every metric in the event | [Documentation](sc_metrics.md#build_metric-method) |
 
 ## google.bigquery.bigquery methods
 
diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md
index 0f877f83c44..e0e0a89519a 100644
--- a/stream-connectors/modules/docs/sc_common.md
+++ b/stream-connectors/modules/docs/sc_common.md
@@ -49,6 +49,10 @@
   - [xml_escape: parameters](#xml_escape-parameters)
   - [xml_escape: returns](#xml_escape-returns)
   - [xml_escape: example](#xml_escape-example)
+  - [dumper method](#dumper-method)
+    - [dumper: parameters](#dumper-parameters)
+    - [dumper: returns](#dumper-returns)
+    - [dumper: example](#dumper-example)
 
 ## Introduction
 
@@ -429,3 +433,44 @@
 local string = 'string with " and < and >'
 
 local result = test_common:xml_escape(string)
 --> result is 'string with &quot; and &lt; and &gt;'
 ```
+
+## dumper method
+
+The **dumper** method dumps any variable as a readable string for debugging purposes.
+
+### dumper: parameters
+
+| parameter | type | optional | default value |
+| --------------------------------------------------------------------------------------------------- | ------ | -------- | ------------- |
+| the variable that must be dumped | any | no | |
+| the string that contains the dumped variable. ONLY USED INTERNALLY FOR RECURSIVE PURPOSE | string | yes | |
+| the string that contains the tab character. ONLY USED INTERNALLY FOR RECURSIVE PURPOSE (and design) | string | yes | |
+
+### dumper: returns
+
+| return | type | always | condition |
+| ------------------- | ------ | ------ | --------- |
+| the dumped variable | string | yes | |
+
+### dumper: example
+
+```lua
+local best_city = {
+  name = "mont-de-marsan",
+  geocoord = {
+    lat = 43.89446,
+    lon = -0.4964242
+  }
+}
+
+local result = "best city info: " .. 
test_common:dumper(best_city) +--> result is +--[[ + best city info: + [table] + [string] name: mont-de-marsan + [table] geocoord: + [number] lon: -0.4964242 + [number] lat: 43.89446 +]]-- +``` diff --git a/stream-connectors/modules/docs/sc_metrics.md b/stream-connectors/modules/docs/sc_metrics.md index a7f3e51f4ef..ef183c49ba3 100644 --- a/stream-connectors/modules/docs/sc_metrics.md +++ b/stream-connectors/modules/docs/sc_metrics.md @@ -24,6 +24,9 @@ - [is_valid_perfdata parameters](#is_valid_perfdata-parameters) - [is_valid_perfdata: returns](#is_valid_perfdata-returns) - [is_valid_perfdata: example](#is_valid_perfdata-example) + - [build_metric method](#build_metric-method) + - [build_metric parameters](#build_metric-parameters) + - [build_metric: example](#build_metric-example) ## Introduction @@ -215,7 +218,7 @@ The **is_valid_perfdata** method makes sure that the performance data is valid. ```lua local perfdata = "pl=45%;40;80;0;100" -local result = test_metrics:is_valid_perfdata() +local result = test_metrics:is_valid_perfdata(perfdata) --> result is true or false --> test_metrics.metrics is now --[[ @@ -236,3 +239,24 @@ local result = test_metrics:is_valid_perfdata() } ]]-- ``` + +## build_metric method + +The **build_metric** method uses the provided stream connector format method to parse every metric in the event + +### build_metric parameters + +| parameter | type | optional | default value | +| -------------------------------------------- | -------- | -------- | ------------- | +| "the format method from the stream connector | function | no | | + +### build_metric: example + +```lua +local function my_format_method(metric_data) + -- your code here +end + +local stored_method = function(metric_data) return my_format_method(metric_data) end +test_metrics:build_metric(stored_method) +``` diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index bd7ebdd1d83..4aa4dae92cf 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -31,50 +31,54 @@ The sc_param module provides methods to help you handle parameters for your stre ### Default parameters -| Parameter name | type | default value | description | default scope | additionnal information | -| --------------------------------------- | ------ | ----------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a coma separated list, can use "neb", "bam", "storage". 
Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) | -| accepted_elements | string | host_status,service_status,ba_status | | each event is linked to a broker element that we can use to filter events | it is a coma separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore. "Host status" becomes "host_status") | -| host_status | string | 0,1,2 | coma separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | -| service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | -| ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | -| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | -| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | -| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | -| accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | -| accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | -| accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | -| accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | -| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | -| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | -| max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). | | | -| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | -| max_all_queues_age | number | 300 | if last global flush date was 300 seconds ago, it will force a flush of each queue | | | -| send_mixed_events | number | 1 | when sending data, it will mix all sorts of events in every payload. It means that you can have events about hosts mixed with events about services when set to 1. Performance wise, it is **better** to set it to **1**. **Only** set it to **0** if the tool that you are sending events to **doesn't handle a payload with mixed events**. 
| | | -| service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. it must be used with service_severity_operator option | service_status(neb), acknowledgement(neb) | | -| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb), acknowledgement(neb) | | -| host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. it must be used with host_severity_operator option | host_status(neb), service_status(neb) , acknowledgement(neb) | | -| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb), acknowledgement(neb) | | -| ack_host_status | string | | | coma separated list of accepted host status for an acknowledgement event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | acknowledgement(neb) | | -| ack_service_status | string | | | coma separated list of accepted service status for an acknowledgement event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | acknowledgement(neb) | | -| dt_host_status | string | | | coma separated list of accepted host status for a downtime event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | downtime(neb) | | -| dt_service_status | string | | | coma separated list of accepted service status for a downtime event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | downtime(neb) | | -| enable_host_status_dedup | number | 1 | | enable the deduplication of host status event when set to 1 | host_status(neb) | | -| enable_service_status_dedup | number | 1 | | enable the deduplication of service status event when set to 1 | service_status(neb) | | -| accepted_authors | string | | | coma separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | | -| local_time_diff_from_utc | number | default value is the time difference the centreon central server has from UTC | | the time difference from UTC in seconds | all | | -| timestamp_conversion_format | string | %Y-%m-%d %X | | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | [date format information](https://www.lua.org/pil/22.1.html) | -| send_data_test | number | 0 | | When set to 1, send data in the logfile of the stream connector instead of sending it where the stream connector was designed to | all | | -| format_file | string | | | Path to a file that will be used as a template to format events instead of using default format | only usable for events stream connectors (\*-events-apiv2.lua) and not metrics stream connectors (\*-metrics-apiv2.lua) you should put the file in /etc/centreon-broker to keep your broker configuration in a single place. 
[**See documentation for more information**](templating.md) |
-| proxy_address | string | | | address of the proxy | |
-| proxy_port | number | | | port of the proxy | |
-| proxy_username | string | | | user for the proxy | |
-| proxy_password | string | | | pasword of the proxy user | |
-| connection_timeout | number | 60 | | time to wait in second when opening connection | |
-| allow_insecure_connection | number | 0 | | check the certificate validity of the peer host (0 = needs to be a valid certificate), use 1 if you are using self signed certificates | |
-| use_long_output | number | 1 | | use the long output when sending an event (set to 0 to send the short output) | service_status(neb), host_status(neb) |
-| remove_line_break_in_output | number | 1 | | replace all line breaks (\n) in the output with the character set in the output_line_break_replacement_character parameter | service_status(neb), host_status(neb) |
-| output_line_break_replacement_character | string | " " | | replace all replace line break with this parameter value in the output (default value is a blank space) | service_status(neb), host_status(neb) |
+| Parameter name | type | default value | description | default scope | additional information |
+| -------------- | ---- | ------------- | ----------- | ------------- | ----------------------- |
+| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a comma-separated list; accepted values are "neb", "bam" and "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) |
+| accepted_elements | string | host_status,service_status,ba_status | each event is linked to a broker element that we can use to filter events | | it is a comma-separated list and can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank spaces with underscores:
"Host status" becomes "host_status") | +| host_status | string | 0,1,2 | coma separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | +| service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | +| ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | +| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | +| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | +| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | +| accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | +| accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | +| accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | +| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | +| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | +| max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). | | | +| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if the max_buffer_size hasn't been reached | | | +| max_all_queues_age | number | 300 | if last global flush date was 300 seconds ago, it will force a flush of each queue | | | +| send_mixed_events | number | 1 | when sending data, it will mix all sorts of events in every payload. It means that you can have events about hosts mixed with events about services when set to 1. Performance wise, it is **better** to set it to **1**. **Only** set it to **0** if the tool that you are sending events to **doesn't handle a payload with mixed events**. | | | +| service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. it must be used with service_severity_operator option | service_status(neb), acknowledgement(neb) | | +| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb), acknowledgement(neb) | | +| host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. 
it must be used with host_severity_operator option | host_status(neb), service_status(neb) , acknowledgement(neb) | | +| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| ack_host_status | string | | coma separated list of accepted host status for an acknowledgement event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | acknowledgement(neb) | | +| ack_service_status | string | | coma separated list of accepted service status for an acknowledgement event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | acknowledgement(neb) | | +| dt_host_status | string | | coma separated list of accepted host status for a downtime event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | downtime(neb) | | +| dt_service_status | string | | coma separated list of accepted service status for a downtime event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | downtime(neb) | | +| enable_host_status_dedup | number | 1 | enable the deduplication of host status event when set to 1 | host_status(neb) | | +| enable_service_status_dedup | number | 1 | enable the deduplication of service status event when set to 1 | service_status(neb) | | +| accepted_authors | string | | coma separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | | +| local_time_diff_from_utc | number | default value is the time difference the centreon central server has from UTC | the time difference from UTC in seconds | all | | +| timestamp_conversion_format | string | %Y-%m-%d %X | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | [date format information](https://www.lua.org/pil/22.1.html) | +| send_data_test | number | 0 | When set to 1, send data in the logfile of the stream connector instead of sending it where the stream connector was designed to | all | | +| format_file | string | | Path to a file that will be used as a template to format events instead of using default format | only usable for events stream connectors (\*-events-apiv2.lua) and not metrics stream connectors (\*-metrics-apiv2.lua) you should put the file in /etc/centreon-broker to keep your broker configuration in a single place. 
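
To make the table above concrete, here is a minimal sketch of how a handful of these parameters could be set (all values are hypothetical; Broker passes such a table to the stream connector's init() function):

-- hypothetical configuration: keep only HARD, non-acknowledged CRITICAL
-- service events from two hostgroups, sent in bulks of 100 events
local conf = {
  accepted_categories = "neb",
  accepted_elements = "service_status",
  service_status = "2",                            -- CRITICAL only
  hard_only = 1,
  acknowledged = 0,
  accepted_hostgroups = "my_hostgroup_1,my_hostgroup_2",
  max_buffer_size = 100,
  max_buffer_age = 30
}

-- a connector then typically applies these user overrides and validates them:
-- self.sc_params:param_override(conf)
-- self.sc_params:check_params()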
 
 ## Module initialization
 
From c8b3f7f72b17c16714a184a83dda57d0f864a543 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Thu, 12 May 2022 09:18:35 +0200
Subject: [PATCH 136/219] add specfile (#109)

---
 ...eon-stream-connectors-lib-3.0.0-1.rockspec | 39 +++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 stream-connectors/modules/specs/3.0.x/centreon-stream-connectors-lib-3.0.0-1.rockspec

diff --git a/stream-connectors/modules/specs/3.0.x/centreon-stream-connectors-lib-3.0.0-1.rockspec b/stream-connectors/modules/specs/3.0.x/centreon-stream-connectors-lib-3.0.0-1.rockspec
new file mode 100644
index 00000000000..d5d8f5a71a9
--- /dev/null
+++ b/stream-connectors/modules/specs/3.0.x/centreon-stream-connectors-lib-3.0.0-1.rockspec
@@ -0,0 +1,39 @@
+package = "centreon-stream-connectors-lib"
+version = "3.0.0-1"
+source = {
+  url = "git+https://github.com/centreon/centreon-stream-connector-scripts",
+  tag = "3.0.0-1"
+}
+description = {
+  summary = "Centreon stream connectors lua modules",
+  detailed = [[
+    Those modules provides helpful methods to create
+    stream connectors for Centreon
+  ]],
+  license = ""
+}
+dependencies = {
+  "lua >= 5.1, < 5.4",
+  "luasocket >= 3.0rc1-2"
+}
+build = {
+  type = "builtin",
+  modules = {
+    ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua",
+    ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua",
+    ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua",
+    ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua",
["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 62cc9e27c9b613c9689a47a8414cc3226246baed Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 12 May 2022 16:42:17 +0200 Subject: [PATCH 137/219] (stream/lib) sc_param bbdo3 and bddo2 compat (#108) * manage bbdo3 and bddo2 compat * add comment * streamline compat idea * fix missing state in new bbdo elements * force var type for bbdo_version * fix downtime state bbdo3 * compat patch for ack * fix bad cache handling * fix bad boolean for ack --- .../sc_event.lua | 26 +++- .../sc_params.lua | 139 ++++++++++++++++-- 2 files changed, 151 insertions(+), 14 deletions(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index bf122625783..00437cb262c 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -286,7 +286,7 @@ function ScEvent:is_valid_service() -- force service description to its id if no description has been found if not self.event.cache.service.description then - self.event.cache.service.description = service_infos.service_id or self.event.service_id + self.event.cache.service.description = self.event.service_id end return true @@ -324,6 +324,16 @@ function ScEvent:is_valid_event_status(accepted_status_list) return false end + -- start compat patch bbdo2 => bbdo 3 + if (not self.event.state and self.event.current_state) then + self.event.state = self.event.current_state + end + + if (not self.event.current_state and self.event.state) then + self.event.current_state = self.event.state + end + -- end compat patch + for _, status_id in ipairs(status_list) do if tostring(self.event.state) == status_id then return true @@ -358,6 +368,15 @@ end --- is_valid_event_acknowledge_state: check if the acknowledge state of the event is valid -- @return true|false (boolean) function ScEvent:is_valid_event_acknowledge_state() + -- compat patch bbdo 3 => bbdo 2 + if (not self.event.acknowledged and self.event.acknowledgement_type) then + if self.event.acknowledgement_type >= 1 then + self.event.acknowledged = true + else + 
self.event.acknowledged = false + end + end + if not self.sc_common:compare_numbers(self.params.acknowledged, self.sc_common:boolean_to_number(self.event.acknowledged), ">=") then self.sc_logger:warning("[sc_event:is_valid_event_acknowledge_state]: event is not in an valid ack state. Event ack state must be above or equal to " .. tostring(self.params.acknowledged) .. ". Current ack state: " .. tostring(self.sc_common:boolean_to_number(self.event.acknowledged))) @@ -370,6 +389,11 @@ end --- is_valid_event_downtime_state: check if the event is in an accepted downtime state -- @return true|false (boolean) function ScEvent:is_valid_event_downtime_state() + -- patch compat bbdo 3 => bbdo 2 + if (not self.event.scheduled_downtime_depth and self.event.downtime_depth) then + self.event.scheduled_downtime_depth = self.event.downtime_depth + end + if not self.sc_common:compare_numbers(self.params.in_downtime, self.event.scheduled_downtime_depth, ">=") then self.sc_logger:warning("[sc_event:is_valid_event_downtime_state]: event is not in an valid downtime state. Event downtime state must be above or equal to " .. tostring(self.params.in_downtime) .. ". Current downtime state: " .. tostring(self.sc_common:boolean_to_number(self.event.scheduled_downtime_depth))) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 87d6185f94d..43a1fa65914 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -25,6 +25,15 @@ function sc_params.new(common, logger) end self.common = common + -- get the version of the bbdo protocol (only the first digit, nothing else matters) + if broker.bbdo_version ~= nil then + _, _, self.bbdo_version = string.find(broker.bbdo_version(), "(%d+).%d+.%d+") + else + self.bbdo_version = 2 + end + + self.bbdo_version = tonumber(self.bbdo_version) + -- initiate params self.params = { -- filter broker events @@ -137,6 +146,38 @@ function sc_params.new(common, logger) } local categories = self.params.bbdo.categories + + local bbdo2_bbdo3_compat_mapping = { + [2] = { + host_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 14, + name = "host_status" + }, + service_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 24, + name = "service_status" + }, + }, + [3] = { + host_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 32, + name = "pb_host_status" + }, + service_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 29, + name = "pb_service_status" + } + } + } + self.params.bbdo.elements = { acknowledgement = { category_id = categories.neb.id, @@ -216,12 +257,7 @@ function sc_params.new(common, logger) id = 13, name = "host_parent" }, - host_status = { - category_id = categories.neb.id, - category_name = categories.neb.name, - id = 14, - name = "host_status" - }, + host_status = bbdo2_bbdo3_compat_mapping[self.bbdo_version]["host_status"], instance = { category_id = categories.neb.id, category_name = categories.neb.name, @@ -276,18 +312,67 @@ function sc_params.new(common, logger) id = 23, name = "service" }, - service_status = { - category_id = categories.neb.id, - category_name = categories.neb.name, - id = 24, - name = "service_status" - }, + service_status = 
bbdo2_bbdo3_compat_mapping[self.bbdo_version]["service_status"], instance_configuration = { category_id = categories.neb.id, category_name = categories.neb.name, id = 25, name = "instance_configuration" }, + responsive_instance = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 26, + name = "responsive_instance" + }, + pb_service = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 27, + name = "pb_service" + }, + pb_adaptive_service = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 28, + name = "pb_adaptive_service" + }, + pb_service_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 29, + name = "pb_service_status" + }, + pb_host = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 30, + name = "pb_host" + }, + pb_adaptive_host = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 31, + name = "pb_adaptive_host" + }, + pb_host_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 32, + name = "pb_host_status" + }, + pb_severity = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 33, + name = "pb_severity" + }, + pb_tag = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 34, + name = "pb_tag" + }, metric = { category_id = categories.storage.id, category_name = categories.storage.name, @@ -464,6 +549,15 @@ function sc_params.new(common, logger) self.params.element_mapping[categories.neb.id].service = elements.service.id self.params.element_mapping[categories.neb.id].service_status = elements.service_status.id self.params.element_mapping[categories.neb.id].instance_configuration = elements.instance_configuration.id + self.params.element_mapping[categories.neb.id].responsive_instance = elements.responsive_instance.id + self.params.element_mapping[categories.neb.id].pb_service = elements.pb_service.id + self.params.element_mapping[categories.neb.id].pb_adaptive_service = elements.pb_adaptive_service.id + self.params.element_mapping[categories.neb.id].pb_service_status = elements.pb_service_status.id + self.params.element_mapping[categories.neb.id].pb_host = elements.pb_host.id + self.params.element_mapping[categories.neb.id].pb_adaptive_host = elements.pb_adaptive_host.id + self.params.element_mapping[categories.neb.id].pb_host_status = elements.pb_host_status.id + self.params.element_mapping[categories.neb.id].pb_severity = elements.pb_severity.id + self.params.element_mapping[categories.neb.id].pb_tag = elements.pb_tag.id -- metric elements mapping self.params.element_mapping[categories.storage.id].metric = elements.metric.id @@ -518,7 +612,15 @@ function sc_params.new(common, logger) [elements.service_group_member.id] = "service_group_member", [elements.service.id] = "service", [elements.service_status.id] = "service_status", - [elements.instance_configuration.id] = "instance_configuration" + [elements.instance_configuration.id] = "instance_configuration", + [elements.pb_service.id] = "pb_service", + [elements.pb_adaptive_service.id] = "pb_adaptive_service", + [elements.pb_service_status.id] = "pb_service_status", + [elements.pb_host.id] = "pb_host", + [elements.pb_adaptive_host.id] = "pb_adaptive_host", + [elements.pb_host_status.id] = "pb_host_status", + [elements.pb_severity.id] = "pb_severity", + [elements.pb_tag] = "pb_tag" }, [categories.storage.id] = { [elements.metric.id] = 
"metric", @@ -590,6 +692,17 @@ function sc_params.new(common, logger) [1] = "WARNING", [2] = "CRITICAL", [3] = "UNKNOWN" + }, + [elements.pb_host_status.id] = { + [0] = "UP", + [1] = "DOWN", + [2] = "UNREACHABLE" + }, + [elements.pb_service_status.id] = { + [0] = "OK", + [1] = "WARNING", + [2] = "CRITICAL", + [3] = "UNKNOWN" } }, [categories.bam.id] = { From bf24f16b021eb1c6d979e82a361f79fdfdf2c05f Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 12 May 2022 17:02:07 +0200 Subject: [PATCH 138/219] add spec file (#110) * add spec file * fix bad spec file name --- ...eon-stream-connectors-lib-3.1.0-1.rockspec | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 stream-connectors/modules/specs/3.1.x/centreon-stream-connectors-lib-3.1.0-1.rockspec diff --git a/stream-connectors/modules/specs/3.1.x/centreon-stream-connectors-lib-3.1.0-1.rockspec b/stream-connectors/modules/specs/3.1.x/centreon-stream-connectors-lib-3.1.0-1.rockspec new file mode 100644 index 00000000000..bee0a86b40e --- /dev/null +++ b/stream-connectors/modules/specs/3.1.x/centreon-stream-connectors-lib-3.1.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.1.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.1.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From e8212dc2bcd807d3dc1155133c6d19070a8a15b5 Mon Sep 17 00:00:00 2001 From: tcharles Date: Wed, 1 Jun 2022 17:27:23 +0200 
Subject: [PATCH 139/219] (stream/events) add logstash-events stream connector (#111) * add logstash-events stream connector * fix communication protocol --- .../logstash/logstash-events-apiv2.lua | 340 ++++++++++++++++++ 1 file changed, 340 insertions(+) create mode 100644 stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua new file mode 100644 index 00000000000..7eb94f491fd --- /dev/null +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -0,0 +1,340 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker logstash Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "http_server_url", + "port" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/logstash-events.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.http_server_url = params.http_server_url + self.sc_params.params.port = params.port + self.sc_params.params.username = params.username or "" + self.sc_params.params.password = params.password or "" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file() + self.sc_params:build_accepted_elements_info() + 
self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload) return self:send_data(payload) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + for index, value in pairs(template) do + self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) + end + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +function EventQueue:format_event_host() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + title = self.sc_params.params.status_mapping[event.category][event.element][event.state] .. ": " .. tostring(event.cache.host.name), + state = self.sc_params.params.status_mapping[event.category][event.element][event.state], + hostname = tostring(event.cache.host.name), + output = event.output, + poller = event.cache.poller, + event_timestamp = event.last_check + } +end + +function EventQueue:format_event_service() + local event = self.sc_event.event + self.sc_event.event.formated_event = { + title = self.sc_params.params.status_mapping[event.category][event.element][event.state] .. ": " .. tostring(event.cache.host.name) .. ", " .. 
tostring(event.cache.service.description), + state = self.sc_params.params.status_mapping[event.category][event.element][event.state], + hostname = tostring(event.cache.host.name), + service = tostring(event.cache.service.description), + output = event.output, + poller = event.cache.poller, + event_timestamp = event.last_check + } +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. broker.json_encode(event) + end + + return payload +end + + +function EventQueue:send_data(payload) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Logstash address is: " .. tostring(self.sc_params.params.http_server_url .. ":" .. self.sc_params.params.port)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.sc_params.params.http_server_url .. ":" .. self.sc_params.params.port) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_CUSTOMREQUEST, "PUT") + :setopt( + curl.OPT_HTTPHEADER, + { + "accept: application/json" + } + ) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. 
self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  if (self.sc_params.params.username ~= '') then
+    if (self.sc_params.params.password ~= '') then
+      http_request:setopt(curl.OPT_USERPWD, self.sc_params.params.username .. ":" .. self.sc_params.params.password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: basic auth username is configured but no password has been provided")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(payload)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  if http_response_code == 200 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_event:is_valid_element() then
+      -- format event if it is validated
+      if queue.sc_event:is_valid_event() then
+        queue:format_accepted_event()
+      end
+      --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
\ No newline at end of file
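
As a hedged illustration of what this connector ships (field names taken from format_event_service above; event values are hypothetical), two queued service events flushed together produce a payload of concatenated JSON objects:

-- sketch of EventQueue:build_payload() behaviour on two formatted events
local payload = nil
payload = broker.json_encode({ title = "CRITICAL: srv-1, disk", state = "CRITICAL" })
payload = payload .. broker.json_encode({ title = "OK: srv-2, cpu", state = "OK" })
-- payload now holds something like (key order may vary):
-- {"title":"CRITICAL: srv-1, disk","state":"CRITICAL"}{"title":"OK: srv-2, cpu","state":"OK"}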
From 586d2d029d1cbf94d2520aabfd505ec53145ec97 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Fri, 10 Jun 2022 16:19:15 +0200
Subject: [PATCH 140/219] fix wrong event format method (#114)

---
 .../centreon-certified/splunk/splunk-metrics-apiv2.lua | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
index 565b2b906e4..5ec3e5de9f6 100644
--- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
+++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
@@ -76,8 +76,8 @@ function EventQueue.new(params)
 
   self.format_event = {
     [categories.neb.id] = {
-      [elements.host_status.id] = function () return self:format_metrics_host() end,
-      [elements.service_status.id] = function () return self:format_metrics_service() end
+      [elements.host_status.id] = function () return self:format_event_host() end,
+      [elements.service_status.id] = function () return self:format_event_service() end
     }
   }
 
@@ -390,4 +390,4 @@ function flush()
 
   -- there are events in the queue but they were not ready to be send
   return false
-end
\ No newline at end of file
+end
From ae915ecf49f28478f4b47891d57562d554df8103 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Mon, 13 Jun 2022 16:13:30 +0200
Subject: [PATCH 141/219] (stream/lib) add output limit (#112)

* (stream/lib) add output limit
* fixing my dumb mistakes
* fix bad variable scope

---
 .../modules/centreon-stream-connectors-lib/sc_event.lua  | 4 ++++
 .../modules/centreon-stream-connectors-lib/sc_params.lua | 4 +++-
 stream-connectors/modules/docs/sc_param.md               | 3 ++-
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
index 00437cb262c..e89fdc2f739 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
@@ -1149,6 +1149,10 @@ function ScEvent:build_outputs()
     self.event.output = string.gsub(self.event.output, "\n", self.params.output_line_break_replacement_character)
   end
 
+  if self.params.output_size_limit ~= "" then
+    self.event.output = string.sub(self.event.output, 1, self.params.output_size_limit)
+  end
+
 end
 
 --- is_valid_storage: DEPRECATED method, use NEB category to get metric data instead
diff 
--git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 43a1fa65914..1acc02e9404 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -94,6 +94,7 @@ function sc_params.new(common, logger) use_long_output = 1, remove_line_break_in_output = 1, output_line_break_replacement_character = " ", + output_size_limit = "", -- time parameters local_time_diff_from_utc = os.difftime(os.time(), os.time(os.date("!*t", os.time()))), @@ -784,6 +785,7 @@ function ScParams:check_params() self.params.output_line_break_replacement_character = self.common:if_wrong_type(self.params.output_line_break_replacement_character, "string", " ") self.params.metric_name_regex = self.common:if_wrong_type(self.params.metric_name_regex, "string", "") self.params.metric_replacement_character = self.common:ifnil_or_empty(self.params.metric_replacement_character, "_") + self.params.output_size_limit = self.common:if_wrong_type(self.params.output_size_limit, "number", "") end --- get_kafka_params: retrieve the kafka parameters and store them the self.params.kafka table @@ -858,7 +860,7 @@ function ScParams:load_event_format_file(json_string) end function ScParams:build_accepted_elements_info() - categories = self.params.bbdo.categories + local categories = self.params.bbdo.categories self.params.accepted_elements_info = {} -- list all accepted elements diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index 4aa4dae92cf..fc094af1bd7 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -74,7 +74,8 @@ The sc_param module provides methods to help you handle parameters for your stre | allow_insecure_connection | number | 0 | check the certificate validity of the peer host (0 = needs to be a valid certificate), use 1 if you are using self signed certificates | | | | use_long_output | number | 1 | use the long output when sending an event (set to 0 to send the short output) | service_status(neb), host_status(neb) | | | remove_line_break_in_output | number | 1 | replace all line breaks (\n) in the output with the character set in the output_line_break_replacement_character parameter | service_status(neb), host_status(neb) | | -| output_line_break_replacement_character | string | " " | replace all replace line break with this parameter value in the output (default value is a blank space) | service_status(neb), host_status(neb) | | +| output_line_break_replacement_character | string | " " | replace all replace line break with this parameter value in the output (default value is a blank space) | service_status(neb), host_status(neb) | +| output_size_limit | number | "" | put a character number limit for the output (no limit by default) | service_status(neb), host_status(neb) | | | metric_name_regex | string | "" | the regex that will be used to transform the metric name to a compatible name for the software that will receive the data | service_status(neb), host_status(neb) | | | metric_replacement_character | string | "_" | the character that will be used to replace invalid characters in the metric name | service_status(neb), host_status(neb) | | | logfile | string | **check the stream connector documentation** | the logfile that will be used for the stream connector | any | | From 40407e9cc35254df3cc74e61b473758f06a40ceb Mon 
Sep 17 00:00:00 2001 From: tcharles Date: Fri, 17 Jun 2022 09:33:02 +0200 Subject: [PATCH 142/219] handle last_check & last_update for better compat (#116) --- .../sc_event.lua | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index e89fdc2f739..5b68c9ac575 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -143,7 +143,15 @@ function ScEvent:is_valid_host_status_event() self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not in an accepted hostgroup") return false end - + + -- in bbdo 2 last_update do exist but not in bbdo3. + -- last_check also exist in bbdo2 but it is preferable to stay compatible with all stream connectors + if not self.event.last_update and self.event.last_check then + self.event.last_update = self.event.last_check + elseif not self.event.last_check and self.event.last_update then + self.event.last_check = self.event.last_update + end + self:build_outputs() return true @@ -219,6 +227,14 @@ function ScEvent:is_valid_service_status_event() return false end + -- in bbdo 2 last_update do exist but not in bbdo3. + -- last_check also exist in bbdo2 but it is preferable to stay compatible with all stream connectors + if not self.event.last_update and self.event.last_check then + self.event.last_update = self.event.last_check + elseif not self.event.last_check and self.event.last_update then + self.event.last_check = self.event.last_update + end + self:build_outputs() return true From bbf092ee044018f46df30843dd994cea805958a3 Mon Sep 17 00:00:00 2001 From: tcharles Date: Fri, 17 Jun 2022 10:07:00 +0200 Subject: [PATCH 143/219] add 3.2.0-1 spec file (#117) --- ...eon-stream-connectors-lib-3.2.0-1.rockspec | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 stream-connectors/modules/specs/3.2.x/centreon-stream-connectors-lib-3.2.0-1.rockspec diff --git a/stream-connectors/modules/specs/3.2.x/centreon-stream-connectors-lib-3.2.0-1.rockspec b/stream-connectors/modules/specs/3.2.x/centreon-stream-connectors-lib-3.2.0-1.rockspec new file mode 100644 index 00000000000..9efdb745d84 --- /dev/null +++ b/stream-connectors/modules/specs/3.2.x/centreon-stream-connectors-lib-3.2.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.2.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.2.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + 
["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 9edf716ba6579cc13c58d59b2983e126975198e5 Mon Sep 17 00:00:00 2001 From: tcharles Date: Fri, 24 Jun 2022 09:58:27 +0200 Subject: [PATCH 144/219] Update datadog-events-apiv2.lua --- .../centreon-certified/datadog/datadog-events-apiv2.lua | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua index a6570ead11a..3a60ce15e7b 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua @@ -147,7 +147,7 @@ function EventQueue:format_event_host() local event = self.sc_event.event self.sc_event.event.formated_event = { - title = tostring(self.sc_params.params.status_mappinng[event.category][event.element][event.state] .. " " .. event.cache.host.name), + title = tostring(self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. event.cache.host.name), text = event.output, aggregation_key = "host_" .. tostring(event.host_id), alert_type = self.state_to_alert_type_mapping[event.category][event.element][event.state], @@ -160,7 +160,7 @@ function EventQueue:format_event_service() local event = self.sc_event.event self.sc_event.event.formated_event = { - title = tostring(self.sc_params.params.status_mappinng[event.category][event.element][event.state] .. " " .. event.cache.host.name .. ": " .. event.cache.service.description), + title = tostring(self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. event.cache.host.name .. ": " .. event.cache.service.description), text = event.output, aggregation_key = "service_" .. tostring(event.host_id) .. "_" .. 
tostring(event.service_id), alert_type = self.state_to_alert_type_mapping[event.category][event.element][event.state], From b24f43412b79f1e441cd0a8056fbb9d82c12c451 Mon Sep 17 00:00:00 2001 From: tcharles Date: Fri, 29 Jul 2022 15:41:51 +0200 Subject: [PATCH 145/219] Enhance macro system (new group macro + new transformation flags) (#118) * add new escape method + refacto other ones * always put cache data in event + fix poller cache * add group macro handling (hg, sg, bv) * add new transformation flags * add doc for lua_regex_escape method * add new macros documentation * put new methods into readme * fix wrong link * add trim function * fix many bugs with macro conversion * add trim documentation * add doc for new macros method * add new methods in the readme * remove testing debug --- .../sc_common.lua | 88 +-- .../sc_event.lua | 47 +- .../sc_macros.lua | 214 ++++++- stream-connectors/modules/docs/README.md | 36 +- stream-connectors/modules/docs/sc_common.md | 67 +- stream-connectors/modules/docs/sc_macros.md | 572 +++++++++++++++++- 6 files changed, 926 insertions(+), 98 deletions(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index 72c6c6ce5e3..d06684a81c7 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -221,57 +221,58 @@ end -- @param string (string) the string that must be escaped -- @return string (string) the string with escaped characters function ScCommon:json_escape(string) - local type = type(string) - - -- check that param is a valid string - if string == nil or type == "table" then - self.sc_logger:error("[sc_common:json_escape]: the input parameter is not valid, it is either nil or a table. Sent value: " .. tostring(string)) - return string - end - - -- nothing to escape in a boolean or number value - if type ~= "string" then + if type(string) ~= "string" then + self.sc_logger:error("[sc_common:json_escape]: the input parameter is not valid, it must be a string. Sent value: " .. tostring(string)) return string end - -- escape all characters - string = string.gsub(string, '\\', '\\\\') - string = string.gsub(string, '\t', '\\t') - string = string.gsub(string, '\n', '\\n') - string = string.gsub(string, '\b', '\\b') - string = string.gsub(string, '\r', '\\r') - string = string.gsub(string, '\f', '\\f') - string = string.gsub(string, '/', '\\/') - string = string.gsub(string, '"', '\\"') - - return string + return string:gsub('\\', '\\\\') + :gsub('\t', '\\t') + :gsub('\n', '\\n') + :gsub('\b', '\\b') + :gsub('\r', '\\r') + :gsub('\f', '\\f') + :gsub('/', '\\/') + :gsub('"', '\\"') end --- xml_escape: escape xml special characters in a string -- @param string (string) the string that must be escaped -- @return string (string) the string with escaped characters function ScCommon:xml_escape(string) - local type = type(string) - - -- check that param is a valid string - if string == nil or type == "table" then - self.sc_logger:error("[sc_common:xml_escape]: the input parameter is not valid, it is either nil or a table. Sent value: " .. tostring(string)) + if type(string) ~= "string" then + self.sc_logger:error("[sc_common:xml_escape]: the input parameter is not valid, it must be a string. Sent value: " .. 
tostring(string))
     return string
   end
 
-  -- nothing to escape in a boolean or number value
-  if type ~= "string" then
+  return string:gsub('&', '&amp;')
+    :gsub('<', '&lt;')
+    :gsub('>', '&gt;')
+    :gsub('"', '&quot;')
+    :gsub("'", "&apos;")
+end
+
+--- lua_regex_escape: escape lua regex special characters in a string
+-- @param string (string) the string that must be escaped
+-- @return string (string) the string with escaped characters
+function ScCommon:lua_regex_escape(string)
+  if type(string) ~= "string" then
+    self.sc_logger:error("[sc_common:lua_regex_escape]: the input parameter is not valid, it must be a string. Sent value: " .. tostring(string))
     return string
   end
 
-  -- escape all characters
-  string = string.gsub(string, '&', '&amp;')
-  string = string.gsub(string, '<', '&lt;')
-  string = string.gsub(string, '>', '&gt;')
-  string = string.gsub(string, '"', '&quot;')
-  string = string.gsub(string, "'", "&apos;")
-
-  return string
+  return string:gsub('%%', '%%%%')
+    :gsub('%.', '%%.')
+    :gsub("%*", "%%*")
+    :gsub("%-", "%%-")
+    :gsub("%(", "%%(")
+    :gsub("%)", "%%)")
+    :gsub("%[", "%%[")
+    :gsub("%]", "%%]")
+    :gsub("%$", "%%$")
+    :gsub("%^", "%%^")
+    :gsub("%+", "%%+")
+    :gsub("%?", "%%?")
 end
 
 --- dumper: dump variables for debug purpose
@@ -322,5 +323,20 @@ function ScCommon:dumper(variable, result, tab_char)
   return result
 end
 
+--- trim: remove spaces at the beginning and end of a string (or remove the provided character)
+-- @param string (string) the string that will be trimmed
+-- @param character [opt] (string) the character to trim
+-- @return string (string) the trimmed string
+function ScCommon:trim(string, character)
+  local result = ""
+  local count = ""
+  if not character then
+    result, count = string.gsub(string, "^%s*(.-)%s*$", "%1")
+  else
+    result, count = string.gsub(string, "^" .. character .. "*(.-)" .. character .. "*$", "%1")
+  end
+
+  return result
+end
 
 return sc_common
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
index 5b68c9ac575..ba558658dc3 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
@@ -422,13 +422,13 @@ end
 --- is_valid_hostgroup: check if the event is in an accepted hostgroup
 -- @return true|false (boolean)
 function ScEvent:is_valid_hostgroup()
+  self.event.cache.hostgroups = self.sc_broker:get_hostgroups(self.event.host_id)
+
   -- return true if option is not set
   if self.params.accepted_hostgroups == "" then
     return true
   end
 
-  self.event.cache.hostgroups = self.sc_broker:get_hostgroups(self.event.host_id)
-
   -- return false if no hostgroups were found
   if not self.event.cache.hostgroups then
     self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id)
@@ -469,13 +469,13 @@ end
 --- is_valid_servicegroup: check if the event is in an accepted servicegroup
 -- @return true|false (boolean)
 function ScEvent:is_valid_servicegroup()
+  self.event.cache.servicegroups = self.sc_broker:get_servicegroups(self.event.host_id, self.event.service_id)
+
   -- return true if option is not set
   if self.params.accepted_servicegroups == "" then
     return true
   end
 
-  self.event.cache.servicegroups = self.sc_broker:get_servicegroups(self.event.host_id, self.event.service_id)
-
   -- return false if no servicegroups were found
   if not self.event.cache.servicegroups then
     self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id)
@@ -611,13 +611,13 @@ end
 --- is_valid_bv: check if the event is in an accepted BV
 -- @return true|false (boolean)
 function ScEvent:is_valid_bv()
+  self.event.cache.bvs = self.sc_broker:get_bvs_infos(self.event.host_id)
+
   -- return true if option is not set
   if self.params.accepted_bvs == "" then
     return true
   end
-
-  self.event.cache.bvs = self.sc_broker:get_bvs_infos(self.event.host_id)
-
+
   -- return false if no BVs were found
   if not self.event.cache.bvs then
     self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id)
@@ -658,6 +658,14 @@ end
 --- is_valid_poller: check if the event is monitored from an accepted poller
 -- @return true|false (boolean)
 function ScEvent:is_valid_poller()
+  self.event.cache.poller = self.sc_broker:get_instance(self.event.cache.host.instance_id)
+
+  -- required if we want to easily have access to poller name with macros {cache.instance.name}
+  self.event.cache.instance = {
+    id = self.event.cache.host.instance_id,
+    name = self.event.cache.poller
+  }
+
   -- return true if option is not set
   if self.params.accepted_pollers == "" then
     return true
@@ -669,8 +677,6 @@ function ScEvent:is_valid_poller()
     return false
   end
 
-  self.event.cache.poller = self.sc_broker:get_instance(self.event.cache.host.instance)
-
   -- return false if no poller found in cache
   if not self.event.cache.poller then
     self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id)
@@ -708,11 +714,6 @@ end
 --- is_valid_host_severity: checks if the host severity is accepted
 -- @return true|false (boolean)
 function ScEvent:is_valid_host_severity()
-  -- return true if there is no severity filter
-  if self.params.host_severity_threshold == nil then
-    return true
-  end
-
   -- initiate the severity table in the cache if it doesn't exist
   if not self.event.cache.severity then
     self.event.cache.severity = {}
@@ -721,6 +722,12 @@ function ScEvent:is_valid_host_severity()
   -- get severity of the host from broker cache
   self.event.cache.severity.host = self.sc_broker:get_severity(self.event.host_id)
 
+  -- return true if there is no severity filter
+  if self.params.host_severity_threshold == nil then
+    return true
+  end
+
+  -- return false if host severity doesn't match
   if not self.sc_common:compare_numbers(self.params.host_severity_threshold, self.event.cache.severity.host, self.params.host_severity_operator) then
     self.sc_logger:debug("[sc_event:is_valid_host_severity]: dropping event because host with id: " .. tostring(self.event.host_id) .. " has an invalid severity.
Severity is: " @@ -735,11 +742,6 @@ end --- is_valid_service_severity: checks if the service severity is accepted -- @return true|false (boolean) function ScEvent:is_valid_service_severity() - -- return true if there is no severity filter - if self.params.service_severity_threshold == nil then - return true - end - -- initiate the severity table in the cache if it doesn't exist if not self.event.cache.severity then self.event.cache.severity = {} @@ -748,6 +750,13 @@ function ScEvent:is_valid_service_severity() -- get severity of the host from broker cache self.event.cache.severity.service = self.sc_broker:get_severity(self.event.host_id, self.event.service_id) + -- return true if there is no severity filter + if self.params.service_severity_threshold == nil then + return true + end + + + -- return false if service severity doesn't match if not self.sc_common:compare_numbers(self.params.service_severity_threshold, self.event.cache.severity.service, self.params.service_severity_operator) then self.sc_logger:debug("[sc_event:is_valid_service_severity]: dropping event because service with id: " .. tostring(self.event.service_id) .. " has an invalid severity. Severity is: " diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua index 3668c4a049a..ebcaf51aa30 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -32,12 +32,27 @@ function sc_macros.new(params, logger, common) -- initiate params self.params = params + -- mapping to help get "group" type macros value + self.group_macro_conversion = { + hg = function(event, format, regex) return self:get_hg_macro(event, format, regex) end, + sg = function(event, format, regex) return self:get_sg_macro(event, format, regex) end, + bv = function(event, format, regex) return self:get_bv_macro(event, format, regex) end + } + + -- mapping to help transform group macro values into a specific format + self.group_macro_format = { + table = function(data) return self:group_macro_format_table(data) end, + inline = function(data) return self:group_macro_format_inline(data) end + } + -- mapping of macro that we will convert if asked self.transform_macro = { date = function (macro_value) return self:transform_date(macro_value) end, type = function (macro_value) return self:transform_type(macro_value) end, short = function (macro_value) return self:transform_short(macro_value) end, - state = function (macro_value, event) return self:transform_state(macro_value, event) end + state = function (macro_value, event) return self:transform_state(macro_value, event) end, + number = function (macro_value) return self:transform_number(macro_value) end, + string = function (macro_value) return self:transform_string(macro_value) end } -- mapping of centreon standard macros to their stream connectors counterparts @@ -183,12 +198,14 @@ end function ScMacros:replace_sc_macro(string, event, json_string) local cache_macro_value = false local event_macro_value = false + local group_macro_value = false + local format = false local converted_string = string -- find all macros for exemple the string: -- {cache.host.name} is the name of host with id: {host_id} -- will generate two macros {cache.host.name} and {host_id}) - for macro in string.gmatch(string, "{[%w_.]+}") do + for macro in string.gmatch(string, "{[%w_.%(%),%%%+%-%*%?%[%]%^%$]+}") do 
self.sc_logger:debug("[sc_macros:replace_sc_macro]: found a macro, name is: " .. tostring(macro)) -- check if macro is in the cache @@ -196,32 +213,36 @@ function ScMacros:replace_sc_macro(string, event, json_string) -- replace all cache macro such as {cache.host.name} with their values if cache_macro_value then - self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is a cache macro. Macro name: " - .. tostring(macro) .. ", value is: " .. tostring(cache_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) - - -- if the input string was a json encoded string, we must make sure that the value we are going to insert is json ready - if json_string then - cache_macro_value = self.sc_common:json_escape(cache_macro_value) - end - - converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(string.gsub(cache_macro_value, "%%", "%%%%"))) + converted_string = self:build_converted_string_for_cache_and_event_macro(cache_macro_value, macro, converted_string) else -- if not in cache, try to find a matching value in the event itself event_macro_value = self:get_event_macro(macro, event) -- replace all event macro such as {host_id} with their values if event_macro_value then - self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is an event macro. Macro name: " - .. tostring(macro) .. ", value is: " .. tostring(event_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) - - -- if the input string was a json encoded string, we must make sure that the value we are going to insert is json ready - if json_string then - cache_macro_value = self.sc_common:json_escape(cache_macro_value) - end - - converted_string = string.gsub(converted_string, macro, self.sc_common:json_escape(string.gsub(event_macro_value, "%%", "%%%%"))) + converted_string = self:build_converted_string_for_cache_and_event_macro(event_macro_value, macro, converted_string) else - self.sc_logger:error("[sc_macros:replace_sc_macro]: macro: " .. tostring(macro) .. ", is not a valid stream connector macro") + -- if not event or cache macro, maybe it is a group macro + group_macro_value, format = self:get_group_macro(macro, event) + + -- replace all group macro such as {group(hg,table)} with their values + if group_macro_value then + group_macro_value = broker.json_encode(group_macro_value) + macro = self.sc_common:lua_regex_escape(macro) + + self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is a group macro. Macro name: " + .. tostring(macro) .. ", value is: " .. tostring(group_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string) + .. ". Applied format is: " .. tostring(format)) + + if string.match(converted_string, '"' .. macro .. '"') then + converted_string = string.gsub(converted_string, '"' .. macro .. '"', group_macro_value) + else + converted_string = string.gsub(converted_string, "(.*)" .. macro .. "(.*)", "%1" .. self.sc_common:json_escape(self.sc_common:trim(group_macro_value, '"')) .. "%2") + end + else + self.sc_logger:error("[sc_macros:replace_sc_macro]: macro: " .. tostring(macro) .. ", is not a valid stream connector macro or we didn't find a value for it" + .. ". For example a {cache.severity.service} macro that is perfectly valid but the service has no severity") + end end end end @@ -236,9 +257,11 @@ function ScMacros:replace_sc_macro(string, event, json_string) return converted_string end + self.sc_logger:debug("[sc_macros:replace_sc_macro]: decoded json: " .. 
self.sc_common:dumper(decoded_json))
     return decoded_json
   end
 
+  self.sc_logger:debug("[sc_macros:replace_sc_macro]: converted string: " .. tostring(converted_string))
   return converted_string
 end
 
@@ -308,6 +331,104 @@ function ScMacros:get_event_macro(macro, event)
   return false
 end
 
+--- get_group_macro: check if the macro is a macro whose value must be found in a group table (meaning it is a special kind of data in the event)
+-- @param macro (string) the macro we want to check (for example: {groups(hg,table,.*)})
+-- @param event (table) the event table
+-- @return false (boolean) if the macro is not found
+-- @return macro_value (string|boolean|number) the value of the macro
+function ScMacros:get_group_macro(macro, event)
+  -- try to cut the macro
+  local group_type, format, regex = string.match(macro, "^{groups%((%w+),(%w+),(.*)%)}")
+
+  if not group_type or not format or not regex or not self.group_macro_conversion[group_type] then
+    self.sc_logger:info("[sc_macros:get_group_macro]: macro: " .. tostring(macro) .. " is not a valid group macro")
+    return false
+  end
+
+  local data, index_name = self.group_macro_conversion[group_type](event)
+  local code, converted_data = self:build_group_macro_value(data, index_name, format, regex)
+
+  if not code then
+    self.sc_logger:error("[sc_macros:get_group_macro]: couldn't convert data for group type: " .. tostring(group_type)
+      .. ". Desired format: " .. tostring(format) .. ". Filtering using regex: " .. tostring(regex))
+    return false
+  end
+
+  return converted_data, format
+end
+
+--- get_hg_macro: retrieve hostgroup information and make it available as a macro
+-- @param event (table) all the event information
+-- @return hostgroups (table) all the hostgroups linked to the event
+-- @return index_name (string) the name of the index that is linked to the name of the hostgroup
+function ScMacros:get_hg_macro(event)
+  return event.cache.hostgroups, "group_name"
+end
+
+--- get_sg_macro: retrieve servicegroup information and make it available as a macro
+-- @param event (table) all the event information
+-- @return servicegroups (table) all the servicegroups linked to the event
+-- @return index_name (string) the name of the index that is linked to the name of the servicegroup
+function ScMacros:get_sg_macro(event)
+  return event.cache.servicegroups, "group_name"
+end
+
+--- get_bv_macro: retrieve BV information and make it available as a macro
+-- @param event (table) all the event information
+-- @return bvs (table) all the BVs linked to the event
+-- @return index_name (string) the name of the index that is linked to the name of the BV
+function ScMacros:get_bv_macro(event)
+  return event.cache.bvs, "bv_name"
+end
+
+--- build_group_macro_value: build the value that must replace the macro (it will also put it in the desired format)
+-- @param data (table) the data from the group (hg, sg or bvs)
+-- @param index_name (string) the name of the index at which we will find the relevant data (most of the time, the name of hg, sg or bv)
+-- @param format (string) the output format we want (can be table or inline)
+-- @param regex (string) the regex that is going to be used to filter unwanted hg, sg or bv (use wildcard .* to accept everything)
+-- @return boolean (boolean) false if asked format is unknown, true otherwise
+-- @return macro_value (string|table) the value that will replace the macro (the type of returned value depends on the asked format)
+function ScMacros:build_group_macro_value(data, index_name, format, regex)
+  local result = {}
+  for _, group_info in pairs(data) do
+    if string.match(group_info[index_name], regex) then
+      table.insert(result, group_info[index_name])
+    end
+  end
+
+  if not self.group_macro_format[format] then
+    self.sc_logger:error("[sc_macros:build_group_macro_value]: unknown format for group macro. Format provided: " .. tostring(format))
+    return false
+  end
+
+  return true, self.group_macro_format[format](result)
+end
+
+--- group_macro_format_table: transform the value behind the macro into a table
+-- @param data (table) the values linked to the macro
+-- @return data (table) the values linked to the macro stored inside a table
+function ScMacros:group_macro_format_table(data)
+  -- data is already a table, nothing to do
+  return data
+end
+
+--- group_macro_format_inline: transform the value behind the macro into a single line string with values separated using a comma
+-- @param data (table) the values linked to the macro
+-- @return result (string) the values linked to the macro stored inside a comma-separated string
+function ScMacros:group_macro_format_inline(data)
+  local result = ""
+
+  for _, value in pairs(data) do
+    if result == "" then
+      result = value
+    else
+      result = result .. "," .. value
+    end
+  end
+
+  return result
+end
+
 --- convert_centreon_macro: replace a centreon macro with its value
 -- @param string (string) the string that may contain centreon macros
 -- @param event (table) the event table
@@ -397,7 +518,6 @@ end
 -- @param event (table) the event table
 -- @return string (string) the status of the event in a human readable format (e.g: OK, WARNING)
 function ScMacros:transform_state(macro_value, event)
-
   -- acknowledgement events are special, the state can be for a host or a service.
   -- We force the element to be host_status or service_status in order to properly convert the state
   if event.element == 1 and event.service_id == 0 then
@@ -409,4 +529,54 @@ function ScMacros:transform_state(macro_value, event)
   return self.params.status_mapping[event.category][event.element][macro_value]
 end
 
+--- transform_number: convert a string to a number
+-- @param macro_value (string) the string that needs to be converted
+-- @return number (number) a number based on the provided string
+function ScMacros:transform_number(macro_value)
+  local result = tonumber(macro_value)
+  return result
+end
+
+--- transform_string: convert anything to a string
+-- @param macro_value (any) the value that needs to be converted
+-- @return string (string) a string based on the provided input
+function ScMacros:transform_string(macro_value)
+  return tostring(macro_value)
+end
+
+--- build_converted_string_for_cache_and_event_macro: replace macros in the string that contains macros
+-- @param macro_value (any): the value of the macro that must be replaced
+-- @param macro (string): the macro name
+-- @param converted_string (string): the string in which a macro must be replaced
+-- @return converted_string (string): the string with the macro replaced
+function ScMacros:build_converted_string_for_cache_and_event_macro(macro_value, macro, converted_string)
+  -- need to escape % characters or else it will break the string.gsub that is done later
+  local clean_macro_value, _ = string.gsub(macro_value, "%%", "%%%%")
+  local clean_macro_value_json = ""
+
+  self.sc_logger:debug("[sc_macros:build_converted_string_for_cache_and_event_macro]: macro is a cache or event macro. Macro name: "
+    .. tostring(macro) .. ", value is: " .. tostring(clean_macro_value) .. ", trying to replace it in the string: " ..
tostring(converted_string)) + + --[[ + to have the best json possible, we try to remove double quotes. + "service_severity": "{cache.severity.service}" must become "service_severity": 1 and not "service_severity": "1" + "service_severity": "my service severity is: {cache.severity.service}" must become "service_severity": "my service severity is: 1" + ]]-- + if string.match(converted_string, '"' .. macro .. '"') then + -- we don't need to json encode numbers and booleans, if people want them as a string, they need to use the _scstring flag in their macro + if type(clean_macro_value) == "number" or type(clean_macro_value) == "boolean" then + clean_macro_value_json = clean_macro_value + else + clean_macro_value_json = broker.json_encode(clean_macro_value) + end + + converted_string = string.gsub(converted_string, '"' .. macro .. '"', clean_macro_value_json) + else + -- if the macro is in a middle of a string we can't directly json encode it because it will break the final json if we don't escape characters. (and doing that will result in an ugly json) + converted_string = string.gsub(converted_string, "(.*)" .. macro .. "(.*)", "%1" .. self.sc_common:json_escape(clean_macro_value) .. "%2") + end + + return converted_string +end + return sc_macros diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md index 4b2339b410f..da2148a952b 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -43,7 +43,9 @@ | load_json_file | method loads a json file and parse it | [Documentation](sc_common.md#load_json_file-method) | | json_escape | escape json characters in a string | [Documentation](sc_common.md#json_escape-method) | | xml_escape | escape xml characters in a string | [Documentation](sc_common.md#xml_escape-method) | +| lua_regex_escape | escape lua regex special characters in a string | [Documentation](sc_common.md#lua_regex_escape-method) | | dumper | dump any variable for debug purpose | [Documentation](sc_common.md#dumper-method) | +| trim | trim spaces (or provided character) at the beginning and the end of a string | [Documentation](sc_common.md#trim-method) | ## sc_logger methods @@ -125,18 +127,28 @@ ## sc_macros methods -| Method name | Method description | Link | -| ---------------------- | ------------------------------------------------------------------------------ | ----------------------------------------------------------- | -| replace_sc_macro | replace a stream connector macro with its value | [Documentation](sc_macros.md#replace_sc_macro-method) | -| get_cache_macro | retrieve a macro value in the cache | [Documentation](sc_macros.md#get_cache_macro-method) | -| get_event_macro | retrieve a macro value in the event | [Documentation](sc_macros.md#get_event_macro-method) | -| convert_centreon_macro | replace a Centreon macro with its value | [Documentation](sc_macros.md#convert_centreon_macro-method) | -| get_centreon_macro | transform a Centreon macro into a stream connector macro | [Documentation](sc_macros.md#get_centreon_macro-method) | -| get_transform_flag | try to find a transformation flag in the macro name | [Documentation](sc_macros.md#get_transform_flag-method) | -| transform_date | transform a timestamp into a human readable format | [Documentation](sc_macros.md#transform_date-method) | -| transform_short | keep the first line of a string | [Documentation](sc_macros.md#transform_short-method) | -| transform_type | convert 0 or 1 into SOFT or HARD | 
[Documentation](sc_macros.md#transform_type-method) | -| transform_state | convert a status code into its matching human readable status (OK, WARNING...) | [Documentation](sc_macros.md#transform_state-method) | +| Method name | Method description | Link | +| ------------------------------------------------ | --------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | +| replace_sc_macro | replace a stream connector macro with its value | [Documentation](sc_macros.md#replace_sc_macro-method) | +| get_cache_macro | retrieve a macro value in the cache | [Documentation](sc_macros.md#get_cache_macro-method) | +| get_event_macro | retrieve a macro value in the event | [Documentation](sc_macros.md#get_event_macro-method) | +| get_group_macro | retrieve a macro from groups (hostgroups, servicegroups, business views) | [Documentation](sc_macros.md#get_group_macro-method) | +| convert_centreon_macro | replace a Centreon macro with its value | [Documentation](sc_macros.md#convert_centreon_macro-method) | +| get_centreon_macro | transform a Centreon macro into a stream connector macro | [Documentation](sc_macros.md#get_centreon_macro-method) | +| get_transform_flag | try to find a transformation flag in the macro name | [Documentation](sc_macros.md#get_transform_flag-method) | +| transform_date | transform a timestamp into a human readable format | [Documentation](sc_macros.md#transform_date-method) | +| transform_short | keep the first line of a string | [Documentation](sc_macros.md#transform_short-method) | +| transform_type | convert 0 or 1 into SOFT or HARD | [Documentation](sc_macros.md#transform_type-method) | +| transform_state | convert a status code into its matching human readable status (OK, WARNING...) 
| [Documentation](sc_macros.md#transform_state-method) |
+| transform_number | convert a string into a number | [Documentation](sc_macros.md#transform_number-method) |
+| transform_string | convert anything into a string | [Documentation](sc_macros.md#transform_string-method) |
+| get_hg_macro | retrieves hostgroup information and makes it available as a macro | [Documentation](sc_macros.md#get_hg_macro-method) |
+| get_sg_macro | retrieves servicegroup information and makes it available as a macro | [Documentation](sc_macros.md#get_sg_macro-method) |
+| get_bv_macro | retrieves business view information and makes it available as a macro | [Documentation](sc_macros.md#get_bv_macro-method) |
+| build_group_macro_value | builds the value that must replace the macro (it will also put it in the desired format) | [Documentation](sc_macros.md#build_group_macro_value-method) |
+| group_macro_format_table | transforms the given macro value into a table | [Documentation](sc_macros.md#group_macro_format_table-method) |
+| group_macro_format_inline | transforms the given macro value into a string with values separated using commas | [Documentation](sc_macros.md#group_macro_format_inline-method) |
+| build_converted_string_for_cache_and_event_macro | replaces event or cache macros in a string that may contain them | [Documentation](sc_macros.md#build_converted_string_for_cache_and_event_macro-method) |
 
 ## sc_flush methods
diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md
index e0e0a89519a..805ec769ce9 100644
--- a/stream-connectors/modules/docs/sc_common.md
+++ b/stream-connectors/modules/docs/sc_common.md
@@ -49,10 +49,18 @@
   - [xml_escape: parameters](#xml_escape-parameters)
   - [xml_escape: returns](#xml_escape-returns)
   - [xml_escape: example](#xml_escape-example)
+  - [lua_regex_escape method](#lua_regex_escape-method)
+    - [lua_regex_escape: parameters](#lua_regex_escape-parameters)
+    - [lua_regex_escape: returns](#lua_regex_escape-returns)
+    - [lua_regex_escape: example](#lua_regex_escape-example)
   - [dumper method](#dumper-method)
     - [dumper: parameters](#dumper-parameters)
     - [dumper: returns](#dumper-returns)
    - [dumper: example](#dumper-example)
+  - [trim method](#trim-method)
+    - [trim: parameters](#trim-parameters)
+    - [trim: returns](#trim-returns)
+    - [trim: example](#trim-example)
 
 ## Introduction
@@ -434,9 +442,35 @@ local result = test_common:xml_escape(string)
 --> result is 'string with &quot; and &lt; and &gt;'
 ```
 
+## lua_regex_escape method
+
+The **lua_regex_escape** method escapes Lua regex special characters in a string.
+
+### lua_regex_escape: parameters
+
+| parameter | type | optional | default value |
+| ----------------------------- | ------ | -------- | ------------- |
+| a string that must be escaped | string | no | |
+
+### lua_regex_escape: returns
+
+| return | type | always | condition |
+| ---------------------------------------------------------------------- | -------------------------------- | ------ | --------- |
+| an escaped string (or the raw parameter if it was nil or not a string) | string (or input parameter type) | yes | |
+
+### lua_regex_escape: example
+
+```lua
+local string = 'string with % and . and *'
+--> string is 'string with % and . and *'
+
+local result = test_common:lua_regex_escape(string)
+--> result is 'string with %% and %. and %*'
+```
 
 ## dumper method
 
-The **dumper** dump variables for debug purpose
+The **dumper** method dumps variables for debug purpose
 
 ### dumper: parameters
@@ -474,3 +508,34 @@ local result = "best city info: " .. test_common:dumper(best_city)
   [number] lat: 43.89446
 ]]--
 ```
+
+## trim method
+
+The **trim** method removes spaces (or the specified character) at the beginning and the end of a string
+
+### trim: parameters
+
+| parameter | type | optional | default value |
+| ---------------------------------------------------------------------------------- | ------ | -------- | ------------- |
+| the string that must be trimmed | string | no | |
+| the character that must be removed (if not provided, will remove space characters) | string | yes | |
+
+### trim: returns
+
+| return | type | always | condition |
+| ------------------ | ------ | ------ | --------- |
+| the trimmed string | string | yes | |
+
+### trim: example
+
+```lua
+local string = "   I'm a space maaaaan    "
+
+local result = test_common:trim(string)
+--> result is: "I'm a space maaaaan"
+
+local string = ";;;;;;I'm no longer a space maaaaan;;;;;;;;;;;;;;"
+
+local result = test_common:trim(string, ";")
+--> result is: "I'm no longer a space maaaaan"
+```
diff --git a/stream-connectors/modules/docs/sc_macros.md b/stream-connectors/modules/docs/sc_macros.md
index 98ecca20d48..6bb246d3bc8 100644
--- a/stream-connectors/modules/docs/sc_macros.md
+++ b/stream-connectors/modules/docs/sc_macros.md
@@ -5,6 +5,11 @@
   - [Stream connectors macro explanation](#stream-connectors-macro-explanation)
     - [Event macros](#event-macros)
     - [Cache macros](#cache-macros)
+    - [Group macros](#group-macros)
+      - [group type](#group-type)
+      - [output format](#output-format)
+      - [regex filter](#regex-filter)
+      - [examples](#examples)
     - [Transformation flags](#transformation-flags)
   - [Module initialization](#module-initialization)
   - [Module constructor](#module-constructor)
@@ -21,6 +26,10 @@
     - [get_event_macro: parameters](#get_event_macro-parameters)
     - [get_event_macro: returns](#get_event_macro-returns)
     - [get_event_macro: example](#get_event_macro-example)
+  - [get_group_macro method](#get_group_macro-method)
+    - [get_group_macro: parameters](#get_group_macro-parameters)
+    - [get_group_macro: returns](#get_group_macro-returns)
+    - [get_group_macro: example](#get_group_macro-example)
   - [convert_centreon_macro method](#convert_centreon_macro-method)
     - [convert_centreon_macro: parameters](#convert_centreon_macro-parameters)
     - [convert_centreon_macro: returns](#convert_centreon_macro-returns)
     - [convert_centreon_macro: example](#convert_centreon_macro-example)
@@ -49,6 +58,42 @@
     - [transform_state: parameters](#transform_state-parameters)
     - [transform_state: returns](#transform_state-returns)
     - [transform_state: example](#transform_state-example)
+  - [transform_number method](#transform_number-method)
+    - [transform_number: parameters](#transform_number-parameters)
+    - [transform_number: returns](#transform_number-returns)
+    - [transform_number: example](#transform_number-example)
+  - [transform_string method](#transform_string-method)
+    - [transform_string: parameters](#transform_string-parameters)
+    - [transform_string: returns](#transform_string-returns)
+    - [transform_string: example](#transform_string-example)
+  - [get_hg_macro method](#get_hg_macro-method)
+    - [get_hg_macro: parameters](#get_hg_macro-parameters)
+    - [get_hg_macro: returns](#get_hg_macro-returns)
+    - [get_hg_macro: example](#get_hg_macro-example)
+  - [get_sg_macro method](#get_sg_macro-method)
+    - [get_sg_macro: parameters](#get_sg_macro-parameters)
+    - [get_sg_macro: returns](#get_sg_macro-returns)
+    - [get_sg_macro: example](#get_sg_macro-example)
+  - [get_bv_macro method](#get_bv_macro-method)
+    - [get_bv_macro: parameters](#get_bv_macro-parameters)
+    - [get_bv_macro: returns](#get_bv_macro-returns)
+    - [get_bv_macro: example](#get_bv_macro-example)
+  - [build_group_macro_value method](#build_group_macro_value-method)
+    - [build_group_macro_value: parameters](#build_group_macro_value-parameters)
+    - [build_group_macro_value: returns](#build_group_macro_value-returns)
+    - [build_group_macro_value: example](#build_group_macro_value-example)
+  - [group_macro_format_table method](#group_macro_format_table-method)
+    - [group_macro_format_table: parameters](#group_macro_format_table-parameters)
+    - [group_macro_format_table: returns](#group_macro_format_table-returns)
+    - [group_macro_format_table: example](#group_macro_format_table-example)
+  - [group_macro_format_inline method](#group_macro_format_inline-method)
+    - [group_macro_format_inline: parameters](#group_macro_format_inline-parameters)
+    - [group_macro_format_inline: returns](#group_macro_format_inline-returns)
+    - [group_macro_format_inline: example](#group_macro_format_inline-example)
+  - [build_converted_string_for_cache_and_event_macro method](#build_converted_string_for_cache_and_event_macro-method)
+    - [build_converted_string_for_cache_and_event_macro: parameters](#build_converted_string_for_cache_and_event_macro-parameters)
+    - [build_converted_string_for_cache_and_event_macro: returns](#build_converted_string_for_cache_and_event_macro-returns)
+    - [build_converted_string_for_cache_and_event_macro: example](#build_converted_string_for_cache_and_event_macro-example)
 
 ## Introduction
@@ -56,7 +101,13 @@ The sc_macros module provides methods to handle a stream connector oriented macr
 
 ## Stream connectors macro explanation
 
-There are two kind of stream connectors macro, the **event macros** and the **cache macros**. The first type refers to data that are accessible right from the event. The second type refers to data that needs to be retrieved from the broker cache.
+There are three kinds of stream connector macros:
+
+- **event macros**
+- **cache macros**
+- **group macros**
+
+The first type refers to data that is accessible right from the event. The second type refers to data that needs to be retrieved from the broker cache. And the last type refers to three kinds of group objects in Centreon (hostgroups, servicegroups and business views).
 
 ### Event macros
@@ -76,16 +127,13 @@ This means that it is possible to use the following macros
 
 This one is a bit more complicated. The purpose is to retrieve information from the event cache using a macro. If you rely on the centreon-stream-connectors-lib to fill the cache, here is what you need to know.
-There are 8 kind of cache
+There are 5 kinds of cache
 
 - host cache (for any event that is linked to a host, which means any event but BA events)
 - service cache (for any event that is linked to a service)
-- poller cache (only generated if you filter your events on a poller)
-- severity cache (only generated if you filter your events on a severity)
-- hostgroups cache (only generated if you filter your events on a hostgroup)
-- servicegroups cache (only generated if you filter your events on a servicegroup)
+- poller cache (for any event that is linked to a poller, which means any event but BA events)
+- severity cache (for any event that is linked to a host, which means any event but BA events)
 - ba cache (only for a ba_status event)
-- bvs cache (only generated if you filter your BA events on a BV)
 
 For example, if we want to retrieve the description of a service from the cache (because the description is not provided in the event data), we will use `{cache.service.description}`.
@@ -99,6 +147,68 @@ This means that it is possible to use the following macros
 "{cache.service.last_time_critical}" -- will be replaced by the service last_time_critical timestamp
 ```
 
+cache values for hosts: [list](sc_broker.md#get_host_all_infos-example)
+cache values for services: [list](sc_broker.md#get_services_all_infos-example)
+cache values for BAs: [list](sc_broker.md#get_ba_infos-example)
+cache values for pollers:
+
+- {cache.instance.name}
+- {cache.instance.id}
+
+cache values for severities:
+
+- {cache.severity.service}
+- {cache.severity.host}
+
+### Group macros
+
+Group macros are a very special kind of macro that allows you to retrieve the hostgroups, servicegroups or BVs that are linked to your host/service/BA. The syntax goes as follows: `{groups(<group_type>,<output_format>,<regex_filter>)}`
+
+It means that when using a group macro, you need to specify which kind of group you want, its output format and the filter you are going to use.
+
+#### group type
+
+When using a group macro, you need to set a group type. You have three possibilities:
+
+- hg (to retrieve hostgroups)
+- sg (to retrieve servicegroups)
+- bv (to retrieve business views)
+
+#### output format
+
+When using a group macro, you need to set an output format. You have two possibilities:
+
+- table (each found group is going to be stored in a table structure)
+- inline (each found group is going to be stored in a string and each value will be separated using a `,`)
+
+#### regex filter
+
+When using a group macro, you need to set a regex filter. You can accept everything using `.*`, or, for example, only accept groups whose name is made of alphanumeric characters with `^%w+$`.
+
+[More information about regex in lua](https://www.lua.org/pil/20.2.html)
+
+#### examples
+
+for a service linked to:
+
+| hostgroups | servicegroups  |
+| ---------- | -------------- |
+| HG_1       | FOO_the-first  |
+| HG_2       | FOO_the-second |
+| HG_3       | another_sg     |
+
+get all hostgroups in a table format:
+
+| macro                   | result                     |
+| ----------------------- | -------------------------- |
+| `{groups(hg,table,.*)}` | `["HG_1", "HG_2", "HG_3"]` |
+
+get all servicegroups that start with "FOO" in an inline format:
+
+| macro                        | result                           |
+| ---------------------------- | -------------------------------- |
+| `{groups(sg,inline,^FOO.*)}` | `"FOO_the-first,FOO_the-second"` |
 
 ### Transformation flags
 
 You can use transformation flags on stream connectors macros. The purpose of those flags is to convert the given value to something more appropriate. For example, you can convert a timestamp to a human readable date.
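To see what happens behind such a flag before going through the full list below, here is a minimal standalone sketch of a `_scdate`-style conversion. It is only an illustration: the real implementation is the module's `transform_date` method, and the hard-coded format string below is an assumed stand-in for the `timestamp_conversion_format` parameter.

```lua
-- minimal sketch of a _scdate-style conversion (illustration only).
-- The module reads the date format from its timestamp_conversion_format
-- parameter; "%Y-%m-%d %H:%M:%S" is an assumed value for this example.
local function transform_date(macro_value)
  return os.date("%Y-%m-%d %H:%M:%S", tonumber(macro_value))
end

print(transform_date("1649592148")) --> e.g. "2022-04-10 13:22:28", depending on the local timezone
```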
@@ -111,6 +221,8 @@ Here is the list of all available flags
 | _sctype | convert a state type number to its human value | 0 | SOFT |
 | _scstate | convert a state to its human value | 2 | WARNING (for a service) |
 | _scshort | only retrieve the first line of a string (mostly used to get the output instead of the long output of a service for example) | "my output\n this is part of the longoutput" | "my output" |
+| _scnumber | convert a string to a number | "1" | 1 |
+| _scstring | convert anything to a string | false | "false" |
 
 The **_scdate** is a bit specific because you can change the date format using the [**timestamp_conversion_format parameter**](sc_param.md#default-parameters)
@@ -121,7 +233,9 @@ With all that information in mind, we can use the following macros
 "{cache.service.last_time_critical_scdate}" -- will be replaced by the service last_time_critical converted in a human readable date format
 "{state_type_sctype}" -- will be replaced by the service state_type in a human readable format (SOFT or HARD)
 "{state_scstate}" -- will be replaced by the service state in a human readable format (OK, WARNING, CRITICAL or UNKNOWN)
-"{output_short}" -- will be replaced by the first line of the service output
+"{output_scshort}" -- will be replaced by the first line of the service output
+"{cache.severity.service_scnumber}" -- will be replaced by 1 instead of "1"
+"{acknowledged_scstring}" -- will be replaced by "false" instead of false
 ```
 
 ## Module initialization
@@ -296,6 +410,68 @@ result = test_macros:get_event_macro(macro, event)
 --> result is false, cache.host.name is in the cache table, not directly in the event table
 ```
 
+## get_group_macro method
+
+The **get_group_macro** method replaces a stream connector group macro by its value.
+
+Head over the following chapter for more information:
+
+- [Group macros](#group-macros)
+
+### get_group_macro: parameters
+
+| parameter | type | optional | default value |
+| -------------- | ------ | -------- | ------------- |
+| the macro name | string | no | |
+| the event | table | no | |
+
+### get_group_macro: returns
+
+| return | type | always | condition |
+| ------------------ | --------------------------- | ------ | ------------------------------------------ |
+| false | boolean | no | if the macro is not a group macro |
+| value of the macro | boolean or string or number | no | the value that has been found in the event |
+
+### get_group_macro: example
+
+```lua
+local macro = "{groups(hg,table,^%w+$)}"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    hostgroups = {
+      [1] = {
+        group_id = 27,
+        group_name = "hg_1"
+      },
+      [2] = {
+        group_id = 12,
+        group_name = "hg2"
+      },
+      [3] = {
+        group_id = 1991,
+        group_name = "hg3"
+      }
+    }
+  }
+}
+
+local result = test_macros:get_group_macro(macro, event)
+--> result is
+--[[
+  {
+    [1] = "hg2",
+    [2] = "hg3"
+  }
+]]
+
+macro = "{groups(foo,bar,.*)}"
+result = test_macros:get_group_macro(macro, event)
+--> result is false, foo is not a valid group type and bar is not a valid format type
+```
 
 ## convert_centreon_macro method
 
 The **convert_centreon_macro** method replaces all centreon macros in a string (such as $HOSTALIAS$) by their values. It will first convert each one to its stream connector macro counterpart and then convert the stream connector macro to its value.
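For reference, the dissection of the `{groups(...)}` macros documented above comes down to a single Lua pattern match. The pattern below is the exact one used by `get_group_macro` in this patch; the rest of the module plumbing is omitted from this standalone sketch.

```lua
-- minimal sketch: split a {groups(...)} macro into its three parts with the
-- same Lua pattern that ScMacros:get_group_macro uses
local macro = "{groups(sg,inline,^FOO.*)}"
local group_type, format, regex = string.match(macro, "^{groups%((%w+),(%w+),(.*)%)}")

print(group_type) --> sg
print(format)     --> inline
print(regex)      --> ^FOO.*
```

When the match fails (for example on `{groups(foo)}`), `group_type` is nil, so the method logs that the macro is not a valid group macro and returns false.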
@@ -517,3 +693,383 @@ event = {
 result = test_macros:transform_state(state, event)
 --> result is "DOWN" because it is a host (category 1 = neb, element 14 = host_status event)
 ```
+
+## transform_number method
+
+The **transform_number** method transforms a string value into a number
+
+### transform_number: parameters
+
+| parameter | type | optional | default value |
+| --------- | ------ | -------- | ------------- |
+| a string | string | no | |
+
+### transform_number: returns
+
+| return | type | always | condition |
+| -------- | ------ | ------ | --------- |
+| a number | number | yes | |
+
+### transform_number: example
+
+```lua
+local string_number = "0"
+
+local result = test_macros:transform_number(string_number)
+--> result is 0
+```
+
+## transform_string method
+
+The **transform_string** method transforms any value into a string
+
+### transform_string: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| anything | any | no | |
+
+### transform_string: returns
+
+| return | type | always | condition |
+| -------- | ------ | ------ | --------- |
+| a string | string | yes | |
+
+### transform_string: example
+
+```lua
+local boolean = false
+
+local result = test_macros:transform_string(boolean)
+--> result is "false"
+```
+
+## get_hg_macro method
+
+The **get_hg_macro** method retrieves hostgroup information and makes it available as a macro
+
+### get_hg_macro: parameters
+
+| parameter | type | optional | default value |
+| --------- | ----- | -------- | ------------- |
+| the event | table | no | |
+
+### get_hg_macro: returns
+
+| return | type | always | condition |
+| ------------------------------------------------------- | ------ | ------ | --------- |
+| all hostgroups | table | yes | |
+| the name of the index that is linked to hostgroups name | string | yes | |
+
+### get_hg_macro: example
+
+```lua
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    hostgroups = {
+      [1] = {
+        group_id = 27,
+        group_name = "hg_1"
+      },
+      [2] = {
+        group_id = 12,
+        group_name = "hg2"
+      },
+      [3] = {
+        group_id = 1991,
+        group_name = "hg3"
+      }
+    }
+  }
+}
+
+local hostgroups, index_name = test_macros:get_hg_macro(event)
+--> hostgroups is:
+--[[
+  hostgroups = {
+    [1] = {
+      group_id = 27,
+      group_name = "hg_1"
+    },
+    [2] = {
+      group_id = 12,
+      group_name = "hg2"
+    },
+    [3] = {
+      group_id = 1991,
+      group_name = "hg3"
+    }
+  }
+]]
+--> index_name is: group_name
+```
+
+## get_sg_macro method
+
+The **get_sg_macro** method retrieves servicegroup information and makes it available as a macro
+
+### get_sg_macro: parameters
+
+| parameter | type | optional | default value |
+| --------- | ----- | -------- | ------------- |
+| the event | table | no | |
+
+### get_sg_macro: returns
+
+| return | type | always | condition |
+| ---------------------------------------------------------- | ------ | ------ | --------- |
+| all servicegroups | table | yes | |
+| the name of the index that is linked to servicegroups name | string | yes | |
+
+### get_sg_macro: example
+
+```lua
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    servicegroups = {
+      [1] = {
+        group_id = 27,
+        group_name = "sg_1"
+      },
+      [2] = {
+        group_id = 12,
+        group_name = "sg2"
+      },
+      [3] = {
+        group_id = 1991,
+        group_name = "sg3"
+      }
+    }
+  }
+}
+
+local servicegroups, index_name = test_macros:get_sg_macro(event)
+--> servicegroups is:
+--[[
+  servicegroups = {
+    [1] = {
+      group_id = 27,
+      group_name = "sg_1"
+    },
+    [2] = {
+      group_id = 12,
+      group_name = "sg2"
+    },
+    [3] = {
+      group_id = 1991,
+      group_name = "sg3"
+    }
+  }
+]]
+--> index_name is: group_name
+```
+
+## get_bv_macro method
+
+The **get_bv_macro** method retrieves business view information and makes it available as a macro
+
+### get_bv_macro: parameters
+
+| parameter | type | optional | default value |
+| --------- | ----- | -------- | ------------- |
+| the event | table | no | |
+
+### get_bv_macro: returns
+
+| return | type | always | condition |
+| ----------------------------------------------------------- | ------ | ------ | --------- |
+| all business views | table | yes | |
+| the name of the index that is linked to business views name | string | yes | |
+
+### get_bv_macro: example
+
+```lua
+local event = {
+  ba_id = 2712,
+  state = 0,
+  cache = {
+    bvs = {
+      [1] = {
+        bv_id = 27,
+        bv_name = "bv_1"
+      },
+      [2] = {
+        bv_id = 12,
+        bv_name = "bv2"
+      },
+      [3] = {
+        bv_id = 1991,
+        bv_name = "bv3"
+      }
+    }
+  }
+}
+
+local bvs, index_name = test_macros:get_bv_macro(event)
+--> bvs is:
+--[[
+  bvs = {
+    [1] = {
+      bv_id = 27,
+      bv_name = "bv_1"
+    },
+    [2] = {
+      bv_id = 12,
+      bv_name = "bv2"
+    },
+    [3] = {
+      bv_id = 1991,
+      bv_name = "bv3"
+    }
+  }
+]]
+--> index_name is: bv_name
+```
+
+## build_group_macro_value method
+
+The **build_group_macro_value** method builds the value that must replace the macro (it will also put it in the desired format)
+
+### build_group_macro_value: parameters
+
+| parameter | type | optional | default value |
+| -------------------------------------------------------- | ------ | -------- | ------------- |
+| the group data | table | no | |
+| the name of the index where the group name will be found | string | no | |
+| the format in which the result will be built | string | no | |
+| the regex that will filter found groups | string | no | |
+
+### build_group_macro_value: returns
+
+| return | type | always | condition |
+| ----------------------------------- | --------------- | ------ | ----------------------------------- |
+| boolean | boolean | yes | |
+| the macro value in the right format | string or table | no | only if the desired format is valid |
+
+### build_group_macro_value: example
+
+```lua
+local group_data = {
+  [1] = {
+    bv_id = 27,
+    bv_name = "bv_1"
+  },
+  [2] = {
+    bv_id = 12,
+    bv_name = "bv2"
+  },
+  [3] = {
+    bv_id = 1991,
+    bv_name = "bv3"
+  }
+}
+local index_name = "bv_name"
+local format = "inline"
+local regex = "^%w+$"
+
+local code, result = test_macros:build_group_macro_value(group_data, index_name, format, regex)
+--> code is: true
+--> result is: "bv2,bv3"
+
+format = "bad_format"
+code, result = test_macros:build_group_macro_value(group_data, index_name, format, regex)
+--> code is: false
+--> result is: nil
+```
+
+## group_macro_format_table method
+
+The **group_macro_format_table** method transforms the given macro value into a table (it does nothing as is, since the value is already a table)
+
+### group_macro_format_table: parameters
+
+| parameter | type | optional | default value |
+| --------------- | ----- | -------- | ------------- |
+| the macro value | table | no | |
+
+### group_macro_format_table: returns
+
+| return | type | always | condition |
+| -------------------------- | ----- | ------ | --------- |
+| the macro value as a table | table | yes | |
+
+### group_macro_format_table: example
+
+```lua
+local macro_value = {
+  [1] = "bv2",
+  [2] = "bv3"
+}
+
+local result = test_macros:group_macro_format_table(macro_value)
+--> result is:
+--[[
+  result = {
+    [1] = "bv2",
+    [2] = "bv3"
+  }
+]]--
+```
+
+## group_macro_format_inline method
+
+The **group_macro_format_inline** method transforms the given macro value into a string with values separated using commas
+
+### group_macro_format_inline: parameters
+
+| parameter | type | optional | default value |
+| --------------- | ----- | -------- | ------------- |
+| the macro value | table | no | |
+
+### group_macro_format_inline: returns
+
+| return | type | always | condition |
+| --------------------------- | ------ | ------ | --------- |
+| the macro value as a string | string | yes | |
+
+### group_macro_format_inline: example
+
+```lua
+local macro_value = {
+  [1] = "bv2",
+  [2] = "bv3"
+}
+
+local result = test_macros:group_macro_format_inline(macro_value)
+--> result is: "bv2,bv3"
+```
+
+## build_converted_string_for_cache_and_event_macro method
+
+The **build_converted_string_for_cache_and_event_macro** method replaces a cache or event macro in a string that may contain those macros
+
+### build_converted_string_for_cache_and_event_macro: parameters
+
+| parameter | type | optional | default value |
+| ---------------------------------- | ------ | -------- | ------------- |
+| the macro value | any | no | |
+| the macro name | string | no | |
+| the string that may contain macros | string | no | |
+
+### build_converted_string_for_cache_and_event_macro: returns
+
+| return | type | always | condition |
+| ----------------------------------- | ------ | ------ | --------- |
+| the string with the macro converted | string | yes | |
+
+### build_converted_string_for_cache_and_event_macro: example
+
+```lua
+local string_with_macros = "my cache macro {cache.host.name}"
+local macro_name = "{cache.host.name}"
+local macro_value = "Arcadia"
+
+local result = test_macros:build_converted_string_for_cache_and_event_macro(macro_value, macro_name, string_with_macros)
+--> result is: "my cache macro Arcadia"
+```

From 173e908fb5b6a6a7ddb30b19d6707f6a4d10b917 Mon Sep 17 00:00:00 2001
From: tcharles
Date: Thu, 11 Aug 2022 08:27:05 +0200
Subject: [PATCH 146/219] add custom code feature for stream connectors (#119)

* add new custom_code_file param handling
* add custom code execution
* load custom code in each api v2 SC
* put custom_code in the params table
* fix bad logger instance name
* Add custom code documentation
* avoid breaking change
* add new param in the doc
* improve doc
* wording
* better code block
* Apply suggestions from code review
* typos

Co-authored-by: Simon Bomm
---
 .../datadog/datadog-events-apiv2.lua          |   6 +
 .../datadog/datadog-metrics-apiv2.lua         |   8 +-
 .../elasticsearch/elastic-events-apiv2.lua    |   6 +
 .../kafka/kafka-events-apiv2.lua              |   6 +
 .../logstash/logstash-events-apiv2.lua        |   6 +
 .../omi/omi_events-apiv2.lua                  |   6 +
 .../pagerduty/pagerduty-events-apiv2.lua      |   6 +
 .../servicenow/servicenow-em-events-apiv2.lua |   6 +
 .../servicenow-incident-events-apiv2.lua      |   6 +
 .../signl4/signl4-events-apiv2.lua            |   6 +
 .../splunk/splunk-events-apiv2.lua            |   6 +
 .../splunk/splunk-metrics-apiv2.lua           |   6 +
 .../sc_event.lua                              |   5 +
 .../sc_params.lua                             |  48 ++++-
 stream-connectors/modules/docs/README.md      |  10 +
 stream-connectors/modules/docs/custom_code.md | 190 ++++++++++++++++++
 stream-connectors/modules/docs/sc_param.md    |   1 +
 17 files changed, 325 insertions(+), 3 deletions(-)
 create mode 100644 stream-connectors/modules/docs/custom_code.md

diff --git a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua
index a6570ead11a..af23b9aa108 100644
--- 
a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua @@ -71,6 +71,12 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua index 44c6a781499..a2d7e3ed94c 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -75,9 +75,13 @@ function EventQueue.new(params) -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() - self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) - self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua index 10b3a5e6278..79ec00b4ca8 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -67,6 +67,12 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 418b2539fc3..62593a7a4fb 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -78,6 +78,12 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file() + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua index 7eb94f491fd..76a47af3d40 100644 --- a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -66,6 +66,12 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file() + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua index 16c23d1a6ee..fc7a22f9c63 100644 --- a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua +++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua @@ -100,6 +100,12 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) self.sc_params:build_accepted_elements_info() + + -- only load the custom code file, not executed yet + if not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) local categories = self.sc_params.params.bbdo.categories diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index 5a1ba6e314c..8d96815d1ee 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -73,6 +73,12 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua index 9ba33d61405..5a00af80279 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua @@ -76,6 +76,12 @@ function EventQueue.new (params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua index d5873edb5b6..5bc0ec83c63 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua @@ -87,6 +87,12 @@ function EventQueue.new (params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua index 4bde0a224b0..c03d59d1a01 100644 --- a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua @@ -68,6 +68,12 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index b5dc8a4e0ea..9c40a8a25d4 100755 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -66,6 +66,12 @@ function EventQueue.new(params) self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file() + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua index 5ec3e5de9f6..dacb0cae010 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -67,6 +67,12 @@ function EventQueue.new(params) -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index ba558658dc3..ea4815f93e6 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -75,6 +75,11 @@ function ScEvent:is_valid_event() is_valid_event = self:is_valid_bam_event() end + -- run custom code + if self.params.custom_code and type(self.params.custom_code) == "function" then + self, is_valid_event = self.params.custom_code(self) + end + return is_valid_event end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 1acc02e9404..3ff424b83bb 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -96,6 +96,9 @@ function sc_params.new(common, logger) output_line_break_replacement_character = " ", output_size_limit = "", + -- custom code parameters + custom_code_file = "", + -- time parameters local_time_diff_from_utc = os.difftime(os.time(), os.time(os.date("!*t", os.time()))), timestamp_conversion_format = "%Y-%m-%d %X", -- will print 2021-06-11 10:43:38 @@ -824,7 +827,7 @@ end --- load_event_format_file: load a json file which purpose is to serve as a template to format events -- @param json_string [opt] (boolean) convert template from a lua table to a json string --- @return true|false (boolean) if file is valid template file or not +-- @return true|false (boolean) if file is a valid template file or not function ScParams:load_event_format_file(json_string) -- return if there is no file configured if self.params.format_file == "" or self.params.format_file == nil then @@ -859,6 +862,49 @@ function ScParams:load_event_format_file(json_string) return true end +--- load_custom_code_file: load a custom code which purpose is to enhance stream connectors possibilities without having to edit any standard code +-- @param file (string) the file that needs to be loaded (example: /etc/centreon-broker/sc-custom-code.lua) +-- @return true|false (boolean) if file is a valid custom code file or not +function ScParams:load_custom_code_file(custom_code_file) + -- return if there is no file configured + if self.params.custom_code_file == "" or self.params.custom_code_file == nil then + return true + end + + local file = io.open(custom_code_file, "r") + + -- return false if we can't open the file + if not file then + self.logger:error("[sc_params:load_custom_code_file]: couldn't open file " + .. tostring(custom_code_file) .. ". Make sure your file is there and that it is readable by centreon-broker") + return false + end + + -- get content of the file + local file_content = file:read("*a") + io.close(file) + + -- check if it returns self, true or self, false + for return_value in string.gmatch(file_content, "return (.-)\n") do + if return_value ~= "self, true" and return_value ~= "self, false" then + self.logger:error("[sc_params:load_custom_code_file]: your custom code file: " .. tostring(custom_code_file) + .. " is returning wrong values (" .. tostring(return_value) .. "). 
It must only return 'self, true' or 'self, false'")
+      return false
+    end
+  end
+
+  -- check if it is valid lua code
+  local custom_code, error = loadfile(custom_code_file)
+
+  if not custom_code then
+    self.logger:error("[sc_params:load_custom_code_file]: custom_code_file doesn't contain valid lua code. Error is: " .. tostring(error))
+    return false
+  end
+
+  self.params.custom_code = custom_code
+  return true
+end
+
 function ScParams:build_accepted_elements_info()
   local categories = self.params.bbdo.categories
   self.params.accepted_elements_info = {}
diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md
index da2148a952b..57ca260f126 100644
--- a/stream-connectors/modules/docs/README.md
+++ b/stream-connectors/modules/docs/README.md
@@ -12,6 +12,7 @@
   - [sc_metrics methods](#sc_metrics-methods)
   - [google.bigquery.bigquery methods](#googlebigquerybigquery-methods)
   - [google.auth.oauth methods](#googleauthoauth-methods)
+  - [Additional documentation](#additional-documentation)
 
 ## Libraries list
 
@@ -193,3 +194,12 @@
 | create_signature | create the signature for the jwt token | [Documentation](google/auth/oauth.md#create_signature-method) |
 | get_access_token | get a google access token using a jwt token | [Documentation](google/auth/oauth.md#get_access_token-method) |
 | curl_google | use curl to get an access token | [Documentation](google/auth/oauth.md#curl_google-method) |
+
+## Additional documentation
+
+| Description | Link |
+| ------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- |
+| learn how to create a custom format using a format file | [Documentation](./templating.md) |
+| learn how to create custom code for your stream connector | [Documentation](./custom_code.md) |
+| have a look at all the available mappings and how to use them | [Documentation](./mappings.md) |
+| have a look at the event structure | [Documentation](./broker_data_structure.md) and [Documentation](https://docs.centreon.com/docs/developer/developer-broker-mapping/) |
diff --git a/stream-connectors/modules/docs/custom_code.md b/stream-connectors/modules/docs/custom_code.md
new file mode 100644
index 00000000000..9e74aaf5afd
--- /dev/null
+++ b/stream-connectors/modules/docs/custom_code.md
@@ -0,0 +1,190 @@
+# Stream connectors and custom code
+
+- [Stream connectors and custom code](#stream-connectors-and-custom-code)
+  - [Introduction](#introduction)
+  - [When is it needed?](#when-is-it-needed)
+  - [How to configure your stream connector](#how-to-configure-your-stream-connector)
+  - [Mandatory code](#mandatory-code)
+  - [Available data for your custom code](#available-data-for-your-custom-code)
+  - [Macros, templating and custom code](#macros-templating-and-custom-code)
+  - [Filter events](#filter-events)
+  - [Use all the above chapters](#use-all-the-above-chapters)
+    - [Add methods from other modules](#add-methods-from-other-modules)
+    - [Add custom macros](#add-custom-macros)
+
+## Introduction
+
+Stream connectors offer the possibility to write custom code. The idea is to let people fully customize how their stream connector behaves while still using the standard Centreon stream connector.
+Thanks to this feature, you no longer have to maintain a customized stream connector, and updating it to get access to the latest features is nothing to fear.
+
+## When is it needed?
+
+It is needed in two cases (mostly):
+
+- you need more filters than the default ones. For example, you want to filter out hosts that do not have *notes*
+- you need to add data to your event payload
+
+## How to configure your stream connector
+
+In your stream connector configuration (broker output), you can add the following option:
+
+| option name      | value                                   | type   |
+| ---------------- | --------------------------------------- | ------ |
+| custom_code_file | /etc/centreon-broker/my-custom-code.lua | string |
+
+## Mandatory code
+
+Your custom code must respect three rules if you want it to work.
+
+It must start with:
+
+```lua
+local self = ...
+```
+
+It must end by returning the self variable and a boolean, followed by a new line.
+
+```lua
+return self, true
+-- new line after true
+```
+
+You can't do:
+
+```lua
+-- ✘ bad, no space after the comma
+return self,true
+-- new line after true
+```
+
+nor:
+
+```lua
+-- ✘ bad, no new line after the return line
+return self, true -- no new line after true
+```
+
+## Available data for your custom code
+
+Everything has been done to give you access to all the useful information. It means that you can:
+
+- access the [params table](sc_param.md#default-parameters) and the parameters that are dedicated to the stream connector that you are using
+- access the [event table](broker_data_structure.md) (you can also take a look at our [broker documentation](https://docs.centreon.com/docs/developer/developer-broker-mapping/))
+- access all the methods from the [event module](sc_event.md), [params module](sc_param.md), [logger module](sc_logger.md), [common module](sc_common.md), [broker module](sc_broker.md) and, if you are using a metric stream connector, the [metrics module](sc_metrics.md)
+- access all the broker daemon methods that are listed [here](https://docs.centreon.com/docs/developer/developer-broker-stream-connector/#the-broker-table)
+
+## Macros, templating and custom code
+
+Since stream connectors have been designed to be highly customizable, we have made a tool to change the data that you are sending. To do so, you use a custom format file ([documentation](templating.md)). In this file you can use macros ([documentation](sc_macros.md)).
+
+By using custom code you can create your own macros, and it is very easy to do! Let's take a look at that.
+
+```lua
+local self = ...
+
+self.event.my_custom_macro = "my custom macro value"
+
+return self, true
+-- new line after true
+```
+
+Thanks to the above code, we are now able to use `{my_custom_macro}` as a new macro, and it will be replaced by the string `my custom macro value`.
+
+To sum up what we have seen: just add a new entry in the `self.event` table. Its key is going to be the name of your custom macro, and that is it.
+
+## Filter events
+
+As explained [at the beginning](#when-is-it-needed), you can add your own filters to your data. Find below a rundown of the major steps that are performed when using a stream connector:
+
+1. stream connector init (only done on cbd reload or restart)
+2. filter events
+3. format event
+4. put event in a queue
+5. send all events stored in the queue
+
+The second step has a set of filters, but they may not be enough for you. This is where a custom code file can be useful.
+
+Let's keep our idea of filtering out events from hosts that do not have **notes** and see what it looks like with real code:
+
+```lua
+local self = ...
+
+if not self.event.cache.host.notes or self.event.cache.host.notes == "" then
+  -- the boolean part of the return is here to tell the stream connector to ignore the event
+  return self, false
+end
+
+-- if the host has a note, we let the stream connector continue its work on this event
+return self, true
+-- new line after true
+```
+
+## Use all the above chapters
+
+### Add methods from other modules
+
+What if we start logging what our custom code does? To do so, we can use [the warning method](sc_logger.md#warning-method):
+
+```lua
+local self = ...
+
+if not self.event.cache.host.notes or self.event.cache.host.notes == "" then
+  -- use the warning method from the logger module
+  self.sc_logger:warning("[custom_code]: host: "
+    .. tostring(self.event.cache.host.name) .. " does not have notes, therefore we drop the event")
+  -- the boolean part of the return is here to tell the stream connector to ignore the event
+  return self, false
+end
+
+-- if the host has a note, we let the stream connector continue its work on this event
+return self, true
+-- new line after true
+```
+
+Maybe you want a closer look at what is inside the `self.event` table. To do so, we can dump it in our logfile using [the Dumper method](sc_common.md#dumper-method):
+
+```lua
+local self = ...
+
+-- we dump the event table to have a closer look at all the available data from the event itself
+-- and all the things that are in the cache that we may want to use
+self.sc_logger:notice("[custom_code]: self.event table data: " .. self.sc_common:dumper(self.event))
+
+if not self.event.cache.host.notes or self.event.cache.host.notes == "" then
+  -- use the warning method from the logger module
+  self.sc_logger:warning("[custom_code]: host: "
+    .. tostring(self.event.cache.host.name) .. " does not have notes, therefore we drop the event")
+  -- the boolean part of the return is here to tell the stream connector to ignore the event
+  return self, false
+end
+
+-- if the host has a note, we let the stream connector continue its work on this event
+return self, true
+-- new line after true
+```
+
+### Add custom macros
+
+```lua
+local self = ...
+
+-- we dump the event table to have a closer look at all the available data from the event itself
+-- and all the things that are in the cache that we may want to use
+self.sc_logger:notice("[custom_code]: self.event table data: " .. self.sc_common:dumper(self.event))
+
+if not self.event.cache.host.notes or self.event.cache.host.notes == "" then
+  -- use the warning method from the logger module
+  self.sc_logger:warning("[custom_code]: host: "
+    .. tostring(self.event.cache.host.name) .. " does not have notes, therefore we drop the event")
+  -- the boolean part of the return is here to tell the stream connector to ignore the event
+  return self, false
+end
+
+-- let's say we can extract the origin of our host by using the first three letters of its name
+self.event.origin = string.sub(tostring(self.event.cache.host.name), 1, 3)
+-- we now have a custom macro called {origin}
+
+-- if the host has a note, we let the stream connector continue its work on this event
+return self, true
+-- new line after true
+```
diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md
index fc094af1bd7..d7888e8cb90 100644
--- a/stream-connectors/modules/docs/sc_param.md
+++ b/stream-connectors/modules/docs/sc_param.md
@@ -66,6 +66,7 @@ The sc_param module provides methods to help you handle parameters for your stre
 | timestamp_conversion_format | string | %Y-%m-%d %X | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | [date format information](https://www.lua.org/pil/22.1.html) |
 | send_data_test | number | 0 | When set to 1, send data in the logfile of the stream connector instead of sending it where the stream connector was designed to | all | |
 | format_file | string | | Path to a file that will be used as a template to format events instead of using default format | only usable for events stream connectors (\*-events-apiv2.lua) and not metrics stream connectors (\*-metrics-apiv2.lua) you should put the file in /etc/centreon-broker to keep your broker configuration in a single place. [**See documentation for more information**](templating.md) | |
+| custom_code_file | string | | Path to a file that contains your custom lua code | any | [Documentation](custom_code.md) |
 | proxy_address | string | | address of the proxy | | |
 | proxy_port | number | | port of the proxy | | |
 | proxy_username | string | | user for the proxy | | |

From 204ed375bde5ea9af6924624625ad866d9bb093f Mon Sep 17 00:00:00 2001
From: tcharles
Date: Thu, 11 Aug 2022 08:36:45 +0200
Subject: [PATCH 147/219] add specfile for 3.3.0 release (#120)

---
 ...eon-stream-connectors-lib-3.3.0-1.rockspec | 39 +++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 stream-connectors/modules/specs/3.3.x/centreon-stream-connectors-lib-3.3.0-1.rockspec

diff --git a/stream-connectors/modules/specs/3.3.x/centreon-stream-connectors-lib-3.3.0-1.rockspec b/stream-connectors/modules/specs/3.3.x/centreon-stream-connectors-lib-3.3.0-1.rockspec
new file mode 100644
index 00000000000..025e0cf66b3
--- /dev/null
+++ b/stream-connectors/modules/specs/3.3.x/centreon-stream-connectors-lib-3.3.0-1.rockspec
@@ -0,0 +1,39 @@
+package = "centreon-stream-connectors-lib"
+version = "3.3.0-1"
+source = {
+  url = "git+https://github.com/centreon/centreon-stream-connector-scripts",
+  tag = "3.3.0-1"
+}
+description = {
+  summary = "Centreon stream connectors lua modules",
+  detailed = [[
+    Those modules provides helpful methods to create
+    stream connectors for Centreon
+  ]],
+  license = ""
+}
+dependencies = {
+  "lua >= 5.1, < 5.4",
+  "luasocket >= 3.0rc1-2"
+}
+build = {
+  type = "builtin",
+  modules = {
+    ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua",
+    ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua",
+    ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua",
+
["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 7f741bb5cd1e97ef7872120daea7268fd637c5ee Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 5 Dec 2022 12:12:01 +0100 Subject: [PATCH 148/219] add canopsis stream certified stream connector + queue system enhancement (#122) * prepare canopsis stream connector * add queue metadatas * start writing http query * fix log message in datadog sc * finish canopsis stream connector * update flush class with metadata --- .../capensis/canopsis-events-apiv2.lua | 523 ++++++++++++++++++ .../datadog/datadog-events-apiv2.lua | 2 +- .../datadog/datadog-metrics-apiv2.lua | 2 +- .../sc_flush.lua | 34 +- 4 files changed, 550 insertions(+), 11 deletions(-) create mode 100644 stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua diff --git a/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua new file mode 100644 index 00000000000..0b3c0538891 --- /dev/null +++ b/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua @@ -0,0 +1,523 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Canopsis Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + 
+-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "canopsis_user", + "canopsis_password", + "canopsis_host" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/canopsis-events.log" + local log_level = params.log_level or 2 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.canopsis_user = params.canopsis_user + self.sc_params.params.canopsis_password = params.canopsis_password + self.sc_params.params.connector = params.connector or "centreon-stream" + self.sc_params.params.connector_name = params.connector_name or "centreon-stream-central" + self.sc_params.params.canopsis_event_route = params.canopsis_event_route or "/api/v2/event" + self.sc_params.params.canopsis_downtime_route = params.canopsis_downtime_route or "/api/v2/pbehavior" + self.sc_params.params.canopsis_host = params.canopsis_host + self.sc_params.params.canopsis_port = params.canopsis_port or 8082 + self.sc_params.params.sending_method = params.sending_method or "api" + self.sc_params.params.sending_protocol = params.sending_protocol or "http" + self.sc_params.params.timezone = params.timezone or "Europe/Paris" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status,acknowledgement,downtime" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_params.params.send_mixed_events = 0 + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file + and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) + then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " + .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.sc_flush.queues[categories.neb.id][elements.host_status.id].queue_metadata.event_route = self.sc_params.params.canopsis_event_route + self.sc_flush.queues[categories.neb.id][elements.service_status.id].queue_metadata.event_route = self.sc_params.params.canopsis_event_route + self.sc_flush.queues[categories.neb.id][elements.downtime.id].queue_metadata.event_route = self.sc_params.params.canopsis_downtime_route + self.sc_flush.queues[categories.neb.id][elements.acknowledgement.id].queue_metadata.event_route = self.sc_params.params.canopsis_event_route + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end, + [elements.downtime.id] = function () return self:format_event_downtime() end, + [elements.acknowledgement.id] = function () return self:format_event_acknowledgement() end + }, + [categories.bam.id] = {} + } + + self.centreon_to_canopsis_state = { + [categories.neb.id] = { + [elements.host_status.id] = { + [0] = 0, + [1] = 3, + [2] = 2 + }, + [elements.service_status.id] = { + [0] = 0, + [1] = 1, + [2] = 3, + [3] = 2 + } + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +function EventQueue:list_servicegroups() + local servicegroups = {} + + for _, sg in pairs(self.sc_event.event.cache.servicegroups) do + table.insert(servicegroups, sg.group_name) + end + + return servicegroups +end + +function EventQueue:list_hostgroups() + local hostgroups = {} + + for _, hg in pairs(self.sc_event.event.cache.hostgroups) do + table.insert(hostgroups, hg.group_name) + end + + return hostgroups +end + +function EventQueue:format_event_host() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + event_type = "check", + source_type = "component", + connector = self.sc_params.params.connector, + connector_name = self.sc_params.params.connector_name, + component = tostring(event.cache.host.name), + resource = "", + timestamp = event.last_check, + output = event.output, + state = self.centreon_to_canopsis_state[event.category][event.element][event.state], + -- extra informations + hostgroups = self:list_hostgroups(), + notes_url = tostring(event.cache.host.notes_url), + action_url = tostring(event.cache.host.action_url) + } +end + +function EventQueue:format_event_service() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + event_type = "check", + source_type = "resource", + connector = self.sc_params.params.connector, + connector_name = self.sc_params.params.connector_name, + component = tostring(event.cache.host.name), + resource = tostring(event.cache.service.description), + timestamp = event.last_check, + output = event.output, + state = self.centreon_to_canopsis_state[event.category][event.element][event.state], + -- extra informations + servicegroups = self:list_servicegroups(), + notes_url = event.cache.service.notes_url, + action_url = event.cache.service.action_url, + hostgroups = self:list_hostgroups() + } +end + +function EventQueue:format_event_acknowledgement() + local event = self.sc_event.event + local elements = self.sc_params.params.bbdo.elements + + self.sc_event.event.formated_event = { + event_type = "ack", + crecord_type = "ack", + author = event.author, + resource = "", + component = tostring(event.cache.host.name), + connector = self.sc_params.params.connector, + connector_name = self.sc_params.params.connector_name, + timestamp = event.entry_time, + output = event.comment_data, + origin = "centreon", + ticket = "", + state_type = 1, + ack_resources = false + } + + if event.service_id then + self.sc_event.event.formated_event['source_type'] = "resource" + self.sc_event.event.formated_event['resource'] = tostring(event.cache.service.description) + self.sc_event.event.formated_event['ref_rk'] = tostring(event.cache.service.description) + .. "/" .. tostring(event.cache.host.name) + self.sc_event.event.formated_event['state'] = self.centreon_to_canopsis_state[event.category] + [elements.service_status.id][event.state] + else + self.sc_event.event.formated_event['source_type'] = "component" + self.sc_event.event.formated_event['ref_rk'] = "undefined/" .. 
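-- Worked example of the centreon_to_canopsis_state mapping defined in EventQueue.new:
-- for services, Centreon OK(0) -> 0, WARNING(1) -> 1, CRITICAL(2) -> 3, UNKNOWN(3) -> 2;
-- for hosts, UP(0) -> 0, DOWN(1) -> 3, UNREACHABLE(2) -> 2. A host DOWN event therefore
-- reaches Canopsis with state 3.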
tostring(event.cache.host.name) + self.sc_event.event.formated_event['state'] = self.centreon_to_canopsis_state[event.category] + [elements.host_status.id][event.state] + end + + -- send ackremove + if event.deletion_time then + event['event_type'] = "ackremove" + event['crecord_type'] = "ackremove" + event['timestamp'] = event.deletion_time + end +end + +function EventQueue:format_event_downtime() + local event = self.sc_event.event + local elements = self.sc_params.params.bbdo.elements + local canopsis_downtime_id = "centreon-downtime-".. event.internal_id .. "-" .. event.entry_time + + if event.cancelled then + local metadata = { + event_route = self.sc_params.params.canopsis_downtime_route .. "/" .. canopsis_downtime_id, + method = "DELETE" + } + self:send_data({}, metadata) + else + self.sc_event.event.formated_event = { + _id = canopsis_downtime_id, + author = event.author, + name = canopsis_downtime_id, + tstart = event.start_time, + tstop = event.end_time, + type_ = "Maintenance", + reason = "Autre", + timezone = self.sc_params.params.timezone, + comments = { + { + ['author'] = event.author, + ['message'] = event.comment_data + } + }, + filter = { + ['$and'] = { + { + ['_id'] = "" + } + } + }, + exdate = {}, + } + + if event.service_id then + self.sc_event.event.formated_event['filter']['$and'][1]['_id'] = tostring(event.cache.service.description) + .. "/" .. tostring(event.cache.host.name) + else + self.sc_event.event.formated_event['filter']['$and'][1]['_id'] = tostring(event.cache.host.name) + end + end +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local next = next + + if next(self.sc_event.event.formated_event) ~= nil then + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + end + +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = { event } + else + payload = table.insert(payload, event) + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + local params = self.sc_params.params + local url = params.sending_protocol .. "://" .. 
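-- With the defaults set in EventQueue.new, the URL assembled here looks like
-- http://<canopsis_user>:<canopsis_password>@<canopsis_host>:8082/api/v2/event for checks,
-- and .../api/v2/pbehavior for downtimes; queue_metadata.event_route selects the route.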
params.canopsis_user .. ":" .. params.canopsis_password + .. "@" .. params.canopsis_host .. ':' .. params.canopsis_port .. queue_metadata.event_route + local data = broker.json_encode(payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(data)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. data) + self.sc_logger:info("[EventQueue:send_data]: Canopsis address is: " .. tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt( + curl.OPT_HTTPHEADER, + { + "content-length: " .. string.len(data), + "content-type: application/json" + } + ) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username + .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + if queue_metadata.method and queue_metadata == "DELETE" then + http_request:setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method) + else + http_request:setopt( + curl.OPT_HTTPHEADER, + { + "content-length: " .. string.len(data), + "content-type: application/json" + } + ) + http_request:setopt_postfields(data) + end + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + -- https://docs.datadoghq.com/fr/api/latest/events/ other than 202 is not good + if http_response_code == 200 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " + .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " + .. tostring(http_response_code) .. ". Message is: " .. 
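-- Hedged configuration sketch for the proxy handling above (illustrative values):
--   proxy_address  = "proxy.example.com"  -- with proxy_port, passed to curl.OPT_PROXY
--   proxy_port     = 3128
--   proxy_username = "user"               -- with proxy_password, passed to curl.OPT_PROXYUSERPWD
--   proxy_password = "secret"
-- Setting an address without a port (or a username without a password) only logs an error.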
tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end diff --git a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua index af23b9aa108..0273c81b40e 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua @@ -221,7 +221,7 @@ function EventQueue:send_data(payload) end self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) - self.sc_logger:info("[EventQueue:send_data]: Pagerduty address is: " .. tostring(url)) + self.sc_logger:info("[EventQueue:send_data]: Datadog address is: " .. 
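-- (This hunk and the datadog-metrics one below fix the same copy-paste slip: both Datadog
-- connectors logged "Pagerduty address is" before this commit.)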
tostring(url)) local http_response_body = "" local http_request = curl.easy() diff --git a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua index a2d7e3ed94c..c8bda2c8f62 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -268,7 +268,7 @@ function EventQueue:send_data(payload) end self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload_json)) - self.sc_logger:info("[EventQueue:send_data]: Pagerduty address is: " .. tostring(url)) + self.sc_logger:info("[EventQueue:send_data]: Datadog address is: " .. tostring(url)) local http_response_body = "" local http_request = curl.easy() diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua index 39d7218aac5..55bf5f04f22 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua @@ -7,6 +7,7 @@ local sc_flush = {} local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_common = require("centreon-stream-connectors-lib.sc_common") local ScFlush = {} @@ -22,6 +23,8 @@ function sc_flush.new(params, logger) self.sc_logger = sc_logger.new() end + self.sc_common = sc_common.new(self.sc_logger) + self.params = params self.last_global_flush = os.time() @@ -31,13 +34,18 @@ function sc_flush.new(params, logger) self.queues = { [categories.neb.id] = {}, [categories.storage.id] = {}, - [categories.bam.id] = {} + [categories.bam.id] = {}, + global_queues_metadata = {} } -- link events queues to their respective categories and elements for element_name, element_info in pairs(self.params.accepted_elements_info) do self.queues[element_info.category_id][element_info.element_id] = { - events = {} + events = {}, + queue_metadata = { + category_id = element_info.category_id, + element_id = element_info.element_id + } } end @@ -104,7 +112,7 @@ function ScFlush:flush_mixed_payload(build_payload_method, send_method) -- send events if max buffer size is reached if counter >= self.params.max_buffer_size then - if not self:flush_payload(send_method, payload) then + if not self:flush_payload(send_method, payload, self.queues.global_queues_metadata) then return false end @@ -116,7 +124,7 @@ function ScFlush:flush_mixed_payload(build_payload_method, send_method) end -- we need to empty all queues to not mess with broker retention - if not self:flush_payload(send_method, payload) then + if not self:flush_payload(send_method, payload, self.queues.global_queues_metadata) then return false end @@ -137,10 +145,14 @@ function ScFlush:flush_homogeneous_payload(build_payload_method, send_method) -- add event to the payload payload = build_payload_method(payload, event) counter = counter + 1 - + -- send events if max buffer size is reached if counter >= self.params.max_buffer_size then - if not self:flush_payload(send_method, payload) then + if not self:flush_payload( + send_method, + payload, + self.queues[element_info.category_id][element_info.element_id].queue_metadata + ) then return false end @@ -151,7 +163,11 @@ function ScFlush:flush_homogeneous_payload(build_payload_method, send_method) end -- make sure there are no events left inside a specific queue - if not self:flush_payload(send_method, payload) then + if 
not self:flush_payload( + send_method, + payload, + self.queues[element_info.category_id][element_info.element_id].queue_metadata + ) then return false end @@ -164,9 +180,9 @@ end --- flush_payload: flush a payload that contains a single type of events (services with services only and hosts with hosts only for example) -- @return boolean (boolean) true or false depending on the success of the operation -function ScFlush:flush_payload(send_method, payload) +function ScFlush:flush_payload(send_method, payload, metadata) if payload then - if not send_method(payload) then + if not send_method(payload, metadata) then return false end end From 0331ee6fdf56d70576e3d8acdbb75c67034c0f10 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 5 Dec 2022 12:29:14 +0100 Subject: [PATCH 149/219] add specfile 3.4.0-1 (#123) --- ...eon-stream-connectors-lib-3.4.0-1.rockspec | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 stream-connectors/modules/specs/3.4.x/centreon-stream-connectors-lib-3.4.0-1.rockspec diff --git a/stream-connectors/modules/specs/3.4.x/centreon-stream-connectors-lib-3.4.0-1.rockspec b/stream-connectors/modules/specs/3.4.x/centreon-stream-connectors-lib-3.4.0-1.rockspec new file mode 100644 index 00000000000..2e2852941e3 --- /dev/null +++ b/stream-connectors/modules/specs/3.4.x/centreon-stream-connectors-lib-3.4.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.4.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.4.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + 
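-- Installation sketch, assuming luarocks is available on the target system:
--   luarocks make centreon-stream-connectors-lib-3.4.0-1.rockspec
-- Each entry in this modules table maps a require() name to the packaged Lua file, e.g.
-- require("centreon-stream-connectors-lib.sc_flush").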
["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From f7dca9162888368d71dd1816920e82996dc63ee5 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 5 Jan 2023 16:57:15 +0100 Subject: [PATCH 150/219] fix ack remove and dt cache and more sc options (#124) --- .../capensis/canopsis-events-apiv2.lua | 22 +++++++++++++++---- .../sc_event.lua | 4 ++-- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua index 0b3c0538891..615ace2896e 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua @@ -61,6 +61,7 @@ function EventQueue.new(params) self.sc_params.params.canopsis_user = params.canopsis_user self.sc_params.params.canopsis_password = params.canopsis_password self.sc_params.params.connector = params.connector or "centreon-stream" + self.sc_params.params.connector_name_type = params.connector_name_type or "poller" self.sc_params.params.connector_name = params.connector_name or "centreon-stream-central" self.sc_params.params.canopsis_event_route = params.canopsis_event_route or "/api/v2/event" self.sc_params.params.canopsis_downtime_route = params.canopsis_downtime_route or "/api/v2/pbehavior" @@ -76,6 +77,10 @@ function EventQueue.new(params) self.sc_params:param_override(params) self.sc_params:check_params() self.sc_params.params.send_mixed_events = 0 + + if self.sc_params.params.connector_name_type ~= "poller" and self.sc_params.params.connector_name_type ~= "custom" then + self.sc_params.params.connector_name_type = "poller" + end self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) @@ -187,6 +192,15 @@ function EventQueue:list_hostgroups() return hostgroups end +function EventQueue:get_connector_name() + -- use poller name as a connector name + if self.sc_params.params.connector_name_type == "poller" then + return tostring(self.sc_event.event.cache.poller) + end + + return tostring(self.sc_params.params.connector_name) +end + function EventQueue:format_event_host() local event = self.sc_event.event @@ -194,7 +208,7 @@ function EventQueue:format_event_host() event_type = "check", source_type = "component", connector = self.sc_params.params.connector, - connector_name = self.sc_params.params.connector_name, + connector_name = self:get_connector_name(), component = tostring(event.cache.host.name), resource = "", timestamp = event.last_check, @@ -214,7 +228,7 @@ function EventQueue:format_event_service() event_type = "check", source_type = "resource", connector = self.sc_params.params.connector, - connector_name = self.sc_params.params.connector_name, + connector_name = self:get_connector_name(), component = tostring(event.cache.host.name), resource = tostring(event.cache.service.description), timestamp = event.last_check, @@ -239,7 +253,7 @@ function EventQueue:format_event_acknowledgement() resource = "", component = tostring(event.cache.host.name), connector = self.sc_params.params.connector, - connector_name = self.sc_params.params.connector_name, + connector_name = self:get_connector_name(), timestamp = event.entry_time, output = event.comment_data, origin = "centreon", @@ -275,7 +289,7 @@ function EventQueue:format_event_downtime() local elements = 
self.sc_params.params.bbdo.elements local canopsis_downtime_id = "centreon-downtime-".. event.internal_id .. "-" .. event.entry_time - if event.cancelled then + if event.cancelled or event.deletion_time then local metadata = { event_route = self.sc_params.params.canopsis_downtime_route .. "/" .. canopsis_downtime_id, method = "DELETE" diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index ea4815f93e6..f924e562435 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -991,8 +991,8 @@ end function ScEvent:get_downtime_service_status() -- if cache is not filled we can't get the state of the service if - not self.event.cache.host.last_time_ok - or not self.event.cache.host.last_time_warning + not self.event.cache.service.last_time_ok + or not self.event.cache.service.last_time_warning or not self.event.cache.service.last_time_critical or not self.event.cache.service.last_time_unknown then From 32a481b211202b717dca90c4ec1f7587b9af8864 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 12 Jan 2023 17:09:48 +0100 Subject: [PATCH 151/219] fix ack remove dt remove and dt send (#126) * fix ack remove dt remove and dt send * force buffer size to 1 --- .../capensis/canopsis-events-apiv2.lua | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua index 615ace2896e..64dac21b777 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua @@ -44,7 +44,7 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/canopsis-events.log" - local log_level = params.log_level or 2 + local log_level = params.log_level or 1 -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) @@ -77,6 +77,7 @@ function EventQueue.new(params) self.sc_params:param_override(params) self.sc_params:check_params() self.sc_params.params.send_mixed_events = 0 + self.sc_params.params.max_buffer_size = 1 if self.sc_params.params.connector_name_type ~= "poller" and self.sc_params.params.connector_name_type ~= "custom" then self.sc_params.params.connector_name_type = "poller" @@ -278,9 +279,9 @@ function EventQueue:format_event_acknowledgement() -- send ackremove if event.deletion_time then - event['event_type'] = "ackremove" - event['crecord_type'] = "ackremove" - event['timestamp'] = event.deletion_time + self.sc_event.event.formated_event['event_type'] = "ackremove" + self.sc_event.event.formated_event['crecord_type'] = "ackremove" + self.sc_event.event.formated_event['timestamp'] = event.deletion_time end end @@ -360,7 +361,7 @@ end -------------------------------------------------------------------------------- function EventQueue:build_payload(payload, event) if not payload then - payload = { event } + payload = event else payload = table.insert(payload, event) end @@ -423,7 +424,7 @@ function EventQueue:send_data(payload, queue_metadata) end -- adding the HTTP POST data - if queue_metadata.method and queue_metadata == "DELETE" then + if queue_metadata.method and queue_metadata.method == "DELETE" then http_request:setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method) 
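-- (The fix above: the old condition compared the whole queue_metadata table to the string
-- "DELETE", which could never be true, so the custom DELETE request used to cancel a
-- Canopsis pbehavior was never issued; it now checks queue_metadata.method == "DELETE".)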
else http_request:setopt( From 41e97a761736772568ec9cbe54a37b189ef067ad Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 23 Jan 2023 09:32:51 +0100 Subject: [PATCH 152/219] Flapping and canopsis4 (#127) * handle flapping * update event doc with flapping * add flapping parameter * add documentation for flapping param * add new methods in the readme * rename canopsis and handle duplicate dt edge case * add canopsis api v4 sc * improve queue metadata system * fix queue metadata system * use authkey instead of basic auth * update the flush class documentation * add queue metadata system to all apiv2 sc * improve logs * fix markdown * add add_queue_metadata method documentation * put all sc_flush method in the readme index * fix code comment * add spec file for 3.5.0 --- ...s-apiv2.lua => canopsis2-events-apiv2.lua} | 17 +- .../capensis/canopsis4-events-apiv2.lua | 549 ++++++++++++++++++ .../datadog/datadog-events-apiv2.lua | 4 +- .../datadog/datadog-metrics-apiv2.lua | 4 +- .../elasticsearch/elastic-events-apiv2.lua | 4 +- .../kafka/kafka-events-apiv2.lua | 4 +- .../logstash/logstash-events-apiv2.lua | 4 +- .../omi/omi_events-apiv2.lua | 6 +- .../pagerduty/pagerduty-events-apiv2.lua | 4 +- .../servicenow/servicenow-em-events-apiv2.lua | 6 +- .../servicenow-incident-events-apiv2.lua | 6 +- .../signl4/signl4-events-apiv2.lua | 4 +- .../splunk/splunk-events-apiv2.lua | 4 +- .../splunk/splunk-metrics-apiv2.lua | 4 +- .../sc_event.lua | 25 +- .../sc_flush.lua | 31 +- .../sc_params.lua | 2 + stream-connectors/modules/docs/README.md | 31 +- stream-connectors/modules/docs/sc_event.md | 269 +++++---- stream-connectors/modules/docs/sc_flush.md | 304 +++++++--- stream-connectors/modules/docs/sc_param.md | 39 +- ...eon-stream-connectors-lib-3.5.0-1.rockspec | 39 ++ 22 files changed, 1099 insertions(+), 261 deletions(-) rename stream-connectors/centreon-certified/capensis/{canopsis-events-apiv2.lua => canopsis2-events-apiv2.lua} (95%) create mode 100644 stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua mode change 100755 => 100644 stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua create mode 100644 stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.0-1.rockspec diff --git a/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua similarity index 95% rename from stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua rename to stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua index 64dac21b777..dbc29636fd3 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua @@ -43,7 +43,7 @@ function EventQueue.new(params) self.fail = false -- set up log configuration - local logfile = params.logfile or "/var/log/centreon-broker/canopsis-events.log" + local logfile = params.logfile or "/var/log/centreon-broker/canopsis2-events.log" local log_level = params.log_level or 1 -- initiate mandatory objects @@ -100,10 +100,10 @@ function EventQueue.new(params) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements - self.sc_flush.queues[categories.neb.id][elements.host_status.id].queue_metadata.event_route = self.sc_params.params.canopsis_event_route - self.sc_flush.queues[categories.neb.id][elements.service_status.id].queue_metadata.event_route = 
self.sc_params.params.canopsis_event_route
-  self.sc_flush.queues[categories.neb.id][elements.downtime.id].queue_metadata.event_route = self.sc_params.params.canopsis_downtime_route
-  self.sc_flush.queues[categories.neb.id][elements.acknowledgement.id].queue_metadata.event_route = self.sc_params.params.canopsis_event_route
+  self.sc_flush:add_queue_metadata(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route})
+  self.sc_flush:add_queue_metadata(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route})
+  self.sc_flush:add_queue_metadata(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route})
+  self.sc_flush:add_queue_metadata(categories.neb.id, elements.downtime.id, {event_route = self.sc_params.params.canopsis_downtime_route})
 
   self.format_event = {
     [categories.neb.id] = {
@@ -447,11 +447,16 @@ function EventQueue:send_data(payload, queue_metadata)
   -- Handling the return code
   local retval = false
-  -- https://docs.datadoghq.com/fr/api/latest/events/ other than 202 is not good
+
   if http_response_code == 200 then
     self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is "
       .. tostring(http_response_code))
     retval = true
+  elseif http_response_code == 400 and string.match(http_response_body, "Trying to insert PBehavior with already existing _id") then
+    self.sc_logger:notice("[EventQueue:send_data]: Ignoring downtime with id: " .. tostring(payload._id)
+      .. ". Canopsis result: " .. tostring(http_response_body))
+    self.sc_logger:info("[EventQueue:send_data]: duplicated downtime event: " .. tostring(data))
+    retval = true
   else
     self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is "
       .. tostring(http_response_code) .. ". Message is: " .. 
tostring(http_response_body))
diff --git a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua
new file mode 100644
index 00000000000..3f62bda0096
--- /dev/null
+++ b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua
@@ -0,0 +1,549 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Canopsis Connector Events
+--------------------------------------------------------------------------------
+
+
+-- Libraries
+local curl = require "cURL"
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    "canopsis_authkey",
+    "canopsis_host"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/canopsis4-events.log"
+  local log_level = params.log_level or 1
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  -- overriding default parameters for this stream connector if the default values don't suit the basic needs
+  self.sc_params.params.canopsis_authkey = params.canopsis_authkey
+  self.sc_params.params.connector = params.connector or "centreon-stream"
+  self.sc_params.params.connector_name_type = params.connector_name_type or "poller"
+  self.sc_params.params.connector_name = params.connector_name or "centreon-stream-central"
+  self.sc_params.params.canopsis_event_route = params.canopsis_event_route or "/api/v4/event"
+  self.sc_params.params.canopsis_downtime_route = params.canopsis_downtime_route or "/api/v4/bulk/pbehavior"
+  self.sc_params.params.canopsis_host = params.canopsis_host
+  self.sc_params.params.canopsis_port = params.canopsis_port or 8082
+  self.sc_params.params.sending_method = params.sending_method or "api"
+  self.sc_params.params.sending_protocol = params.sending_protocol or "http"
+  self.sc_params.params.timezone = 
params.timezone or "Europe/Paris" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status,acknowledgement" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_params.params.send_mixed_events = 0 + + if self.sc_params.params.connector_name_type ~= "poller" and self.sc_params.params.connector_name_type ~= "custom" then + self.sc_params.params.connector_name_type = "poller" + end + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file + and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) + then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " + .. tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.sc_flush:add_queue_metadatas(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadatas(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadatas(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadatas(categories.neb.id, elements.downtime.id, {event_route = self.sc_params.params.canopsis_downtime_route}) + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end, + [elements.downtime.id] = function () return self:format_event_downtime() end, + [elements.acknowledgement.id] = function () return self:format_event_acknowledgement() end + }, + [categories.bam.id] = {} + } + + self.centreon_to_canopsis_state = { + [categories.neb.id] = { + [elements.host_status.id] = { + [0] = 0, + [1] = 3, + [2] = 2 + }, + [elements.service_status.id] = { + [0] = 0, + [1] = 1, + [2] = 3, + [3] = 2 + } + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = 
self.sc_macros:replace_sc_macro(template, self.sc_event.event, true)
+  else
+    -- the event can't be formatted if the stream connector doesn't handle this kind of event and no template file handles it either
+    if not self.format_event[category][element] then
+      self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+        .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+        .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+function EventQueue:list_servicegroups()
+  local servicegroups = {}
+
+  for _, sg in pairs(self.sc_event.event.cache.servicegroups) do
+    table.insert(servicegroups, sg.group_name)
+  end
+
+  return servicegroups
+end
+
+function EventQueue:list_hostgroups()
+  local hostgroups = {}
+
+  for _, hg in pairs(self.sc_event.event.cache.hostgroups) do
+    table.insert(hostgroups, hg.group_name)
+  end
+
+  return hostgroups
+end
+
+function EventQueue:get_connector_name()
+  -- use the poller name as the connector name
+  if self.sc_params.params.connector_name_type == "poller" then
+    return tostring(self.sc_event.event.cache.poller)
+  end
+
+  return tostring(self.sc_params.params.connector_name)
+end
+
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    event_type = "check",
+    source_type = "component",
+    connector = self.sc_params.params.connector,
+    connector_name = self:get_connector_name(),
+    component = tostring(event.cache.host.name),
+    resource = "",
+    output = event.short_output,
+    long_output = event.long_output,
+    state = self.centreon_to_canopsis_state[event.category][event.element][event.state],
+    timestamp = event.last_check
+    -- extra information that no longer exists with the canopsis api v4?
+    -- hostgroups = self:list_hostgroups(),
+    -- notes_url = tostring(event.cache.host.notes_url),
+    -- action_url = tostring(event.cache.host.action_url)
+  }
+end
+
+function EventQueue:format_event_service()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    event_type = "check",
+    source_type = "resource",
+    connector = self.sc_params.params.connector,
+    connector_name = self:get_connector_name(),
+    component = tostring(event.cache.host.name),
+    resource = tostring(event.cache.service.description),
+    output = event.short_output,
+    long_output = event.long_output,
+    state = self.centreon_to_canopsis_state[event.category][event.element][event.state],
+    timestamp = event.last_check
+    -- extra information
+    -- servicegroups = self:list_servicegroups(),
+    -- notes_url = event.cache.service.notes_url,
+    -- action_url = event.cache.service.action_url,
+    -- hostgroups = self:list_hostgroups()
+  }
+end
+
+function EventQueue:format_event_acknowledgement()
+  local event = self.sc_event.event
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.sc_event.event.formated_event = {
+    event_type = "ack",
+    author = event.author,
+    resource = "",
+    component = tostring(event.cache.host.name),
+    connector = self.sc_params.params.connector,
+    connector_name = self:get_connector_name(),
+    timestamp = event.entry_time,
+    output = event.comment_data,
+    long_output = event.comment_data
+    -- not available in api v4?
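The `centreon_to_canopsis_state` table declared in the constructor drives the `state` fields built above: Canopsis severity levels do not line up one-to-one with Centreon status codes. A small self-contained sketch of that translation (the string keys here are stand-ins for the BBDO element ids the real code indexes by):

```lua
-- stand-ins for elements.host_status.id / elements.service_status.id
local host_status, service_status = "host_status", "service_status"

local centreon_to_canopsis_state = {
  [host_status]    = { [0] = 0, [1] = 3, [2] = 2 },          -- UP, DOWN, UNREACHABLE
  [service_status] = { [0] = 0, [1] = 1, [2] = 3, [3] = 2 }, -- OK, WARNING, CRITICAL, UNKNOWN
}

-- a Centreon CRITICAL service (state 2) maps to Canopsis level 3
print(centreon_to_canopsis_state[service_status][2]) --> 3
-- a Centreon DOWN host (state 1) also maps to Canopsis level 3
print(centreon_to_canopsis_state[host_status][1])    --> 3
```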
+ -- crecord_type = "ack", + -- origin = "centreon", + -- ticket = "", + -- state_type = 1, + -- ack_resources = false + } + + if event.service_id then + self.sc_event.event.formated_event['source_type'] = "resource" + self.sc_event.event.formated_event['resource'] = tostring(event.cache.service.description) + -- only with v2 api ? + -- self.sc_event.event.formated_event['ref_rk'] = tostring(event.cache.service.description) + -- .. "/" .. tostring(event.cache.host.name) + self.sc_event.event.formated_event['state'] = self.centreon_to_canopsis_state[event.category] + [elements.service_status.id][event.state] + else + self.sc_event.event.formated_event['source_type'] = "component" + -- only with v2 api ? + -- self.sc_event.event.formated_event['ref_rk'] = "undefined/" .. tostring(event.cache.host.name) + self.sc_event.event.formated_event['state'] = self.centreon_to_canopsis_state[event.category] + [elements.host_status.id][event.state] + end + + -- send ackremove + if event.deletion_time then + self.sc_event.event.formated_event['event_type'] = "ackremove" + -- only with v2 api ? + -- self.sc_event.event.formated_event['crecord_type'] = "ackremove" + -- self.sc_event.event.formated_event['timestamp'] = event.deletion_time + end +end + +function EventQueue:format_event_downtime() + local event = self.sc_event.event + local elements = self.sc_params.params.bbdo.elements + local downtime_name = "centreon-downtime-" .. event.internal_id .. "-" .. event.entry_time + + if event.cancelled or event.deletion_time then + local metadata = { + method = "DELETE", + event_route = "/api/v4/pbehaviors" + } + self:send_data({name = downtime_name}, metadata) + else + self.sc_event.event.formated_event = { + -- _id = canopsis_downtime_id, + author = event.author, + name = downtime_name, + tstart = event.start_time, + tstop = event.end_time, + type = "Maintenance", + reason = "Other", + timezone = self.sc_params.params.timezone, + comments = { + { + ['author'] = event.author, + ['message'] = event.comment_data + } + }, + entity_pattern = { + { + { + field = "name", + cond = { + type = "eq" + } + } + } + }, + exdates = {} + } + + if event.service_id then + self.sc_event.event.formated_event["entity_pattern"][0][0]["cond"]["value"] = tostring(event.cache.service.description) + .. "/" .. tostring(event.cache.host.name) + else + self.sc_event.event.formated_event["entity_pattern"][0][0]["cond"]["value"] = tostring(event.cache.host.name) + end + end +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local next = next + + if next(self.sc_event.event.formated_event) ~= nil then + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. 
tostring(#self.sc_flush.queues[category][element].events)
+      .. ". Max is: " .. tostring(self.sc_params.params.max_buffer_size))
+  end
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {table} the payload under construction (nil on the first call)
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {table} the payload with the event appended
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = {event}
+  else
+    -- table.insert returns nothing, so the result must not be assigned back to payload
+    table.insert(payload, event)
+  end
+
+  return payload
+end
+
+function EventQueue:send_data(payload, queue_metadata)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+
+  local params = self.sc_params.params
+  local url = params.sending_protocol .. "://" .. params.canopsis_host .. ':' .. params.canopsis_port .. queue_metadata.event_route
+  local data = broker.json_encode(payload)
+
+  -- write payload in the logfile for test purpose
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(data))
+    return true
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. data)
+  self.sc_logger:info("[EventQueue:send_data]: Canopsis address is: " .. tostring(url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "content-length: " .. string.len(data),
+        "content-type: application/json",
+        "x-canopsis-authkey: " .. tostring(self.sc_params.params.canopsis_authkey)
+      }
+    )
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username
+        .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- use a custom DELETE request to remove pbehaviors
+  if queue_metadata.method and queue_metadata.method == "DELETE" then
+    http_request:setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method)
+  end
+
+  -- adding the HTTP POST data (the headers, including the authkey, were already set above
+  -- and must not be overridden by a second OPT_HTTPHEADER call)
+  http_request:setopt_postfields(data)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+
+  if http_response_code == 200 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is "
+      .. 
tostring(http_response_code))
+    retval = true
+  elseif http_response_code == 400 and string.match(http_response_body, "Trying to insert PBehavior with already existing _id") then
+    self.sc_logger:notice("[EventQueue:send_data]: Ignoring downtime with id: " .. tostring(payload._id)
+      .. ". Canopsis result: " .. tostring(http_response_body))
+    self.sc_logger:info("[EventQueue:send_data]: duplicated downtime event: " .. tostring(data))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is "
+      .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_event:is_valid_element() then
+      -- format event if it is validated
+      if queue.sc_event:is_valid_event() then
+        queue:format_accepted_event()
+      end
+    -- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end diff --git a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua index 0273c81b40e..9292723c8e2 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua @@ -108,7 +108,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -209,7 +209,7 @@ function EventQueue:build_payload(payload, event) return payload end -function EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") local url = self.sc_params.params.http_server_url .. self.sc_params.params.datadog_event_endpoint diff --git a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua index c8bda2c8f62..51516aece65 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -103,7 +103,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -255,7 +255,7 @@ function EventQueue:build_payload(payload, event) return payload end -function EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") local url = self.sc_params.params.http_server_url .. 
tostring(self.sc_params.params.datadog_metric_endpoint) diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua index 79ec00b4ca8..780fced21a8 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -88,7 +88,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -197,7 +197,7 @@ function EventQueue:format_accepted_event() return payload end - function EventQueue:send_data(payload) + function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 62593a7a4fb..c23e3e817fd 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -99,7 +99,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -208,7 +208,7 @@ end -- EventQueue:send_data, send data to external tool -- @return (boolean) -------------------------------------------------------------------------------- -function EventQueue:send_data (payload) +function EventQueue:send_data(payload, queue_metadata) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua index 76a47af3d40..ee96be5b269 100644 --- a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -87,7 +87,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -190,7 +190,7 @@ function EventQueue:build_payload(payload, event) end -function EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua index fc7a22f9c63..b4d9cb123e0 100644 --- a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua +++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua @@ -119,7 +119,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -226,13 +226,13 @@ function EventQueue:build_payload(payload, event) return payload end -function 
EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:notice("[send_data]: " .. tostring(payload)) - return true + return true end self.sc_logger:info("[EventQueue:send_data]: Going to send the following xml " .. tostring(payload)) diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index 8d96815d1ee..fc2f2353807 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -94,7 +94,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -330,7 +330,7 @@ function EventQueue:build_payload(payload, event) return payload end -function EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua index 5a00af80279..ef15ed19148 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua @@ -97,7 +97,7 @@ function EventQueue.new (params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -217,7 +217,7 @@ end -- @return {array} decoded output -- @throw exception if http call fails or response is empty -------------------------------------------------------------------------------- -function EventQueue:call (url, method, data, authToken) +function EventQueue:call(url, method, data, authToken) method = method or "GET" data = data or nil authToken = authToken or nil @@ -412,7 +412,7 @@ end -- EventQueue:send_data, send data to external tool -- @return {boolean} -------------------------------------------------------------------------------- -function EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) local authToken local counter = 0 diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua index 5bc0ec83c63..3c074e62d4a 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua @@ -108,7 +108,7 @@ function EventQueue.new (params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -227,7 +227,7 @@ end -- @return {array} decoded output -- @throw exception if http call fails or response is empty 
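Every hunk in this part of the series makes the same mechanical change: `send_data` and the `send_data_method` dispatch table gain a `queue_metadata` argument, so per-queue routing information travels with the payload instead of being hardcoded in each sender. A condensed sketch of the pattern (names mirror the diffs above; the request itself is reduced to a print, and the route and payload values are illustrative):

```lua
local EventQueue = {}
EventQueue.__index = EventQueue

function EventQueue:send_data(payload, queue_metadata)
  -- route and HTTP verb now come from the queue metadata, with defaults
  local route = queue_metadata.event_route or "/api/v2/event"
  local method = queue_metadata.method or "POST"
  print(method .. " " .. route .. " <- " .. tostring(payload))
  return true
end

local queue = setmetatable({}, EventQueue)
queue.send_data_method = {
  [1] = function (payload, queue_metadata) return queue:send_data(payload, queue_metadata) end
}

-- a downtime deletion can now carry its own route and verb
queue.send_data_method[1]('{"name":"centreon-downtime-1-2"}',
  { event_route = "/api/v2/pbehavior/centreon-downtime-1-2", method = "DELETE" })
```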
-------------------------------------------------------------------------------- -function EventQueue:call (url, method, data, authToken) +function EventQueue:call(url, method, data, authToken) method = method or "GET" data = data or nil authToken = authToken or nil @@ -413,7 +413,7 @@ end -- EventQueue:send_data, send data to external tool -- @return {boolean} -------------------------------------------------------------------------------- -function EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) local authToken local counter = 0 diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua index c03d59d1a01..79b677bbc67 100644 --- a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua @@ -89,7 +89,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -200,7 +200,7 @@ function EventQueue:build_payload(payload, event) return payload end -function EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua old mode 100755 new mode 100644 index 9c40a8a25d4..66ac3591b71 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -87,7 +87,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -192,7 +192,7 @@ function EventQueue:build_payload(payload, event) end -function EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua index dacb0cae010..b4b4cb6ecae 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -95,7 +95,7 @@ function EventQueue.new(params) } self.send_data_method = { - [1] = function (payload) return self:send_data(payload) end + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } self.build_payload_method = { @@ -248,7 +248,7 @@ function EventQueue:build_payload(payload, event) return payload end -function EventQueue:send_data(payload) +function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") -- write payload in the logfile for test purpose diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index f924e562435..063697f2404 100644 --- 
a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
@@ -125,7 +125,7 @@ function ScEvent:is_valid_host_status_event()
     return false
   end
 
-  -- return false if one of event ack, downtime or state type (hard soft) aren't valid
+  -- return false if one of event ack, downtime, state type (hard soft) or flapping isn't valid
   if not self:is_valid_event_states() then
     self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not in a validated downtime, ack or hard/soft state")
     return false
@@ -192,7 +192,7 @@ function ScEvent:is_valid_service_status_event()
     return false
   end
 
-  -- return false if one of event ack, downtime or state type (hard soft) aren't valid
+  -- return false if one of event ack, downtime, state type (hard soft) or flapping isn't valid
   if not self:is_valid_event_states() then
     self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in a validated downtime, ack or hard/soft state")
     return false
@@ -331,6 +331,11 @@ function ScEvent:is_valid_event_states()
     return false
   end
 
+  -- return false if flapping state is not valid
+  if not self:is_valid_event_flapping_state() then
+    return false
+  end
+
   return true
 end
 
@@ -399,7 +404,7 @@ function ScEvent:is_valid_event_acknowledge_state()
   end
 
   if not self.sc_common:compare_numbers(self.params.acknowledged, self.sc_common:boolean_to_number(self.event.acknowledged), ">=") then
-    self.sc_logger:warning("[sc_event:is_valid_event_acknowledge_state]: event is not in an valid ack state. Event ack state must be above or equal to " .. tostring(self.params.acknowledged)
+    self.sc_logger:warning("[sc_event:is_valid_event_acknowledge_state]: event is not in a valid ack state. Event ack state must be below or equal to " .. tostring(self.params.acknowledged)
       .. ". Current ack state: " .. tostring(self.sc_common:boolean_to_number(self.event.acknowledged)))
     return false
   end
@@ -416,7 +421,7 @@ function ScEvent:is_valid_event_downtime_state()
   end
 
   if not self.sc_common:compare_numbers(self.params.in_downtime, self.event.scheduled_downtime_depth, ">=") then
-    self.sc_logger:warning("[sc_event:is_valid_event_downtime_state]: event is not in an valid downtime state. Event downtime state must be above or equal to " .. tostring(self.params.in_downtime)
+    self.sc_logger:warning("[sc_event:is_valid_event_downtime_state]: event is not in a valid downtime state. Event downtime state must be below or equal to " .. tostring(self.params.in_downtime)
       .. ". Current downtime state: " .. tostring(self.sc_common:boolean_to_number(self.event.scheduled_downtime_depth)))
     return false
   end
@@ -424,6 +429,18 @@ function ScEvent:is_valid_event_downtime_state()
   return true
 end
 
+--- is_valid_event_flapping_state: check if the event is in an accepted flapping state
+-- @return true|false (boolean)
+function ScEvent:is_valid_event_flapping_state()
+  if not self.sc_common:compare_numbers(self.params.flapping, self.sc_common:boolean_to_number(self.event.flapping), ">=") then
+    self.sc_logger:warning("[sc_event:is_valid_event_flapping_state]: event is not in a valid flapping state. Event flapping state must be below or equal to " .. tostring(self.params.flapping)
+      .. ". Current flapping state: " .. 
tostring(self.sc_common:boolean_to_number(self.event.flapping)))
+    return false
+  end
+
+  return true
+end
+
 --- is_valid_hostgroup: check if the event is in an accepted hostgroup
 -- @return true|false (boolean)
 function ScEvent:is_valid_hostgroup()
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua
index 55bf5f04f22..223bf2aae17 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua
@@ -53,6 +53,32 @@ function sc_flush.new(params, logger)
   return self
 end
 
+--- add_queue_metadata: add specific metadata to a queue
+-- @param category_id (number) the id of the bbdo category
+-- @param element_id (number) the id of the bbdo element
+-- @param metadata (table) a table with keys that are the name of the metadata and values the metadata values
+function ScFlush:add_queue_metadata(category_id, element_id, metadata)
+  if not self.queues[category_id] then
+    self.sc_logger:warning("[ScFlush:add_queue_metadata]: can't add queue metadata for category: " .. self.params.reverse_category_mapping[category_id]
+      .. " (id: " .. category_id .. ") and element: " .. self.params.reverse_element_mapping[category_id][element_id] .. " (id: " .. element_id .. ")."
+      .. " You need to accept this category with the parameter 'accepted_categories'.")
+    return
+  end
+
+  if not self.queues[category_id][element_id] then
+    self.sc_logger:warning("[ScFlush:add_queue_metadata]: can't add queue metadata for category: " .. self.params.reverse_category_mapping[category_id]
+      .. " (id: " .. category_id .. ") and element: " .. self.params.reverse_element_mapping[category_id][element_id] .. " (id: " .. element_id .. ")."
+      .. " 
You need to accept this element with the parameter 'accepted_elements'.") + return + end + + for metadata_name, metadata_value in pairs(metadata) do + self.queues[category_id][element_id].queue_metadata[metadata_name] = metadata_value + end +end + --- flush_all_queues: tries to flush all queues according to accepted elements -- @param build_payload_method (function) the function from the stream connector that will concatenate events in the payload -- @param send_method (function) the function from the stream connector that will send the data to the wanted tool @@ -178,7 +204,10 @@ function ScFlush:flush_homogeneous_payload(build_payload_method, send_method) return true end ---- flush_payload: flush a payload that contains a single type of events (services with services only and hosts with hosts only for example) +--- flush_payload: flush a given payload by sending it using the given send function +-- @param send_method (function) the function that will be used to send the payload +-- @param payload (any) the data that needs to be sent +-- @param metadata (table) all metadata for the payload -- @return boolean (boolean) true or false depending on the success of the operation function ScFlush:flush_payload(send_method, payload, metadata) if payload then diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 3ff424b83bb..c45403582b1 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -53,6 +53,7 @@ function sc_params.new(common, logger) hard_only = 1, acknowledged = 0, in_downtime = 0, + flapping = 0, -- objects filter accepted_hostgroups = "", @@ -757,6 +758,7 @@ function ScParams:check_params() self.params.hard_only = self.common:check_boolean_number_option_syntax(self.params.hard_only, 1) self.params.acknowledged = self.common:check_boolean_number_option_syntax(self.params.acknowledged, 0) self.params.in_downtime = self.common:check_boolean_number_option_syntax(self.params.in_downtime, 0) + self.params.flapping = self.common:check_boolean_number_option_syntax(self.params.flapping, 0) self.params.skip_anon_events = self.common:check_boolean_number_option_syntax(self.params.skip_anon_events, 1) self.params.skip_nil_id = self.common:check_boolean_number_option_syntax(self.params.skip_nil_id, 1) self.params.accepted_authors = self.common:if_wrong_type(self.params.accepted_authors, "string", "") diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md index 57ca260f126..41ea39467cc 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -2,14 +2,14 @@ - [Stream Connectors lib documentation](#stream-connectors-lib-documentation) - [Libraries list](#libraries-list) - - [sc_common methods](#sc_common-methods) - - [sc_logger methods](#sc_logger-methods) - - [sc_broker methods](#sc_broker-methods) - - [sc_param methods](#sc_param-methods) - - [sc_event methods](#sc_event-methods) - - [sc_macros methods](#sc_macros-methods) - - [sc_flush methods](#sc_flush-methods) - - [sc_metrics methods](#sc_metrics-methods) + - [sc\_common methods](#sc_common-methods) + - [sc\_logger methods](#sc_logger-methods) + - [sc\_broker methods](#sc_broker-methods) + - [sc\_param methods](#sc_param-methods) + - [sc\_event methods](#sc_event-methods) + - [sc\_macros methods](#sc_macros-methods) + - [sc\_flush 
methods](#sc_flush-methods) + - [sc\_metrics methods](#sc_metrics-methods) - [google.bigquery.bigquery methods](#googlebigquerybigquery-methods) - [google.auth.oauth methods](#googleauthoauth-methods) - [Additionnal documentations](#additionnal-documentations) @@ -101,6 +101,7 @@ | is_valid_event_state_type | check if the state (HARD/SOFT) of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_state_type-method) | | is_valid_event_acknowledge_state | check if the acknowledgement state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_acknowledge_state-method) | | is_valid_event_downtime_state | check if the downtime state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_downtime_state-method) | +| is_valid_event_flapping_state | check if the flapping state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_flapping_state-method) | | is_valid_hostgroup | check if the host is in an accepted hostgroup according to the stream connector params | [Documentation](sc_event.md#is_valid_hostgroup-method) | | find_hostgroup_in_list | check if one of the hostgroups of the event is in the list of accepted hostgroups provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_hostgroup_in_list-method) | | is_valid_servicegroup | check if the service is in an accepted servicegroup according to the stream connector params | [Documentation](sc_event.md#is_valid_servicegroup-method) | @@ -153,11 +154,15 @@ ## sc_flush methods -| Method name | Method description | Link | -| ---------------- | ------------------------------------------------- | ---------------------------------------------------- | -| flush_all_queues | flush all the possible queues that can be created | [Documentation](sc_flush.md#flush_all_queues-method) | -| flush_queue | flush a specific queue | [Documentation](sc_flush.md#flush_queue-method) | -| reset_queue | reset a queue after it has been flushed | [Documentation](sc_flush.md#reset_queue-method) | +| Method name | Method description | Link | +| ------------------------- | ---------------------------------------------------------------------- | ------------------------------------------------------------- | +| add_queue_metadata | add specific metadata to a queue | [Documentation](sc_flush.md#add_queue_metadata-method) | +| flush_all_queues | try to flush all queues according to accepted elements | [Documentation](sc_flush.md#flush_all_queues-method) | +| reset_all_queues | put all queues back to their initial state after flushing their events | [Documentation](sc_flush.md#reset_all_queues-method) | +| get_queues_size | get the number of events stored in all the queues | [Documentation](sc_flush.md#get_queues_size-method) | +| flush_mixed_payload | flush a payload that contains various type of events | [Documentation](sc_flush.md#flush_mixed_payload-method) | +| flush_homogeneous_payload | flush a payload that contains a single type of events | [Documentation](sc_flush.md#flush_homogeneous_payload-method) | +| flush_payload | flush a payload | [Documentation](sc_flush.md#flush_payload-method) | ## sc_metrics methods diff --git a/stream-connectors/modules/docs/sc_event.md b/stream-connectors/modules/docs/sc_event.md index 514027a19de..3fbc7ff09a5 100644 --- a/stream-connectors/modules/docs/sc_event.md +++ 
b/stream-connectors/modules/docs/sc_event.md @@ -1,128 +1,131 @@ # Documentation of the sc_param module -- [Documentation of the sc_param module](#documentation-of-the-sc_param-module) +- [Documentation of the sc\_param module](#documentation-of-the-sc_param-module) - [Introduction](#introduction) - [Module initialization](#module-initialization) - [module constructor](#module-constructor) - [constructor: Example](#constructor-example) - - [is_valid_category method](#is_valid_category-method) - - [is_valid_category: returns](#is_valid_category-returns) - - [is_valid_category: example](#is_valid_category-example) - - [is_valid_element method](#is_valid_element-method) - - [is_valid_element: returns](#is_valid_element-returns) - - [is_valid_element: example](#is_valid_element-example) - - [is_valid_event method](#is_valid_event-method) - - [is_valid_event: returns](#is_valid_event-returns) - - [is_valid_event: example](#is_valid_event-example) - - [is_valid_neb_event method](#is_valid_neb_event-method) - - [is_valid_neb_event: returns](#is_valid_neb_event-returns) - - [is_valid_neb_event: example](#is_valid_neb_event-example) - - [is_valid_host_status_event method](#is_valid_host_status_event-method) - - [is_valid_host_status_event: returns](#is_valid_host_status_event-returns) - - [is_valid_host_status_event: example](#is_valid_host_status_event-example) - - [is_valid_service_status_event method](#is_valid_service_status_event-method) - - [is_valid_service_status_event: returns](#is_valid_service_status_event-returns) - - [is_valid_service_status_event: example](#is_valid_service_status_event-example) - - [is_valid_host method](#is_valid_host-method) - - [is_valid_host: returns](#is_valid_host-returns) - - [is_valid_host: example](#is_valid_host-example) - - [is_valid_service method](#is_valid_service-method) - - [is_valid_service: returns](#is_valid_service-returns) - - [is_valid_service: example](#is_valid_service-example) - - [is_valid_event_states method](#is_valid_event_states-method) - - [is_valid_event_states: returns](#is_valid_event_states-returns) - - [is_valid_event_states: example](#is_valid_event_states-example) - - [is_valid_event_status method](#is_valid_event_status-method) - - [is_valid_event_status: parameters](#is_valid_event_status-parameters) - - [is_valid_event_status: returns](#is_valid_event_status-returns) - - [is_valid_event_status: example](#is_valid_event_status-example) - - [is_valid_event_state_type method](#is_valid_event_state_type-method) - - [is_valid_event_state_type: returns](#is_valid_event_state_type-returns) - - [is_valid_event_state_type: example](#is_valid_event_state_type-example) - - [is_valid_event_acknowledge_state method](#is_valid_event_acknowledge_state-method) - - [is_valid_event_acknowledge_state: returns](#is_valid_event_acknowledge_state-returns) - - [is_valid_event_acknowledge_state: example](#is_valid_event_acknowledge_state-example) - - [is_valid_event_downtime_state method](#is_valid_event_downtime_state-method) - - [is_valid_event_downtime_state: returns](#is_valid_event_downtime_state-returns) - - [is_valid_event_downtime_state: example](#is_valid_event_downtime_state-example) - - [is_valid_hostgroup method](#is_valid_hostgroup-method) - - [is_valid_hostgroup: returns](#is_valid_hostgroup-returns) - - [is_valid_hostgroup: example](#is_valid_hostgroup-example) - - [is_valid_servicegroup method](#is_valid_servicegroup-method) - - [is_valid_servicegroup: returns](#is_valid_servicegroup-returns) - - [is_valid_servicegroup: 
example](#is_valid_servicegroup-example) - - [is_valid_bam_event method](#is_valid_bam_event-method) - - [is_valid_bam_event: returns](#is_valid_bam_event-returns) - - [is_valid_bam_event: example](#is_valid_bam_event-example) - - [is_valid_ba method](#is_valid_ba-method) - - [is_valid_ba: returns](#is_valid_ba-returns) - - [is_valid_ba: example](#is_valid_ba-example) - - [is_valid_ba_status_event method](#is_valid_ba_status_event-method) - - [is_valid_ba_status_event: returns](#is_valid_ba_status_event-returns) - - [is_valid_ba_status_event: example](#is_valid_ba_status_event-example) - - [is_valid_ba_downtime_state method](#is_valid_ba_downtime_state-method) - - [is_valid_ba_downtime_state: returns](#is_valid_ba_downtime_state-returns) - - [is_valid_ba_downtime_state: example](#is_valid_ba_downtime_state-example) - - [is_valid_ba_acknowledge_state method](#is_valid_ba_acknowledge_state-method) - - [is_valid_ba_acknowledge_state: returns](#is_valid_ba_acknowledge_state-returns) - - [is_valid_ba_acknowledge_state: example](#is_valid_ba_acknowledge_state-example) - - [is_valid_bv method](#is_valid_bv-method) - - [is_valid_bv: returns](#is_valid_bv-returns) - - [is_valid_bv: example](#is_valid_bv-example) - - [find_hostgroup_in_list method](#find_hostgroup_in_list-method) - - [find_hostgroup_in_list: returns](#find_hostgroup_in_list-returns) - - [find_hostgroup_in_list: example](#find_hostgroup_in_list-example) - - [find_servicegroup_in_list method](#find_servicegroup_in_list-method) - - [find_servicegroup_in_list: returns](#find_servicegroup_in_list-returns) - - [find_servicegroup_in_list: example](#find_servicegroup_in_list-example) - - [find_bv_in_list method](#find_bv_in_list-method) - - [find_bv_in_list: returns](#find_bv_in_list-returns) - - [find_bv_in_list: example](#find_bv_in_list-example) - - [is_valid_poller method](#is_valid_poller-method) - - [is_valid_poller: returns](#is_valid_poller-returns) - - [is_valid_poller: example](#is_valid_poller-example) - - [find_poller_in_list method](#find_poller_in_list-method) - - [find_poller_in_list: returns](#find_poller_in_list-returns) - - [find_poller_in_list: example](#find_poller_in_list-example) - - [is_valid_host_severity method](#is_valid_host_severity-method) - - [is_valid_host_severity: returns](#is_valid_host_severity-returns) - - [is_valid_host_severity: example](#is_valid_host_severity-example) - - [is_valid_service_severity method](#is_valid_service_severity-method) - - [is_valid_service_severity: returns](#is_valid_service_severity-returns) - - [is_valid_service_severity: example](#is_valid_service_severity-example) - - [is_valid_acknowledgement_event method](#is_valid_acknowledgement_event-method) - - [is_valid_acknowledgement_event: returns](#is_valid_acknowledgement_event-returns) - - [is_valid_acknowledgement_event: example](#is_valid_acknowledgement_event-example) - - [is_host_status_event_duplicated method](#is_host_status_event_duplicated-method) - - [is_host_status_event_duplicated: returns](#is_host_status_event_duplicated-returns) - - [is_host_status_event_duplicated: example](#is_host_status_event_duplicated-example) - - [is_service_status_event_duplicated method](#is_service_status_event_duplicated-method) - - [is_service_status_event_duplicated: returns](#is_service_status_event_duplicated-returns) - - [is_service_status_event_duplicated: example](#is_service_status_event_duplicated-example) - - [is_valid_downtime_event method](#is_valid_downtime_event-method) - - [is_valid_downtime_event: 
returns](#is_valid_downtime_event-returns) - - [is_valid_downtime_event: example](#is_valid_downtime_event-example) - - [get_downtime_host_status method](#get_downtime_host_status-method) - - [get_downtime_host_status: returns](#get_downtime_host_status-returns) - - [get_downtime_host_status: example](#get_downtime_host_status-example) - - [get_downtime_service_status method](#get_downtime_service_status-method) - - [get_downtime_service_status: returns](#get_downtime_service_status-returns) - - [get_downtime_service_status: example](#get_downtime_service_status-example) - - [is_valid_author method](#is_valid_author-method) - - [is_valid_author: returns](#is_valid_author-returns) - - [is_valid_author: example](#is_valid_author-example) - - [is_downtime_event_useless method](#is_downtime_event_useless-method) - - [is_downtime_event_useless: returns](#is_downtime_event_useless-returns) - - [is_downtime_event_useless: example](#is_downtime_event_useless-example) - - [is_valid_downtime_event_start method](#is_valid_downtime_event_start-method) - - [is_valid_downtime_event_start: returns](#is_valid_downtime_event_start-returns) - - [is_valid_downtime_event_start: example](#is_valid_downtime_event_start-example) - - [is_valid_downtime_event_end method](#is_valid_downtime_event_end-method) - - [is_valid_downtime_event_end: returns](#is_valid_downtime_event_end-returns) - - [is_valid_downtime_event_end: example](#is_valid_downtime_event_end-example) - - [build_outputs method](#build_outputs-method) - - [build_outputs: example](#build_outputs-example) - - [is_valid_storage_event method](#is_valid_storage_event-method) + - [is\_valid\_category method](#is_valid_category-method) + - [is\_valid\_category: returns](#is_valid_category-returns) + - [is\_valid\_category: example](#is_valid_category-example) + - [is\_valid\_element method](#is_valid_element-method) + - [is\_valid\_element: returns](#is_valid_element-returns) + - [is\_valid\_element: example](#is_valid_element-example) + - [is\_valid\_event method](#is_valid_event-method) + - [is\_valid\_event: returns](#is_valid_event-returns) + - [is\_valid\_event: example](#is_valid_event-example) + - [is\_valid\_neb\_event method](#is_valid_neb_event-method) + - [is\_valid\_neb\_event: returns](#is_valid_neb_event-returns) + - [is\_valid\_neb\_event: example](#is_valid_neb_event-example) + - [is\_valid\_host\_status\_event method](#is_valid_host_status_event-method) + - [is\_valid\_host\_status\_event: returns](#is_valid_host_status_event-returns) + - [is\_valid\_host\_status\_event: example](#is_valid_host_status_event-example) + - [is\_valid\_service\_status\_event method](#is_valid_service_status_event-method) + - [is\_valid\_service\_status\_event: returns](#is_valid_service_status_event-returns) + - [is\_valid\_service\_status\_event: example](#is_valid_service_status_event-example) + - [is\_valid\_host method](#is_valid_host-method) + - [is\_valid\_host: returns](#is_valid_host-returns) + - [is\_valid\_host: example](#is_valid_host-example) + - [is\_valid\_service method](#is_valid_service-method) + - [is\_valid\_service: returns](#is_valid_service-returns) + - [is\_valid\_service: example](#is_valid_service-example) + - [is\_valid\_event\_states method](#is_valid_event_states-method) + - [is\_valid\_event\_states: returns](#is_valid_event_states-returns) + - [is\_valid\_event\_states: example](#is_valid_event_states-example) + - [is\_valid\_event\_status method](#is_valid_event_status-method) + - [is\_valid\_event\_status: 
parameters](#is_valid_event_status-parameters) + - [is\_valid\_event\_status: returns](#is_valid_event_status-returns) + - [is\_valid\_event\_status: example](#is_valid_event_status-example) + - [is\_valid\_event\_state\_type method](#is_valid_event_state_type-method) + - [is\_valid\_event\_state\_type: returns](#is_valid_event_state_type-returns) + - [is\_valid\_event\_state\_type: example](#is_valid_event_state_type-example) + - [is\_valid\_event\_acknowledge\_state method](#is_valid_event_acknowledge_state-method) + - [is\_valid\_event\_acknowledge\_state: returns](#is_valid_event_acknowledge_state-returns) + - [is\_valid\_event\_acknowledge\_state: example](#is_valid_event_acknowledge_state-example) + - [is\_valid\_event\_downtime\_state method](#is_valid_event_downtime_state-method) + - [is\_valid\_event\_downtime\_state: returns](#is_valid_event_downtime_state-returns) + - [is\_valid\_event\_downtime\_state: example](#is_valid_event_downtime_state-example) + - [is\_valid\_event\_flapping\_state method](#is_valid_event_flapping_state-method) + - [is\_valid\_event\_flapping\_state: returns](#is_valid_event_flapping_state-returns) + - [is\_valid\_event\_flapping\_state: example](#is_valid_event_flapping_state-example) + - [is\_valid\_hostgroup method](#is_valid_hostgroup-method) + - [is\_valid\_hostgroup: returns](#is_valid_hostgroup-returns) + - [is\_valid\_hostgroup: example](#is_valid_hostgroup-example) + - [is\_valid\_servicegroup method](#is_valid_servicegroup-method) + - [is\_valid\_servicegroup: returns](#is_valid_servicegroup-returns) + - [is\_valid\_servicegroup: example](#is_valid_servicegroup-example) + - [is\_valid\_bam\_event method](#is_valid_bam_event-method) + - [is\_valid\_bam\_event: returns](#is_valid_bam_event-returns) + - [is\_valid\_bam\_event: example](#is_valid_bam_event-example) + - [is\_valid\_ba method](#is_valid_ba-method) + - [is\_valid\_ba: returns](#is_valid_ba-returns) + - [is\_valid\_ba: example](#is_valid_ba-example) + - [is\_valid\_ba\_status\_event method](#is_valid_ba_status_event-method) + - [is\_valid\_ba\_status\_event: returns](#is_valid_ba_status_event-returns) + - [is\_valid\_ba\_status\_event: example](#is_valid_ba_status_event-example) + - [is\_valid\_ba\_downtime\_state method](#is_valid_ba_downtime_state-method) + - [is\_valid\_ba\_downtime\_state: returns](#is_valid_ba_downtime_state-returns) + - [is\_valid\_ba\_downtime\_state: example](#is_valid_ba_downtime_state-example) + - [is\_valid\_ba\_acknowledge\_state method](#is_valid_ba_acknowledge_state-method) + - [is\_valid\_ba\_acknowledge\_state: returns](#is_valid_ba_acknowledge_state-returns) + - [is\_valid\_ba\_acknowledge\_state: example](#is_valid_ba_acknowledge_state-example) + - [is\_valid\_bv method](#is_valid_bv-method) + - [is\_valid\_bv: returns](#is_valid_bv-returns) + - [is\_valid\_bv: example](#is_valid_bv-example) + - [find\_hostgroup\_in\_list method](#find_hostgroup_in_list-method) + - [find\_hostgroup\_in\_list: returns](#find_hostgroup_in_list-returns) + - [find\_hostgroup\_in\_list: example](#find_hostgroup_in_list-example) + - [find\_servicegroup\_in\_list method](#find_servicegroup_in_list-method) + - [find\_servicegroup\_in\_list: returns](#find_servicegroup_in_list-returns) + - [find\_servicegroup\_in\_list: example](#find_servicegroup_in_list-example) + - [find\_bv\_in\_list method](#find_bv_in_list-method) + - [find\_bv\_in\_list: returns](#find_bv_in_list-returns) + - [find\_bv\_in\_list: example](#find_bv_in_list-example) + - [is\_valid\_poller 
method](#is_valid_poller-method) + - [is\_valid\_poller: returns](#is_valid_poller-returns) + - [is\_valid\_poller: example](#is_valid_poller-example) + - [find\_poller\_in\_list method](#find_poller_in_list-method) + - [find\_poller\_in\_list: returns](#find_poller_in_list-returns) + - [find\_poller\_in\_list: example](#find_poller_in_list-example) + - [is\_valid\_host\_severity method](#is_valid_host_severity-method) + - [is\_valid\_host\_severity: returns](#is_valid_host_severity-returns) + - [is\_valid\_host\_severity: example](#is_valid_host_severity-example) + - [is\_valid\_service\_severity method](#is_valid_service_severity-method) + - [is\_valid\_service\_severity: returns](#is_valid_service_severity-returns) + - [is\_valid\_service\_severity: example](#is_valid_service_severity-example) + - [is\_valid\_acknowledgement\_event method](#is_valid_acknowledgement_event-method) + - [is\_valid\_acknowledgement\_event: returns](#is_valid_acknowledgement_event-returns) + - [is\_valid\_acknowledgement\_event: example](#is_valid_acknowledgement_event-example) + - [is\_host\_status\_event\_duplicated method](#is_host_status_event_duplicated-method) + - [is\_host\_status\_event\_duplicated: returns](#is_host_status_event_duplicated-returns) + - [is\_host\_status\_event\_duplicated: example](#is_host_status_event_duplicated-example) + - [is\_service\_status\_event\_duplicated method](#is_service_status_event_duplicated-method) + - [is\_service\_status\_event\_duplicated: returns](#is_service_status_event_duplicated-returns) + - [is\_service\_status\_event\_duplicated: example](#is_service_status_event_duplicated-example) + - [is\_valid\_downtime\_event method](#is_valid_downtime_event-method) + - [is\_valid\_downtime\_event: returns](#is_valid_downtime_event-returns) + - [is\_valid\_downtime\_event: example](#is_valid_downtime_event-example) + - [get\_downtime\_host\_status method](#get_downtime_host_status-method) + - [get\_downtime\_host\_status: returns](#get_downtime_host_status-returns) + - [get\_downtime\_host\_status: example](#get_downtime_host_status-example) + - [get\_downtime\_service\_status method](#get_downtime_service_status-method) + - [get\_downtime\_service\_status: returns](#get_downtime_service_status-returns) + - [get\_downtime\_service\_status: example](#get_downtime_service_status-example) + - [is\_valid\_author method](#is_valid_author-method) + - [is\_valid\_author: returns](#is_valid_author-returns) + - [is\_valid\_author: example](#is_valid_author-example) + - [is\_downtime\_event\_useless method](#is_downtime_event_useless-method) + - [is\_downtime\_event\_useless: returns](#is_downtime_event_useless-returns) + - [is\_downtime\_event\_useless: example](#is_downtime_event_useless-example) + - [is\_valid\_downtime\_event\_start method](#is_valid_downtime_event_start-method) + - [is\_valid\_downtime\_event\_start: returns](#is_valid_downtime_event_start-returns) + - [is\_valid\_downtime\_event\_start: example](#is_valid_downtime_event_start-example) + - [is\_valid\_downtime\_event\_end method](#is_valid_downtime_event_end-method) + - [is\_valid\_downtime\_event\_end: returns](#is_valid_downtime_event_end-returns) + - [is\_valid\_downtime\_event\_end: example](#is_valid_downtime_event_end-example) + - [build\_outputs method](#build_outputs-method) + - [build\_outputs: example](#build_outputs-example) + - [is\_valid\_storage\_event method](#is_valid_storage_event-method) ## Introduction @@ -375,13 +378,14 @@ local result = test_event:is_valid_service() ## 
is_valid_event_states method -The **is_valid_event_states** method checks if the event states (downtime, hard/soft, acknowledgement) are valid based on [**hard_only, in_downtime and acknowledged**](sc_param.md#default-parameters) in the **host_status or service_status** scope +The **is_valid_event_states** method checks if the event states (downtime, hard/soft, acknowledgement, flapping) are valid based on [**hard_only, in_downtime, acknowledged and flapping parameters**](sc_param.md#default-parameters) in the **host_status or service_status** scope head over the following chapters for more information - [is_valid_event_state_type](#is_valid_event_state_type-method) - [is_valid_event_acknowledge_state](#is_valid_event_acknowledge_state-method) - [is_valid_event_downtime_state](#is_valid_event_downtime_state-method) +- [is_valid_event_flapping_state](#is_valid_event_flapping_state-method) ### is_valid_event_states: returns @@ -398,7 +402,7 @@ local result = test_event:is_valid_event_states(test_param.params.host_status) ## is_valid_event_status method -The **is_valid_event_states** method checks if the event status is valid based on [**host_status, service_status or ba_status**](sc_param.md#default-parameters) in the **host_status, service_status or ba_status** scope +The **is_valid_event_status** method checks if the event status is valid based on [**host_status, service_status or ba_status parameters**](sc_param.md#default-parameters) in the **host_status, service_status or ba_status** scope ### is_valid_event_status: parameters @@ -421,7 +425,7 @@ local result = test_event:is_valid_event_status() ## is_valid_event_state_type method -The **is_valid_event_state_type** method checks if the event state (HARD/SOFT) is valid based on [**hard_only**](sc_param.md#default-parameters) in the **host_status, service_status** scope +The **is_valid_event_state_type** method checks if the event state (HARD/SOFT) is valid based on the [**hard_only parameter**](sc_param.md#default-parameters) in the **host_status, service_status** scope ### is_valid_event_state_type: returns @@ -438,7 +442,7 @@ local result = test_event:is_valid_event_state_type() ## is_valid_event_acknowledge_state method -The **is_valid_event_acknowledge_state** method checks if the event is in valid acknowledgement state based on [**acknowledged**](sc_param.md#default-parameters) in the **host_status, service_status** scope +The **is_valid_event_acknowledge_state** method checks if the event is in a valid acknowledgement state based on the [**acknowledged parameter**](sc_param.md#default-parameters) in the **host_status, service_status** scope ### is_valid_event_acknowledge_state: returns @@ -455,7 +459,7 @@ local result = test_event:is_valid_event_acknowledge_state() ## is_valid_event_downtime_state method -The **is_valid_event_downtime_state** method checks if the event is in a valid downtime state based on [**in_downtime**](sc_param.md#default-parameters) in the **host_status, service_status** scope +The **is_valid_event_downtime_state** method checks if the event is in a valid downtime state based on the [**in_downtime parameter**](sc_param.md#default-parameters) in the **host_status, service_status** scope ### is_valid_event_downtime_state: returns @@ -470,6 +474,23 @@ local result = test_event:is_valid_event_downtime_state() --> result is true or false ``` +## is_valid_event_flapping_state method + +The **is_valid_event_flapping_state** method checks if the event is in a valid flapping state based on the [**flapping 
parameter**](sc_param.md#default-parameters) in the **host_status, service_status** scope + +### is_valid_event_flapping_state: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_event_flapping_state: example + +```lua +local result = test_event:is_valid_event_flapping_state() +--> result is true or false +``` + ## is_valid_hostgroup method The **is_valid_hostgroup** method checks if the event is in a valid hostgroup based on [**accepted_hostgroups**](sc_param.md#default-parameters) in the **host_status or service_status** scope diff --git a/stream-connectors/modules/docs/sc_flush.md b/stream-connectors/modules/docs/sc_flush.md index ce00f760e1c..e18a3a302f3 100644 --- a/stream-connectors/modules/docs/sc_flush.md +++ b/stream-connectors/modules/docs/sc_flush.md @@ -1,20 +1,34 @@ # Documentation of the sc_flush module -- [Documentation of the sc_flush module](#documentation-of-the-sc_flush-module) +- [Documentation of the sc\_flush module](#documentation-of-the-sc_flush-module) - [Introduction](#introduction) - [Module initialization](#module-initialization) - [Module constructor](#module-constructor) - [constructor: Example](#constructor-example) - - [flush_all_queues method](#flush_all_queues-method) - - [flush_all_queues: parameters](#flush_all_queues-parameters) - - [flush_all_queues: example](#flush_all_queues-example) - - [flush_queue method](#flush_queue-method) - - [flush_queue: parameters](#flush_queue-parameters) - - [flush_queue: returns](#flush_queue-returns) - - [flush_queue: example](#flush_queue-example) - - [reset_queue method](#reset_queue-method) - - [reset_queue: parameters](#reset_queue-parameters) - - [reset_queue: example](#reset_queue-example) + - [add\_queue\_metadata method](#add_queue_metadata-method) + - [add\_queue\_metadata: parameters](#add_queue_metadata-parameters) + - [add\_queue\_metadata: example](#add_queue_metadata-example) + - [flush\_all\_queues method](#flush_all_queues-method) + - [flush\_all\_queues: parameters](#flush_all_queues-parameters) + - [flush\_all\_queues: returns](#flush_all_queues-returns) + - [flush\_all\_queues: example](#flush_all_queues-example) + - [reset\_all\_queues method](#reset_all_queues-method) + - [reset\_all\_queues: example](#reset_all_queues-example) + - [get\_queues\_size method](#get_queues_size-method) + - [get\_queues\_size: returns](#get_queues_size-returns) + - [get\_queues\_size: example](#get_queues_size-example) + - [flush\_mixed\_payload method](#flush_mixed_payload-method) + - [flush\_mixed\_payload: parameters](#flush_mixed_payload-parameters) + - [flush\_mixed\_payload: returns](#flush_mixed_payload-returns) + - [flush\_mixed\_payload: example](#flush_mixed_payload-example) + - [flush\_homogeneous\_payload method](#flush_homogeneous_payload-method) + - [flush\_homogeneous\_payload: parameters](#flush_homogeneous_payload-parameters) + - [flush\_homogeneous\_payload: returns](#flush_homogeneous_payload-returns) + - [flush\_homogeneous\_payload: example](#flush_homogeneous_payload-example) + - [flush\_payload method](#flush_payload-method) + - [flush\_payload: parameters](#flush_payload-parameters) + - [flush\_payload: returns](#flush_payload-returns) + - [flush\_payload: example](#flush_payload-example) ## Introduction @@ -52,118 +66,274 @@ local params = { param_B = "value B" } --- create a new instance of the sc_common module +-- create a new instance of the sc_flush module local test_flush = 
sc_flush.new(params, test_logger) ``` +## add_queue_metadata method + +The **add_queue_metadata** method adds a list of metadata to a given queue. + +### add_queue_metadata: parameters + +| parameter | type | optional | default value | +| ---------------------------------------------------------------------------------------------- | ------ | -------- | ------------- | +| the category id of the queue | number | no | | +| the element id of the queue | number | no | | +| a table containing metadata where each key is the name of the metadata and each value its value | table | no | | + +### add_queue_metadata: example + +```lua +-- if accepted_elements is set to "host_status,service_status" + +local host_metadata = { + endpoint = "/host", + method = "POST" +} + +local category = 1 +local element = 14 + +test_flush:add_queue_metadata(category, element, host_metadata) +--> the host queue (category: 1, element: 14) now has metadata +--[[ + test_flush.queues = { + [1] = { + [14] = { + events = {}, + queue_metadata = { + category_id = 1, + element_id = 14, + endpoint = "/host", + method = "POST" + } + } + } + } +]]-- +``` +
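To make the purpose of this metadata concrete, here is a minimal, hypothetical sketch of a `send_data` function that consumes it. The receiver URL and the fallback values are assumptions made up for this illustration, but reading `queue_metadata.endpoint` and `queue_metadata.method` this way mirrors how the certified connectors use queue metadata when they build their HTTP requests.

```lua
-- hypothetical consumer of the metadata attached in the example above
local function send_data(payload, queue_metadata)
  -- the endpoint and method stored as metadata drive the HTTP request
  local url = "https://receiver.example.com" .. (queue_metadata.endpoint or "/default")
  local method = queue_metadata.method or "POST"
  -- an HTTP call using `method`, `url` and `payload` would be performed here
  return true
end
```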
## flush_all_queues method -The **flush_all_queues** method tries to flush all the possible queues that can be created. It flushes queues according to the [**accepted_elements, max_buffer_size and max_buffer_age parameters**](sc_param.md#default_parameters) +The **flush_all_queues** method tries to flush all the possible queues that can be created. It flushes queues according to the [**accepted_elements, max_buffer_size, max_buffer_age and send_mixed_events parameters**](sc_param.md#default_parameters) head over the following chapters for more information -- [flush_queue](#flush_queue-method) +- [flush_mixed_payload](#flush_mixed_payload-method) +- [flush_homogeneous_payload](#flush_homogeneous_payload-method) +- [reset_all_queues](#reset_all_queues-method) ### flush_all_queues: parameters -| parameter | type | optional | default value | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- | -------- | ------------- | -| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | +| parameter | type | optional | default value | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -------- | ------------- | +| the function that must be used to build the data payload. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.build_payload` but not `self:build_payload` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | +| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | + +### flush_all_queues: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | ### flush_all_queues: example ```lua -- if accepted_elements is set to "host_status,service_status" +local function build_payload() + -- build data payload +end + local function send_data() -- send data somewhere end -test_flush:flush_all_queues(send_data) +local result = test_flush:flush_all_queues(build_payload, send_data) +--> result is true or false --> host_status and service_status are flushed if it is possible ``` -## flush_queue method +## reset_all_queues method -The **flush_queue** method tries to flush a specific queue. It flushes a queue according to the [**max_buffer_size and max_buffer_age parameters**](sc_param.md#default_parameters) +The **reset_all_queues** method removes all the entries from all the queue tables. -head over the following chapters for more information +### reset_all_queues: example + +```lua +test_flush.queues[1] = { + [14] = { + flush_date = os.time() - 30, -- simulate an old queue by setting its last flush date 30 seconds in the past + events = { + [1] = "first event", + [2] = "second event" + } + }, + [24] = { + flush_date = os.time() - 30, -- simulate an old queue by setting its last flush date 30 seconds in the past + events = { + [1] = "first event", + [2] = "second event" + } + } +} + +test_flush:reset_all_queues() +--> test_flush.queues are now reset +--[[ + test_flush.queues[1] = { + [14] = { + flush_date = os.time(), -- the time at which the reset happened + events = {} + }, + [24] = { + flush_date = os.time(), -- the time at which the reset happened + events = {} + } + } +]]-- +``` -- [reset_queue](#reset_queue-method) ## get_queues_size method -### flush_queue: parameters +The **get_queues_size** method gets the number of events stored in all the queues. + +### get_queues_size: returns + +| return | type | always | condition | +| -------- | ------ | ------ | --------- | +| a number | number | yes | | + +### get_queues_size: example + +```lua +test_flush.queues[1] = { + [14] = { + flush_date = os.time(), + events = { + [1] = "first event", + [2] = "second event" + } + }, + [24] = { + flush_date = os.time(), + events = { + [1] = "first event", + [2] = "second event" + } + } +} + +local result = test_flush:get_queues_size() +--> result is 4 ```
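For illustration only, and assuming the constructor's parameters are kept on the instance as `test_flush.params`, the size returned by **get_queues_size** can be combined with the **max_buffer_size** parameter to decide when a flush is due; this check is a sketch, not the module's internal logic:

```lua
-- illustrative: flush every queue once the accumulated events reach the configured buffer size
if test_flush:get_queues_size() >= test_flush.params.max_buffer_size then
  test_flush:flush_all_queues(build_payload, send_data)
end
```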
-| parameter | type | optional | default value | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- | -------- | ------------- | -| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | -| the category of the queue that we need to flush | number | no | | -| the element of the queue that we need to flush | number | no | | -| force a flush ignoring max_buffer_age and max_buffer_size | boolean | yes | false | +## flush_mixed_payload method -### flush_queue: returns +The **flush_mixed_payload** method flushes a payload that contains various types of events (services mixed with hosts, for example) according to [**max_buffer_size and max_buffer_age parameters**](sc_param.md#default_parameters) + +### flush_mixed_payload: parameters + +| parameter | type | optional | default value | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -------- | ------------- | +| the function that must be used to build the data payload. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.build_payload` but not `self:build_payload` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | +| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | + +### flush_mixed_payload: returns | return | type | always | condition | | ------------- | ------- | ------ | --------- | | true or false | boolean | yes | | -### flush_queue: example +### flush_mixed_payload: example ```lua +-- if accepted_elements is set to "host_status,service_status" + +local function build_payload() + -- build data payload +end local function send_data() -- send data somewhere end --- fill a host_status queue with 2 events for the example -test_flush.queues[1][14].events = { - [1] = "first event", - [2] = "second event" -} - -local result = test_flush:flush_queue(send_data, 1, 14, false) ---> result is true +local result = test_flush:flush_mixed_payload(build_payload, send_data) +--> result is true or false +--> host_status and service_status are flushed if it is possible +```
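Connectors do not usually call **flush_mixed_payload** directly: **flush_all_queues** chooses between the mixed and homogeneous strategies. The dispatch below is a hedged sketch of that relationship, with the parameter access path assumed for the example:

```lua
-- illustrative dispatch between the two flushing strategies
if test_flush.params.send_mixed_events == 1 then
  -- a single payload may mix host_status and service_status events
  test_flush:flush_mixed_payload(build_payload, send_data)
else
  -- one payload per event type
  test_flush:flush_homogeneous_payload(build_payload, send_data)
end
```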
--- initiate a empty queue for service_status events -test_.queues[1][24].events = {} +## flush_homogeneous_payload method -result = test_flush:flush_queue(send_data, 1, 24, false) ---> result is false because buffer size is 0 -``` +The **flush_homogeneous_payload** method flushes a payload that contains a single type of event (services with services only and hosts with hosts only, for example) according to [**max_buffer_size and max_buffer_age parameters**](sc_param.md#default_parameters) -## reset_queue method +### flush_homogeneous_payload: parameters -The **reset_queue** reset a queue after it has been flushed | parameter | type | optional | default value | -### reset_queue: parameters +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -------- | ------------- | +| the function that must be used to build the data payload. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.build_payload` but not `self:build_payload` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | +| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | + +### flush_homogeneous_payload: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### flush_homogeneous_payload: example + +```lua +-- if accepted_elements is set to "host_status,service_status" + +local function build_payload() + -- build data payload +end local function send_data() -- send data somewhere end +local result = test_flush:flush_homogeneous_payload(build_payload, send_data) +--> result is true or false +--> host_status and service_status are flushed if it is possible +``` +
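Both flushing methods delegate payload construction to the `build_payload` function they receive. The sketch below shows the general shape such a function can take; the newline-separated JSON aggregation is an assumption chosen for illustration, since each stream connector formats its own payloads:

```lua
-- illustrative build_payload: aggregates events into the payload under construction
local function build_payload(payload, event)
  if not payload then
    -- first event of the payload
    return broker.json_encode(event)
  end
  -- append the next event, one JSON document per line
  return payload .. "\n" .. broker.json_encode(event)
end
```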
+## flush_payload method + +The **flush_payload** method sends a payload using the given method. + +### flush_payload: parameters + +| parameter | type | optional | default value | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -------- | ------------- | +| the function that must be used to send the data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parenthesis otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | | +| a table containing the payload that must be sent | table | no | | +| a table containing metadata for the payload | table | yes | `{}` | + +### flush_payload: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### flush_payload: example + +```lua +local payload = { + host = "mont", + state = "2", + service = "marsan" +} + +local metadata = { + endpoint = "/api/event" +} + +local function send_data() + -- send data somewhere +end + +local result = test_flush:flush_payload(send_data, payload, metadata) +--> result is true or false +``` diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index d7888e8cb90..1c45cdfc5a5 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -1,29 +1,29 @@ # Documentation of the sc_param module -- [Documentation of the sc_param module](#documentation-of-the-sc_param-module) +- [Documentation of the sc\_param module](#documentation-of-the-sc_param-module) - [Introduction](#introduction) - [Default parameters](#default-parameters) - [Module initialization](#module-initialization) - [module constructor](#module-constructor) - [constructor: Example](#constructor-example) - - [param_override method](#param_override-method) - - [param_override: parameters](#param_override-parameters) - - [param_override: example](#param_override-example) - - [check_params method](#check_params-method) - - [check_params: example](#check_params-example) - - [get_kafka_parameters method](#get_kafka_parameters-method) - - [get_kafka_params: parameters](#get_kafka_params-parameters) - - [get_kafka_params: example](#get_kafka_params-example) - - [is_mandatory_config_set method](#is_mandatory_config_set-method) - - [is_mandatory_config_set: parameters](#is_mandatory_config_set-parameters) - - [is_mandatory_config_set: returns](#is_mandatory_config_set-returns) - - [is_mandatory_config_set: example](#is_mandatory_config_set-example) - - [load_event_format_file method](#load_event_format_file-method) - - [load_event_format_file: parameters](#load_event_format_file-parameters) - - [load_event_format_file: returns](#load_event_format_file-returns) - - [load_event_format_file: example](#load_event_format_file-example) - - [build_accepted_elements_info method](#build_accepted_elements_info-method) - - [build_accepted_elements_info: example](#build_accepted_elements_info-example) + - [param\_override method](#param_override-method) + - [param\_override: parameters](#param_override-parameters) + - [param\_override: example](#param_override-example) + - [check\_params method](#check_params-method) + - [check\_params: example](#check_params-example) + - [get\_kafka\_parameters method](#get_kafka_parameters-method) + - [get\_kafka\_params: parameters](#get_kafka_params-parameters) + - [get\_kafka\_params: example](#get_kafka_params-example) + - [is\_mandatory\_config\_set method](#is_mandatory_config_set-method) + - [is\_mandatory\_config\_set: parameters](#is_mandatory_config_set-parameters) + - 
[is\_mandatory\_config\_set: returns](#is_mandatory_config_set-returns) + - [is\_mandatory\_config\_set: example](#is_mandatory_config_set-example) + - [load\_event\_format\_file method](#load_event_format_file-method) + - [load\_event\_format\_file: parameters](#load_event_format_file-parameters) + - [load\_event\_format\_file: returns](#load_event_format_file-returns) + - [load\_event\_format\_file: example](#load_event_format_file-example) + - [build\_accepted\_elements\_info method](#build_accepted_elements_info-method) + - [build\_accepted\_elements\_info: example](#build_accepted_elements_info-example) ## Introduction @@ -41,6 +41,7 @@ The sc_param module provides methods to help you handle parameters for your stre | hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | | acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | | in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | +| flapping | number | 0 | accept only events that aren't flapping (use 1 to accept flapping events too) | host_status(neb), service_status(neb) | | | accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | | accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | | accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | diff --git a/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.0-1.rockspec b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.0-1.rockspec new file mode 100644 index 00000000000..f20753b3888 --- /dev/null +++ b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.5.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.5.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = 
"modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From a494f1de1abc19769e2d0c0d01d49edb6e04f798 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 30 Jan 2023 10:49:04 +0100 Subject: [PATCH 153/219] fix broken long output with line break (#128) --- .../sc_event.lua | 13 +++++-- ...eon-stream-connectors-lib-3.5.1-1.rockspec | 39 +++++++++++++++++++ 2 files changed, 48 insertions(+), 4 deletions(-) create mode 100644 stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.1-1.rockspec diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index 063697f2404..0c8ddce7ba3 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -1176,8 +1176,12 @@ end --- build_outputs: adds short_output and long_output entries in the event table. output entry will be equal to one or another depending on the use_longoutput param function ScEvent:build_outputs() - self.event.long_output = self.event.output - self.event.long_output = self.event.output + -- build long output + if self.event.long_output and self.event.long_output ~= "" then + self.event.long_output = self.event.output .. "\n" .. 
self.event.long_output + else + self.event.long_output = self.event.output + end -- no short output if there is no line break local short_output = string.match(self.event.output, "^(.*)\n") @@ -1187,17 +1191,18 @@ function ScEvent:build_outputs() self.event.short_output = self.event.output end - -- use shortoutput if it exists + -- use short output if it exists if self.params.use_long_output == 0 and short_output then self.event.output = short_output -- replace line break if asked to and we are not already using a short output - elseif not short_output and self.params.remove_line_break_in_output == 1 then + elseif not short_output and self.params.remove_line_break_in_output == 1 then self.event.output = string.gsub(self.event.output, "\n", self.params.output_line_break_replacement_character) end if self.params.output_size_limit ~= "" then self.event.output = string.sub(self.event.output, 1, self.params.output_size_limit) + self.event.short_output = string.sub(self.event.short_output, 1, self.params.output_size_limit) end end diff --git a/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.1-1.rockspec b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.1-1.rockspec new file mode 100644 index 00000000000..727ce86c73c --- /dev/null +++ b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.1-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.5.1-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.5.1-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + 
["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 1766a7a3ce4a53b2e29c98351a333b77d30a2cbe Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 30 Jan 2023 12:10:06 +0100 Subject: [PATCH 154/219] fix wrong mandatory param and wrong dt index (#129) --- .../centreon-certified/capensis/canopsis4-events-apiv2.lua | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua index 3f62bda0096..c09f908b3d6 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua @@ -35,7 +35,7 @@ function EventQueue.new(params) local self = {} local mandatory_parameters = { - "params.canopsis_authkey", + "canopsis_authkey", "canopsis_host" } @@ -330,10 +330,10 @@ function EventQueue:format_event_downtime() } if event.service_id then - self.sc_event.event.formated_event["entity_pattern"][0][0]["cond"]["value"] = tostring(event.cache.service.description) + self.sc_event.event.formated_event["entity_pattern"][1][1]["cond"]["value"] = tostring(event.cache.service.description) .. "/" .. tostring(event.cache.host.name) else - self.sc_event.event.formated_event["entity_pattern"][0][0]["cond"]["value"] = tostring(event.cache.host.name) + self.sc_event.event.formated_event["entity_pattern"][1][1]["cond"]["value"] = tostring(event.cache.host.name) end end end From 8f7653f6419fee372628461ababccef65013c296 Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 31 Jan 2023 16:35:49 +0100 Subject: [PATCH 155/219] add a new method to create shell curl commands (#130) * add get_curl_command method * add proxy_protocol param * move get_curl_command from sc_common to sc_logger * add log_curl_commands param * document new parameters * add log_curl_command documentation * encapsulate strings in log_curl_command * add new log_curl_command method in the sc * fix bugs with log curl command * log curl commands for all possible sc * fix log message * fix bad proxy verif * add 3.6.0 specifle --- .../capensis/canopsis2-events-apiv2.lua | 29 ++++----- .../capensis/canopsis4-events-apiv2.lua | 35 +++++------ .../datadog/datadog-events-apiv2.lua | 15 +++-- .../datadog/datadog-metrics-apiv2.lua | 16 ++--- .../elasticsearch/elastic-events-apiv2.lua | 43 ++++++------- .../kafka/kafka-events-apiv2.lua | 4 +- .../logstash/logstash-events-apiv2.lua | 15 +++-- .../omi/omi_events-apiv2.lua | 24 ++++---- .../pagerduty/pagerduty-events-apiv2.lua | 22 +++---- .../servicenow/servicenow-em-events-apiv2.lua | 41 ++++++------- .../servicenow-incident-events-apiv2.lua | 42 ++++++------- .../signl4/signl4-events-apiv2.lua | 25 ++++---- .../splunk/splunk-events-apiv2.lua | 23 +++---- .../splunk/splunk-metrics-apiv2.lua | 23 +++---- .../sc_common.lua | 2 +- .../sc_logger.lua | 60 +++++++++++++++++++ .../sc_params.lua | 4 ++ stream-connectors/modules/docs/README.md | 15 ++--- stream-connectors/modules/docs/sc_logger.md | 46 +++++++++++++- stream-connectors/modules/docs/sc_param.md | 2 + ...eon-stream-connectors-lib-3.6.0-1.rockspec | 39 ++++++++++++ 21 files changed, 338 insertions(+), 187 deletions(-) create mode 100644 stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.0-1.rockspec diff --git a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua 
b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua index dbc29636fd3..ec4d446a2bf 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua @@ -348,7 +348,7 @@ function EventQueue:add() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end end @@ -375,7 +375,13 @@ function EventQueue:send_data(payload, queue_metadata) local params = self.sc_params.params local url = params.sending_protocol .. "://" .. params.canopsis_user .. ":" .. params.canopsis_password .. "@" .. params.canopsis_host .. ':' .. params.canopsis_port .. queue_metadata.event_route - local data = broker.json_encode(payload) + payload = broker.json_encode(payload) + queue_metadata.headers = { + "content-length: " .. string.len(payload), + "content-type: application/json" + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then @@ -383,7 +389,7 @@ function EventQueue:send_data(payload, queue_metadata) return true end - self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. data) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. payload) self.sc_logger:info("[EventQueue:send_data]: Canopsis address is: " .. tostring(url)) local http_response_body = "" @@ -396,13 +402,7 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "content-length: " .. string.len(data), - "content-type: application/json" - } - ) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then @@ -427,14 +427,7 @@ function EventQueue:send_data(payload, queue_metadata) if queue_metadata.method and queue_metadata.method == "DELETE" then http_request:setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method) else - http_request:setopt( - curl.OPT_HTTPHEADER, - { - "content-length: " .. string.len(data), - "content-type: application/json" - } - ) - http_request:setopt_postfields(data) + http_request:setopt_postfields(payload) end -- performing the HTTP request diff --git a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua index c09f908b3d6..e8735ed5992 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua @@ -355,7 +355,7 @@ function EventQueue:add() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. 
tostring(self.sc_params.params.max_buffer_size)) end end @@ -380,15 +380,22 @@ function EventQueue:send_data(payload, queue_metadata) local params = self.sc_params.params local url = params.sending_protocol .. "://" .. params.canopsis_host .. ':' .. params.canopsis_port .. queue_metadata.event_route - local data = broker.json_encode(payload) - + payload = broker.json_encode(payload) + queue_metadata.headers = { + "content-length: " .. string.len(payload), + "content-type: application/json", + "x-canopsis-authkey: " .. tostring(self.sc_params.params.canopsis_authkey) + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then - self.sc_logger:notice("[send_data]: " .. tostring(data)) + self.sc_logger:notice("[send_data]: " .. tostring(payload)) return true end - self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. data) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. payload) self.sc_logger:info("[EventQueue:send_data]: Canopsis address is: " .. tostring(url)) local http_response_body = "" @@ -401,14 +408,7 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "content-length: " .. string.len(data), - "content-type: application/json", - "x-canopsis-authkey: " .. tostring(self.sc_params.params.canopsis_authkey) - } - ) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then @@ -434,14 +434,7 @@ function EventQueue:send_data(payload, queue_metadata) http_request:setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method) end - http_request:setopt( - curl.OPT_HTTPHEADER, - { - "content-length: " .. string.len(data), - "content-type: application/json" - } - ) - http_request:setopt_postfields(data) + http_request:setopt_postfields(payload) -- performing the HTTP request http_request:perform() diff --git a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua index 9292723c8e2..264ab1a0b75 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua @@ -190,7 +190,7 @@ function EventQueue:add() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -213,7 +213,12 @@ function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") local url = self.sc_params.params.http_server_url .. self.sc_params.params.datadog_event_endpoint + queue_metadata.headers = { + "content-type: application/json", + "DD-API-KEY:" .. 
self.sc_params.params.api_key + } + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:notice("[send_data]: " .. tostring(payload)) @@ -233,13 +238,7 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "content-type: application/json", - "DD-API-KEY:" .. self.sc_params.params.api_key - } - ) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then diff --git a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua index 51516aece65..19e002e12a8 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -234,7 +234,7 @@ function EventQueue:add() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -260,6 +260,12 @@ function EventQueue:send_data(payload, queue_metadata) local url = self.sc_params.params.http_server_url .. tostring(self.sc_params.params.datadog_metric_endpoint) local payload_json = broker.json_encode(payload) + queue_metadata.headers = { + "content-type: application/json", + "DD-API-KEY:" .. self.sc_params.params.api_key + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload_json) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then @@ -280,13 +286,7 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "content-type: application/json", - "DD-API-KEY:" .. self.sc_params.params.api_key - } - ) + :setopt(curl.OPT_HTTPHEADER,queue_metadata.headers) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua index 780fced21a8..79bbc212aff 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -177,8 +177,8 @@ function EventQueue:format_accepted_event() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. 
tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -200,6 +200,15 @@ function EventQueue:format_accepted_event() function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local url = self.sc_params.params.elastic_url .. "/_bulk" + queue_metadata.headers = { + "content-type: application/json;charset=UTF-8", + "content-length: " .. string.len(payload), + "Authorization: Basic " .. (mime.b64(self.sc_params.params.elastic_username .. ":" .. self.sc_params.params.elastic_password)) + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:info("[send_data]: " .. tostring(payload)) @@ -209,24 +218,18 @@ function EventQueue:format_accepted_event() self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) self.sc_logger:info("[EventQueue:send_data]: Elastic URL is: " .. tostring(self.sc_params.params.elastic_url) .. "/_bulk") - local http_response_body = "" - local http_request = curl.easy() - :setopt_url(self.sc_params.params.elastic_url .. "/_bulk") - :setopt_writefunction( - function (response) - http_response_body = http_response_body .. tostring(response) - end - ) - :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) - :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "content-type: application/json;charset=UTF-8", - "content-length: " .. string.len(payload), - "Authorization: Basic " .. (mime.b64(self.sc_params.params.elastic_username .. ":" .. self.sc_params.params.elastic_password)) - } - ) + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) + -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then if (self.sc_params.params.proxy_port ~= '') then diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index c23e3e817fd..5509ea79945 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -184,8 +184,8 @@ function EventQueue:add() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. 
tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua index ee96be5b269..18203d988a5 100644 --- a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -170,7 +170,7 @@ function EventQueue:add() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -193,6 +193,10 @@ end function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + queue_metadata.headers = {"accept: application/json"} + queue_metadata.method = "PUT" + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:notice("[send_data]: " .. tostring(payload)) @@ -212,13 +216,8 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt(curl.OPT_CUSTOMREQUEST, "PUT") - :setopt( - curl.OPT_HTTPHEADER, - { - "accept: application/json" - } - ) + :setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua index b4d9cb123e0..801b9ce18d4 100644 --- a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua +++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua @@ -197,8 +197,8 @@ function EventQueue:add() self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -229,6 +229,14 @@ end function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local url = self.sc_params.params.http_server_url + queue_metadata.headers = { + "Content-Type: text/xml", + "content-length: " .. 
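Logstash is the one connector in this set that sends PUT instead of POST, so the HTTP verb now travels in `queue_metadata.method` alongside the headers; `log_curl_command` prints it as `-X PUT` and the same value feeds `curl.OPT_CUSTOMREQUEST`. Note that, unlike the other connectors, this hunk does not define a local `url` before passing it to `log_curl_command`, so the logged URL is `nil` unless a global `url` happens to exist. A sketch of the intended wiring, with the URL built explicitly (assuming `http_server_url` is the Logstash endpoint parameter, as in the other connectors):

```lua
-- assumed: http_server_url holds the Logstash endpoint in the connector params
local url = self.sc_params.params.http_server_url

queue_metadata.headers = {"accept: application/json"}
queue_metadata.method = "PUT"

-- the verb is logged ("-X PUT") and sent from the same field
self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload)
http_request
  :setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method)
  :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
```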
string.len(payload) + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:notice("[send_data]: " .. tostring(payload)) @@ -236,11 +244,11 @@ function EventQueue:send_data(payload, queue_metadata) end self.sc_logger:info("[EventQueue:send_data]: Going to send the following xml " .. tostring(payload)) - self.sc_logger:info("[EventQueue:send_data]: BSM Http Server URL is: \"" .. tostring(self.sc_params.params.http_server_url .. "\"")) + self.sc_logger:info("[EventQueue:send_data]: BSM Http Server URL is: \"" .. tostring(url) .. "\"") local http_response_body = "" local http_request = curl.easy() - :setopt_url(self.sc_params.params.http_server_url) + :setopt_url(url) :setopt_writefunction( function (response) http_response_body = http_response_body .. tostring(response) @@ -248,13 +256,7 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "Content-Type: text/xml", - "content-length: " .. string.len(payload) - } - ) + :setopt(curl.OPT_HTTPHEADER,queue_metadata.headers) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index fc2f2353807..7d7cf4911a7 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -311,7 +311,7 @@ function EventQueue:add() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -333,6 +333,14 @@ end function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local url = self.sc_params.params.http_server_url + queue_metadata.headers = { + "content-type: application/json", + "content-length:" .. string.len(payload), + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:notice("[send_data]: " .. tostring(payload)) @@ -340,11 +348,11 @@ function EventQueue:send_data(payload, queue_metadata) end self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) - self.sc_logger:info("[EventQueue:send_data]: Pagerduty address is: " .. tostring(self.sc_params.params.http_server_url)) + self.sc_logger:info("[EventQueue:send_data]: Pagerduty address is: " .. tostring(url)) local http_response_body = "" local http_request = curl.easy() - :setopt_url(self.sc_params.params.http_server_url) + :setopt_url(url) :setopt_writefunction( function (response) http_response_body = http_response_body .. 
tostring(response) @@ -352,13 +360,7 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "content-type: application/json", - "content-length:" .. string.len(payload), - } - ) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua index ef15ed19148..3eac03dc8e7 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua @@ -218,13 +218,30 @@ end -- @throw exception if http call fails or response is empty -------------------------------------------------------------------------------- function EventQueue:call(url, method, data, authToken) - method = method or "GET" data = data or nil authToken = authToken or nil + local queue_metadata = { + method = method or "GET" + } + + -- handle headers + if not authToken and queue_metadata.method ~= "GET" then + self.sc_logger:debug("EventQueue:call: Add form header") + queue_metadata.headers = {"Content-Type: application/x-www-form-urlencoded"} + else + broker_log:info(3, "Add JSON header") + queue_metadata.headers = { + "Accept: application/json", + "Content-Type: application/json", + "Authorization: Bearer " .. authToken + } + end local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. ".service-now.com/" .. tostring(url) self.sc_logger:debug("EventQueue:call: Prepare url " .. endpoint) + self.sc_logger:log_curl_command(endpoint, queue_metadata, self.sc_params.params, data) + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:notice("[send_data]: " .. tostring(data) .. " to endpoint: " .. tostring(endpoint)) @@ -238,6 +255,7 @@ function EventQueue:call(url, method, data, authToken) res = res .. tostring(response) end) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) self.sc_logger:debug("EventQueue:call: Request initialize") @@ -259,24 +277,7 @@ function EventQueue:call(url, method, data, authToken) end end - if not authToken then - if method ~= "GET" then - self.sc_logger:debug("EventQueue:call: Add form header") - request:setopt(curl.OPT_HTTPHEADER, { "Content-Type: application/x-www-form-urlencoded" }) - end - else - broker_log:info(3, "Add JSON header") - request:setopt( - curl.OPT_HTTPHEADER, - { - "Accept: application/json", - "Content-Type: application/json", - "Authorization: Bearer " .. authToken - } - ) - end - - if method ~= "GET" then + if queue_metadata.method ~= "GET" then self.sc_logger:debug("EventQueue:call: Add post data") request:setopt_postfields(data) end @@ -389,7 +390,7 @@ function EventQueue:add() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. 
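The header selection moved to the top of `EventQueue:call` mirrors the two phases of the ServiceNow exchange: the first call fetches an OAuth token with a form-encoded POST, and every authenticated call afterwards sends JSON with a Bearer token. One caveat: the `else` branch also covers unauthenticated GET calls, where `authToken` is `nil` and the `"Authorization: Bearer " .. authToken` concatenation would raise an error if such a call were ever made. A condensed, defensive sketch of the same branch (names as in the diff; `tostring` guards the token):

```lua
-- condensed from EventQueue:call; authToken is nil only before the
-- OAuth token has been fetched
local queue_metadata = { method = method or "GET" }

if not authToken and queue_metadata.method ~= "GET" then
  -- token request: ServiceNow expects a form-encoded body here
  queue_metadata.headers = {"Content-Type: application/x-www-form-urlencoded"}
else
  -- authenticated call: JSON in, JSON out
  queue_metadata.headers = {
    "Accept: application/json",
    "Content-Type: application/json",
    "Authorization: Bearer " .. tostring(authToken)
  }
end
```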
tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua index 3c074e62d4a..f184df5f354 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua @@ -228,13 +228,31 @@ end -- @throw exception if http call fails or response is empty -------------------------------------------------------------------------------- function EventQueue:call(url, method, data, authToken) - method = method or "GET" data = data or nil authToken = authToken or nil + local queue_metadata = { + method = method or "GET" + } + + -- handle headers + if not authToken and queue_metadata.method ~= "GET" then + self.sc_logger:debug("EventQueue:call: Add form header") + queue_metadata.headers = {"Content-Type: application/x-www-form-urlencoded"} + else + broker_log:info(3, "Add JSON header") + queue_metadata.headers = { + "Accept: application/json", + "Content-Type: application/json", + "Authorization: Bearer " .. authToken + } + end + local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. "." .. self.sc_params.params.http_server_url .. "/" .. tostring(url) self.sc_logger:debug("EventQueue:call: Prepare url " .. endpoint) + self.sc_logger:log_curl_command(endpoint, queue_metadata, self.sc_params.params, data) + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:notice("[send_data]: " .. tostring(data) .. " to endpoint: " .. tostring(endpoint)) @@ -248,6 +266,7 @@ function EventQueue:call(url, method, data, authToken) res = res .. tostring(response) end) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) self.sc_logger:debug("EventQueue:call: Request initialize") @@ -269,24 +288,7 @@ function EventQueue:call(url, method, data, authToken) end end - if not authToken then - if method ~= "GET" then - self.sc_logger:debug("EventQueue:call: Add form header") - request:setopt(curl.OPT_HTTPHEADER, { "Content-Type: application/x-www-form-urlencoded" }) - end - else - broker_log:info(3, "Add JSON header") - request:setopt( - curl.OPT_HTTPHEADER, - { - "Accept: application/json", - "Content-Type: application/json", - "Authorization: Bearer " .. authToken - } - ) - end - - if method ~= "GET" then + if queue_metadata.method ~= "GET" then self.sc_logger:debug("EventQueue:call: Add post data") request:setopt_postfields(data) end @@ -390,7 +392,7 @@ function EventQueue:add() self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. 
tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua index 79b677bbc67..f2b312bcf69 100644 --- a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua @@ -180,8 +180,8 @@ function EventQueue:add() self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -202,6 +202,10 @@ end function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local url = self.sc_params.params.server_address .. "/webhook/" .. self.sc_params.params.team_secret + queue_metadata.headers = {"content-type: application/json"} + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then @@ -210,11 +214,11 @@ function EventQueue:send_data(payload, queue_metadata) end self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) - self.sc_logger:info("[EventQueue:send_data]: Signl4 Server URL is: " .. tostring(self.sc_params.params.server_address) .. "/webhook/" .. tostring(self.sc_params.params.team_secret)) + self.sc_logger:info("[EventQueue:send_data]: Signl4 Server URL is: " .. tostring(url)) local http_response_body = "" local http_request = curl.easy() - :setopt_url(self.sc_params.params.server_address .. "/webhook/" .. self.sc_params.params.team_secret) + :setopt_url(url) :setopt_writefunction( function (response) http_response_body = http_response_body .. 
tostring(response) @@ -222,12 +226,8 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "content-type: application/json", - } - ) + :setopt(curl.OPT_HTTPHEADER,queue_metadata.headers) + -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then if (self.sc_params.params.proxy_port ~= '') then @@ -236,6 +236,7 @@ function EventQueue:send_data(payload, queue_metadata) self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") end end + -- set proxy user configuration if (self.sc_params.params.proxy_username ~= '') then if (self.sc_params.params.proxy_password ~= '') then @@ -244,6 +245,7 @@ function EventQueue:send_data(payload, queue_metadata) self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") end end + -- adding the HTTP POST data http_request:setopt_postfields(payload) -- performing the HTTP request @@ -251,14 +253,17 @@ function EventQueue:send_data(payload, queue_metadata) -- collecting results http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() + -- Handling the return code local retval = false + if http_response_code == 200 or http_response_code == 201 then self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) retval = true else self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) end + return retval end diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index 66ac3591b71..450f232da8e 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -172,7 +172,7 @@ function EventQueue:add() } self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -194,6 +194,14 @@ end function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local url = self.sc_params.params.http_server_url + queue_metadata.headers = { + "content-type: application/json", + "content-length:" .. string.len(payload), + "authorization: Splunk " .. self.sc_params.params.splunk_token + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then @@ -202,11 +210,11 @@ function EventQueue:send_data(payload, queue_metadata) end self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) - self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. tostring(self.sc_params.params.http_server_url)) + self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. 
tostring(url)) local http_response_body = "" local http_request = curl.easy() - :setopt_url(self.sc_params.params.http_server_url) + :setopt_url(url) :setopt_writefunction( function (response) http_response_body = http_response_body .. tostring(response) @@ -214,14 +222,7 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "content-type: application/json", - "content-length:" .. string.len(payload), - "authorization: Splunk " .. self.sc_params.params.splunk_token, - } - ) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) -- set proxy address configuration if (self.sc_params.params.proxy_address ~= '') then diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua index b4b4cb6ecae..2af12d2664f 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -229,7 +229,7 @@ function EventQueue:add() } self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) - .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end -------------------------------------------------------------------------------- @@ -250,6 +250,14 @@ end function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + queue_metadata.headers = { + "content-type: application/json", + "content-length:" .. string.len(payload), + "authorization: Splunk " .. self.sc_params.params.splunk_token + } + local url = self.sc_params.params.http_server_url + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then @@ -258,11 +266,11 @@ function EventQueue:send_data(payload, queue_metadata) end self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) - self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. tostring(self.sc_params.params.http_server_url)) + self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. tostring(url)) local http_response_body = "" local http_request = curl.easy() - :setopt_url(self.sc_params.params.http_server_url) + :setopt_url(url) :setopt_writefunction( function (response) http_response_body = http_response_body .. tostring(response) @@ -270,14 +278,7 @@ function EventQueue:send_data(payload, queue_metadata) ) :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) - :setopt( - curl.OPT_HTTPHEADER, - { - "content-type: application/json", - "content-length:" .. string.len(payload), - "authorization: Splunk " .. 
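For the Splunk connectors the refactor has a practical payoff: with `log_curl_commands = 1`, the logfile now contains a replayable request for the HTTP Event Collector. A hypothetical example of the logged command follows; the token, URL and payload are invented for illustration, and the command is folded here for readability while the logger writes it on a single line:

```lua
--[[
  curl -X POST
    -H 'content-type: application/json'
    -H 'content-length:51'
    -H 'authorization: Splunk 11111111-2222-3333-4444-555555555555'
    'https://splunk.example.com:8088/services/collector/event'
    -d '{"sourcetype":"_json","event":{"state":"CRITICAL"}}'
]]--
```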
self.sc_params.params.splunk_token,
-      }
-    )
+    :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
 
   -- set proxy address configuration
   if (self.sc_params.params.proxy_address ~= '') then
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
index d06684a81c7..663a7544f5d 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
@@ -98,7 +98,7 @@ end
 -- @param [opt] separator (string) the separator character that will be used to split the string
 -- @return false (boolean) if text param is empty or nil
 -- @return table (table) a table of strings
-function ScCommon:split (text, separator)
+function ScCommon:split(text, separator)
   -- return false if text is nil or empty
   if text == nil or text == "" then
     self.sc_logger:error("[sc_common:split]: could not split text because it is nil or empty")
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua
index f63e9623505..523e6b7f074 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua
@@ -88,4 +88,64 @@ function ScLogger:debug(message)
   broker_log:info(3, message)
 end
 
+--- log_curl_command: build a shell curl command based on given parameters and write it in the logfile
+-- @param url (string) the url to which curl will send data
+-- @param metadata (table) a table that contains headers information and http method for curl
+-- @param params (table) the stream connector params table
+-- @param data (string) [opt] the data that must be sent by curl
+function ScLogger:log_curl_command(url, metadata, params, data)
+  if params.log_curl_commands == 1 then
+    self:debug("[sc_logger:log_curl_command]: starting to compute the curl command")
+    local curl_string = "curl "
+
+    -- handle proxy
+    self:debug("[sc_logger:log_curl_command]: proxy information: protocol: " .. params.proxy_protocol .. ", address: "
+      .. params.proxy_address .. ", port: " .. params.proxy_port .. ", user: " .. params.proxy_username .. ", password: "
+      .. tostring(params.proxy_password))
+    local proxy_url
+    if params.proxy_address ~= "" then
+      if params.proxy_username ~= "" then
+        proxy_url = params.proxy_protocol .. "://" .. params.proxy_username .. ":" .. params.proxy_password
+          .. "@" .. params.proxy_address .. ":" .. params.proxy_port
+      else
+        proxy_url = params.proxy_protocol .. "://" .. params.proxy_address .. ":" .. params.proxy_port
+      end
+
+      curl_string = curl_string .. "--proxy '" .. proxy_url .. "' "
+    end
+
+    -- handle certificate verification
+    if params.allow_insecure_connection == 1 then
+      curl_string = curl_string .. "-k "
+    end
+
+    -- handle http method
+    if metadata.method then
+      curl_string = curl_string .. "-X " .. metadata.method .. " "
+    elseif data then
+      curl_string = curl_string .. "-X POST "
+    else
+      curl_string = curl_string .. "-X GET "
+    end
+
+    -- handle headers
+    if metadata.headers then
+      for _, header in ipairs(metadata.headers) do
+        curl_string = curl_string .. "-H '" .. tostring(header) .. "' "
+      end
+    end
+
+    curl_string = curl_string .. "'" .. tostring(url) .. "' "
+
+    -- handle curl data
+    if data and data ~= "" then
+      curl_string = curl_string .. "-d '" .. data .. "'"
+    end
+
+    self:notice("[sc_logger:log_curl_command]: " .. 
curl_string) + else + self:debug("[sc_logger:log_curl_command]: curl command not logged because log_curl_commands param is set to: " .. params.log_curl_commands) + end +end + return sc_logger \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index c45403582b1..6732ddd18c5 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -89,6 +89,7 @@ function sc_params.new(common, logger) proxy_port = "", proxy_username = "", proxy_password = "", + proxy_protocol = "http", -- event formatting parameters format_file = "", @@ -114,6 +115,7 @@ function sc_params.new(common, logger) -- logging parameters logfile = "", log_level = "", + log_curl_commands = 0, -- metric metric_name_regex = "", @@ -778,6 +780,7 @@ function ScParams:check_params() self.params.enable_service_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_service_status_dedup, 0) self.params.send_data_test = self.common:check_boolean_number_option_syntax(self.params.send_data_test, 0) self.params.proxy_address = self.common:if_wrong_type(self.params.proxy_address, "string", "") + self.params.proxy_protocol = self.common:if_wrong_type(self.params.proxy_protocol, "string", "http") self.params.proxy_port = self.common:if_wrong_type(self.params.proxy_port, "number", "") self.params.proxy_username = self.common:if_wrong_type(self.params.proxy_username, "string", "") self.params.proxy_password = self.common:if_wrong_type(self.params.proxy_password, "string", "") @@ -785,6 +788,7 @@ function ScParams:check_params() self.params.allow_insecure_connection = self.common:number_to_boolean(self.common:check_boolean_number_option_syntax(self.params.allow_insecure_connection, 0)) self.params.logfile = self.common:ifnil_or_empty(self.params.logfile, "/var/log/centreon-broker/stream-connector.log") self.params.log_level = self.common:ifnil_or_empty(self.params.log_level, 1) + self.params.log_curl_commands = self.common:check_boolean_number_option_syntax(self.params.log_curl_commands, 0) self.params.use_long_output = self.common:check_boolean_number_option_syntax(self.params.use_longoutput, 1) self.params.remove_line_break_in_output = self.common:check_boolean_number_option_syntax(self.params.remove_line_break_in_output, 1) self.params.output_line_break_replacement_character = self.common:if_wrong_type(self.params.output_line_break_replacement_character, "string", " ") diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md index 41ea39467cc..5cf1fb96110 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -50,13 +50,14 @@ ## sc_logger methods -| Method name | Method description | Link | -| ----------- | ------------------------------------------- | -------------------------------------------- | -| error | write an error message in the log file | [Documentation](sc_logger.md#error-method) | -| warning | write a warning message in the log file | [Documentation](sc_logger.md#warning-method) | -| notice | write a notice/info message in the log file | [Documentation](sc_logger.md#notice-method) | -| info | write an info message in the log file | [Documentation](sc_logger.md#info-method) | -| debug | write a debug message in the log file | [Documentation](sc_logger.md#debug-method) | +| Method name | 
Method description                                      | Link                                                   |
+| ---------------- | ------------------------------------------------------ | ------------------------------------------------------ |
+| error            | write an error message in the log file                  | [Documentation](sc_logger.md#error-method)              |
+| warning          | write a warning message in the log file                 | [Documentation](sc_logger.md#warning-method)            |
+| notice           | write a notice/info message in the log file             | [Documentation](sc_logger.md#notice-method)             |
+| info             | write an info message in the log file                   | [Documentation](sc_logger.md#info-method)               |
+| debug            | write a debug message in the log file                   | [Documentation](sc_logger.md#debug-method)              |
+| log_curl_command | creates and logs a curl command using given parameters  | [Documentation](sc_logger.md#log_curl_command-method)   |
 
 ## sc_broker methods
 
diff --git a/stream-connectors/modules/docs/sc_logger.md b/stream-connectors/modules/docs/sc_logger.md
index 8b7044d823d..620bf721b1d 100644
--- a/stream-connectors/modules/docs/sc_logger.md
+++ b/stream-connectors/modules/docs/sc_logger.md
@@ -1,6 +1,6 @@
 # Documentation of the sc_logger module
 
-- [Documentation of the sc_logger module](#documentation-of-the-sc_logger-module)
+- [Documentation of the sc\_logger module](#documentation-of-the-sc_logger-module)
   - [Introduction](#introduction)
   - [Best practices](#best-practices)
   - [Module initialization](#module-initialization)
@@ -21,6 +21,9 @@
   - [notice method](#notice-method)
     - [notice: parameters](#notice-parameters)
     - [notice: example](#notice-example)
+  - [log\_curl\_command method](#log_curl_command-method)
+    - [log\_curl\_command: parameters](#log_curl_command-parameters)
+    - [log\_curl\_command: example](#log_curl_command-example)
 
 ## Introduction
 
@@ -158,3 +161,44 @@ The **notice** method will print a notice message in the logfile if **severity i
 -- call notice method
 test_logger:notice("[module_name:method_name]: This is a notice message.")
 ```
+
+## log_curl_command method
+
+The **log_curl_command** method will print a notice message containing a ready-to-use shell curl command in the logfile. 
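Nothing is written unless the `log_curl_commands` parameter is enabled. A minimal sketch of the relevant connector parameters (values illustrative; the parameter names are the ones this patch adds to sc_params.lua):

```lua
local params = {
  log_curl_commands = 1,         -- 0 (the default) disables command logging entirely
  allow_insecure_connection = 1, -- adds "-k" to the generated command
  proxy_address = ""             -- when set, a "--proxy '<url>'" fragment follows "curl"
}
```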
+
+See [notice method](#notice-method) for more information about notice logs
+
+### log_curl_command: parameters
+
+| parameter                                                         | type   | optional | default value |
+| ----------------------------------------------------------------- | ------ | -------- | ------------- |
+| the url for the curl command                                      | string | no       |               |
+| metadata containing headers information and http method for curl  | table  | no       |               |
+| stream connector parameters                                       | table  | no       |               |
+| data that must be sent                                            | string | yes      |               |
+
+### log_curl_command: example
+
+```lua
+local url = "https://127.0.0.1/my_endpoint"
+local metadata = {
+  method = "POST",
+  headers = {
+    "content-type: application/json",
+    "token: mont-de-marsan"
+  }
+}
+
+local params = {
+  log_curl_commands = 1,
+  allow_insecure_connection = 1
+}
+
+local data = '{"host":"test-host","state":"down"}'
+
+-- call the log_curl_command method
+test_logger:log_curl_command(url, metadata, params, data)
+--> this will print the following log
+--[[
+  Thu Mar 17 10:44:53 2022: INFO: [sc_logger:log_curl_command]: curl -k -X POST -H 'content-type: application/json' -H 'token: mont-de-marsan' 'https://127.0.0.1/my_endpoint' -d '{"host":"test-host","state":"down"}'
+]]--
+```
diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md
index 1c45cdfc5a5..878bdeedfd8 100644
--- a/stream-connectors/modules/docs/sc_param.md
+++ b/stream-connectors/modules/docs/sc_param.md
@@ -72,6 +72,7 @@ The sc_param module provides methods to help you handle parameters for your stre
 | proxy_port | number | | port of the proxy | | |
 | proxy_username | string | | user for the proxy | | |
 | proxy_password | string | | pasword of the proxy user | | |
+| proxy_protocol | string | http | protocol to use with the proxy (can be http or https) | | |
 | connection_timeout | number | 60 | time to wait in second when opening connection | | |
 | allow_insecure_connection | number | 0 | check the certificate validity of the peer host (0 = needs to be a valid certificate), use 1 if you are using self signed certificates | | |
 | use_long_output | number | 1 | use the long output when sending an event (set to 0 to send the short output) | service_status(neb), host_status(neb) | |
 | metric_replacement_character | string | "_" | the character that will be used to replace invalid characters in the metric name | service_status(neb), host_status(neb) | |
 | logfile | string | **check the stream connector documentation** | the logfile that will be used for the stream connector | any | |
 | log_level | number | 1 | the verbosity level for the logs. 
1 = error + notice, 2 = error + warning + notice, 3 = error + warning + notice + debug (you should avoir using level 3) | any | | +| log_curl_commands | number | 0 | log ready to use curl commands when enabled (0 = disabled, 1 = enabled) | any | | ## Module initialization diff --git a/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.0-1.rockspec b/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.0-1.rockspec new file mode 100644 index 00000000000..9bd517c9e9a --- /dev/null +++ b/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.6.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.6.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 51ec1ca08ba3bcc85032d024f7f82750bec8af8c Mon Sep 17 00:00:00 2001 From: tcharles Date: Wed, 1 Feb 2023 13:36:49 +0100 Subject: [PATCH 156/219] fix wrong method name (#131) --- .../capensis/canopsis2-events-apiv2.lua | 8 ++++---- .../capensis/canopsis4-events-apiv2.lua | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua index ec4d446a2bf..c49a085d0d5 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua +++ 
b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua @@ -100,10 +100,10 @@ function EventQueue.new(params) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements - self.sc_flush:add_queue_metadatas(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route}) - self.sc_flush:add_queue_metadatas(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route}) - self.sc_flush:add_queue_metadatas(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route}) - self.sc_flush:add_queue_metadatas(categories.neb.id, elements.downtime.id, {event_route = self.sc_params.params.canopsis_downtime_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.downtime.id, {event_route = self.sc_params.params.canopsis_downtime_route}) self.format_event = { [categories.neb.id] = { diff --git a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua index e8735ed5992..36285c2735e 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua @@ -97,10 +97,10 @@ function EventQueue.new(params) local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements - self.sc_flush:add_queue_metadatas(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route}) - self.sc_flush:add_queue_metadatas(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route}) - self.sc_flush:add_queue_metadatas(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route}) - self.sc_flush:add_queue_metadatas(categories.neb.id, elements.downtime.id, {event_route = self.sc_params.params.canopsis_downtime_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.downtime.id, {event_route = self.sc_params.params.canopsis_downtime_route}) self.format_event = { [categories.neb.id] = { From f7ef3fb5f455b44a472c03abe6342a63aa509b45 Mon Sep 17 00:00:00 2001 From: xenofree <35098906+xenofree@users.noreply.github.com> Date: Wed, 8 Feb 2023 17:39:36 +0100 Subject: [PATCH 157/219] add instance and service as grouping key (#133) * add instance and service as grouping key * not working without the host label --------- Co-authored-by: Thomas Vasseur --- .../prometheus/prometheus-gateway-apiv1.lua | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua index 3fe09e6c3fc..c0a66e4f965 100644 --- a/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua +++ b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua @@ -2,6 +2,7 @@ -- libraries local curl = require "cURL" +local base64 = require("base64") -- Global variables @@ -299,7 +300,6 @@ function EventQueue:new (conf) prometheus_gateway_address = 'http://localhost', prometheus_gateway_port = '9091', prometheus_gateway_job = 'monitoring', - prometheus_gateway_instance = 'centreon', http_timeout = 60, proxy_address = '', proxy_port = '', @@ -841,7 +841,7 @@ function EventQueue:send_data () local httpResponseBody = "" local httpRequest = curl.easy() - :setopt_url(self.prometheus_gateway_address .. ':' .. self.prometheus_gateway_port .. '/metrics/job/' .. self.prometheus_gateway_job .. '/instance/' .. self.prometheus_gateway_instance) + :setopt_url(self.prometheus_gateway_address .. ':' .. self.prometheus_gateway_port .. '/metrics/job/' .. self.prometheus_gateway_job .. '/instance/' .. self.current_event.hostname .. '/service@base64/' .. base64.encode(self.current_event.service_description)) :setopt_writefunction( function (response) httpResponseBody = httpResponseBody .. tostring(response) @@ -946,4 +946,4 @@ function write (event) end return true -end \ No newline at end of file +end From 2526b611cf64aed5fa66fbab331bf399da542420 Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 14 Feb 2023 11:24:26 +0100 Subject: [PATCH 158/219] fix bulk event and improve canopsis4 (#134) --- .../capensis/canopsis2-events-apiv2.lua | 4 +-- .../capensis/canopsis4-events-apiv2.lua | 36 +++++++++++-------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua index c49a085d0d5..9fd8612ca0a 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua @@ -363,7 +363,7 @@ function EventQueue:build_payload(payload, event) if not payload then payload = event else - payload = table.insert(payload, event) + table.insert(payload, event) end return payload @@ -385,7 +385,7 @@ function EventQueue:send_data(payload, queue_metadata) -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then - self.sc_logger:notice("[send_data]: " .. tostring(data)) + self.sc_logger:notice("[send_data]: " .. 
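The new Prometheus grouping key relies on the Pushgateway's `@base64` label syntax: service descriptions routinely contain `/` and spaces, which would break a plain URL path segment, while the `instance` label now comes from the host name so that two hosts exposing the same service no longer overwrite each other's metrics. A sketch of the resulting URL, assuming the same `base64` module the connector now requires (values illustrative):

```lua
local base64 = require("base64")

local job      = "monitoring"
local hostname = "srv-db-01"
local service  = "Disk /var"   -- "/" is only safe thanks to the base64 segment

local url = "http://localhost:9091"
  .. "/metrics/job/" .. job
  .. "/instance/" .. hostname
  .. "/service@base64/" .. base64.encode(service)
-- http://localhost:9091/metrics/job/monitoring/instance/srv-db-01/service@base64/RGlzayAvdmFy
```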
tostring(payload)) return true end diff --git a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua index 36285c2735e..11146251346 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua @@ -70,6 +70,7 @@ function EventQueue.new(params) self.sc_params.params.timezone = params.timezone or "Europe/Paris" self.sc_params.params.accepted_categories = params.accepted_categories or "neb" self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status,acknowledgement" + self.sc_params.params.use_severity_as_state = params.use_severity_as_state or 0 -- apply users params and check syntax of standard ones self.sc_params:param_override(params) @@ -190,6 +191,15 @@ function EventQueue:list_hostgroups() return hostgroups end +function EventQueue:get_state(event, severity) + -- return standard centreon state + if severity and self.sc_params.params.use_severity_as_state == 1 then + return severity + end + + return self.centreon_to_canopsis_state[event.category][event.element][event.state] +end + function EventQueue:get_connector_name() -- use poller name as a connector name if self.sc_params.params.connector_name_type == "poller" then @@ -211,12 +221,11 @@ function EventQueue:format_event_host() resource = "", output = event.short_output, long_output = event.long_output, - state = self.centreon_to_canopsis_state[event.category][event.element][event.state], - timestamp = event.last_check - -- extra informations no longer exists with canopsis api v4 ? - -- hostgroups = self:list_hostgroups(), - -- notes_url = tostring(event.cache.host.notes_url), - -- action_url = tostring(event.cache.host.action_url) + state = self:get_state(event, event.cache.severity.host), + timestamp = event.last_check, + hostgroups = self:list_hostgroups(), + notes_url = tostring(event.cache.host.notes_url), + action_url = tostring(event.cache.host.action_url) } end @@ -232,13 +241,12 @@ function EventQueue:format_event_service() resource = tostring(event.cache.service.description), output = event.short_output, long_output = event.long_output, - state = self.centreon_to_canopsis_state[event.category][event.element][event.state], - timestamp = event.last_check - -- extra informations - -- servicegroups = self:list_servicegroups(), - -- notes_url = event.cache.service.notes_url, - -- action_url = event.cache.service.action_url, - -- hostgroups = self:list_hostgroups() + state = self:get_state(event, event.cache.severity.service), + timestamp = event.last_check, + servicegroups = self:list_servicegroups(), + notes_url = event.cache.service.notes_url, + action_url = event.cache.service.action_url, + hostgroups = self:list_hostgroups() } end @@ -369,7 +377,7 @@ function EventQueue:build_payload(payload, event) if not payload then payload = {event} else - payload = table.insert(payload, event) + table.insert(payload, event) end return payload From ec9a15a4f12093fea18c521efe3b93005ecddd0d Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Fri, 24 Feb 2023 14:06:35 +0100 Subject: [PATCH 159/219] Update README.md --- stream-connectors/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stream-connectors/README.md b/stream-connectors/README.md index 0bfd9219ab8..c1aa110edf3 100644 --- a/stream-connectors/README.md +++ b/stream-connectors/README.md @@ -1,5 +1,7 @@ # Centreon Stream Connectors +The 
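`use_severity_as_state` defaults to 0, so existing Canopsis 4 setups keep the mapped Centreon-to-Canopsis states; setting it to 1 forwards the Centreon severity instead whenever one is cached. A behaviour sketch (severity value illustrative):

```lua
-- assuming a host with a configured Centreon severity of 3:
--   use_severity_as_state = 0 (default) -> the mapped state is sent
--   use_severity_as_state = 1           -> 3, the raw severity, is sent
-- get_state() falls back to the static mapping when no severity is
-- cached for the object, so the parameter is safe to enable globally
local state = self:get_state(event, event.cache.severity.host)
```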
content of the repository has been moved to https://github.com/centreon/centreon-collect + [![Contributors][contributors-shield]][contributors-url] [![Stars][stars-shield]][stars-url] From da7aefcf9aa1cc8bfd0d59a249e1192fca636269 Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 28 Feb 2023 09:38:48 +0100 Subject: [PATCH 160/219] Mon 17276 fix poller filter (#135) * fix broken long output with line break * fix wrong poller filter --- .../sc_event.lua | 13 ++++--- ...eon-stream-connectors-lib-3.5.2-1.rockspec | 39 +++++++++++++++++++ 2 files changed, 46 insertions(+), 6 deletions(-) create mode 100644 stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.2-1.rockspec diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index 0c8ddce7ba3..1b0e75dd93c 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -680,11 +680,17 @@ end --- is_valid_poller: check if the event is monitored from an accepted poller -- @return true|false (boolean) function ScEvent:is_valid_poller() + -- return false if instance id is not found in cache + if not self.event.cache.host.instance_id then + self.sc_logger:warning("[sc_event:is_valid_poller]: no instance ID found for host ID: " .. tostring(self.event.host_id)) + return false + end + self.event.cache.poller = self.sc_broker:get_instance(self.event.cache.host.instance_id) -- required if we want to easily have access to poller name with macros {cache.instance.name} self.event.cache.instance = { - id = self.event.cache.host.instance, + id = self.event.cache.host.instance_id, name = self.event.cache.poller } @@ -693,11 +699,6 @@ function ScEvent:is_valid_poller() return true end - -- return false if instance id is not found in cache - if not self.event.cache.host.instance then - self.sc_logger:warning("[sc_event:is_valid_poller]: no instance ID found for host ID: " .. 
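The poller-filter fix reorders the guard in `is_valid_poller`: before this patch the instance check ran after `get_instance()` had already been called, and it tested `self.event.cache.host.instance` while the cache actually stores the id under `instance_id`, so hosts without a cached instance were never cleanly rejected. Condensed, the corrected lookup reads (names as in sc_event.lua):

```lua
-- reject the event before touching the broker cache with a nil id
if not self.event.cache.host.instance_id then
  self.sc_logger:warning("[sc_event:is_valid_poller]: no instance ID found for host ID: "
    .. tostring(self.event.host_id))
  return false
end

self.event.cache.poller = self.sc_broker:get_instance(self.event.cache.host.instance_id)
```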
tostring(self.event.host_id)) - return false - end -- return false if no poller found in cache if not self.event.cache.poller then diff --git a/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.2-1.rockspec b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.2-1.rockspec new file mode 100644 index 00000000000..ead1eb34909 --- /dev/null +++ b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.2-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.5.2-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.5.2-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 40049ad5ed2bcf16a90af57624c5ebee634ab6da Mon Sep 17 00:00:00 2001 From: tcharles Date: Tue, 28 Feb 2023 09:48:30 +0100 Subject: [PATCH 161/219] fix rockspec v3.5 and 3.6 (#136) --- .../centreon-stream-connectors-lib-3.6.1-1.rockspec} | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename stream-connectors/modules/specs/{3.5.x/centreon-stream-connectors-lib-3.5.2-1.rockspec => 3.6.x/centreon-stream-connectors-lib-3.6.1-1.rockspec} (98%) diff --git a/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.2-1.rockspec b/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.1-1.rockspec similarity index 98% rename from stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.2-1.rockspec rename to 
stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.1-1.rockspec index ead1eb34909..44ca80f85ec 100644 --- a/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.2-1.rockspec +++ b/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.1-1.rockspec @@ -1,8 +1,8 @@ package = "centreon-stream-connectors-lib" -version = "3.5.2-1" +version = "3.6.1-1" source = { url = "git+https://github.com/centreon/centreon-stream-connector-scripts", - tag = "3.5.2-1" + tag = "3.6.1-1" } description = { summary = "Centreon stream connectors lua modules", From 46d1a3c86f12a616a6effd1791c14cdcc11a12b3 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Wed, 1 Mar 2023 11:54:11 +0100 Subject: [PATCH 162/219] chore(ci): introduce github workflows (#137) Refs: MON-16173 --- .../.github/actions/delivery/action.yml | 50 ++++ .../.github/docker/Dockerfile.packaging-alma8 | 12 + .../.github/docker/Dockerfile.packaging-alma9 | 12 + .../docker/Dockerfile.packaging-bullseye | 13 + .../.github/workflows/docker-packaging.yml | 45 +++ .../.github/workflows/get-environment.yml | 34 +++ .../stream-connectors-dependencies.yml | 266 ++++++++++++++++++ .../workflows/stream-connectors-lib.yml | 167 +++++++++++ .../.github/workflows/stream-connectors.yml | 258 +++++++++++++++++ .../lua-cffi/packaging/deb/control | 17 ++ .../lua-cffi/packaging/deb/copyright | 23 ++ .../lua-cffi/packaging/deb/install | 1 + .../dependencies/lua-cffi/packaging/deb/rules | 6 + .../lua-cffi/packaging/deb/source/format | 1 + .../lua-cffi/packaging/rpm/lua-cffi.spec | 47 ++++ .../lua-curl/packaging/rpm/lua-curl.spec | 44 +++ .../dependencies/lua-tz/packaging/deb/control | 15 + .../lua-tz/packaging/deb/copyright | 23 ++ .../dependencies/lua-tz/packaging/deb/dirs | 1 + .../dependencies/lua-tz/packaging/deb/install | 1 + .../dependencies/lua-tz/packaging/deb/rules | 8 + .../lua-tz/packaging/deb/source/format | 1 + .../lua-tz/packaging/rpm/lua-tz.spec | 43 +++ .../packaging/connectors-lib/deb/control | 18 ++ .../packaging/connectors-lib/deb/copyright | 23 ++ .../packaging/connectors-lib/deb/rules | 5 + .../connectors-lib/deb/source/format | 1 + .../rpm/centreon-stream-connectors-lib.spec | 44 +++ .../packaging/connectors/deb/control | 14 + .../packaging/connectors/deb/copyright | 23 ++ .../packaging/connectors/deb/install | 0 .../packaging/connectors/deb/rules | 7 + .../packaging/connectors/deb/source/format | 1 + .../packaging/connectors/rpm/connector.spec | 35 +++ 34 files changed, 1259 insertions(+) create mode 100644 stream-connectors/.github/actions/delivery/action.yml create mode 100644 stream-connectors/.github/docker/Dockerfile.packaging-alma8 create mode 100644 stream-connectors/.github/docker/Dockerfile.packaging-alma9 create mode 100644 stream-connectors/.github/docker/Dockerfile.packaging-bullseye create mode 100644 stream-connectors/.github/workflows/docker-packaging.yml create mode 100644 stream-connectors/.github/workflows/get-environment.yml create mode 100644 stream-connectors/.github/workflows/stream-connectors-dependencies.yml create mode 100644 stream-connectors/.github/workflows/stream-connectors-lib.yml create mode 100644 stream-connectors/.github/workflows/stream-connectors.yml create mode 100644 stream-connectors/dependencies/lua-cffi/packaging/deb/control create mode 100644 stream-connectors/dependencies/lua-cffi/packaging/deb/copyright create mode 100644 stream-connectors/dependencies/lua-cffi/packaging/deb/install create mode 100644 
stream-connectors/dependencies/lua-cffi/packaging/deb/rules create mode 100644 stream-connectors/dependencies/lua-cffi/packaging/deb/source/format create mode 100644 stream-connectors/dependencies/lua-cffi/packaging/rpm/lua-cffi.spec create mode 100644 stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec create mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/control create mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/copyright create mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/dirs create mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/install create mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/rules create mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/source/format create mode 100644 stream-connectors/dependencies/lua-tz/packaging/rpm/lua-tz.spec create mode 100644 stream-connectors/packaging/connectors-lib/deb/control create mode 100644 stream-connectors/packaging/connectors-lib/deb/copyright create mode 100644 stream-connectors/packaging/connectors-lib/deb/rules create mode 100644 stream-connectors/packaging/connectors-lib/deb/source/format create mode 100644 stream-connectors/packaging/connectors-lib/rpm/centreon-stream-connectors-lib.spec create mode 100644 stream-connectors/packaging/connectors/deb/control create mode 100644 stream-connectors/packaging/connectors/deb/copyright create mode 100644 stream-connectors/packaging/connectors/deb/install create mode 100644 stream-connectors/packaging/connectors/deb/rules create mode 100644 stream-connectors/packaging/connectors/deb/source/format create mode 100644 stream-connectors/packaging/connectors/rpm/connector.spec diff --git a/stream-connectors/.github/actions/delivery/action.yml b/stream-connectors/.github/actions/delivery/action.yml new file mode 100644 index 00000000000..1ddc4278a2e --- /dev/null +++ b/stream-connectors/.github/actions/delivery/action.yml @@ -0,0 +1,50 @@ +name: "delivery" +description: "RPM and DEB packages delivery" +inputs: + distrib: + description: "The distribution used for packaging" + required: true + cache_key: + description: "The cached package key" + required: true + artifactory_token: + description: "The token for artifactory" + required: true + stability: + description: "branch stability (stable, testing, unstable, canary)" + required: true + +runs: + using: "composite" + steps: + - name: Build name for RPM + shell: bash + if: ${{ startsWith(inputs.distrib, 'el') }} + run: | + echo "extfile=rpm" >> $GITHUB_ENV + + - name: Build name for DEB + shell: bash + if: ${{ inputs.distrib == 'bullseye' }} + run: | + echo "extfile=deb" >> $GITHUB_ENV + + - name: Use cache files + uses: actions/cache@v3 + with: + path: ./*.${{ env.extfile }} + key: ${{ inputs.cache_key }} + + - uses: jfrog/setup-jfrog-cli@v3 + env: + JF_URL: https://centreon.jfrog.io + JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} + + - name: Deliver packages to artifactory + run: | + if [[ "${{ env.extfile }}" == "rpm" ]] ; then + jf rt upload "*.rpm" "rpm-connector-packs/${{ inputs.distrib }}/${{ inputs.stability }}/noarch/" + elif [[ "${{ env.extfile }}" == "deb" ]] ; then + jf rt upload "*.deb" "apt-connector-packs-${{ inputs.stability }}/pool/" --deb "${{ inputs.distrib }}/main/all" + fi + shell: bash diff --git a/stream-connectors/.github/docker/Dockerfile.packaging-alma8 b/stream-connectors/.github/docker/Dockerfile.packaging-alma8 new file mode 100644 index 00000000000..22ea005e28d --- /dev/null +++ 
b/stream-connectors/.github/docker/Dockerfile.packaging-alma8 @@ -0,0 +1,12 @@ +ARG REGISTRY_URL + +FROM ${REGISTRY_URL}/almalinux:8 + +RUN <> $GITHUB_OUTPUT + shell: bash diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml new file mode 100644 index 00000000000..74db8b6b019 --- /dev/null +++ b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml @@ -0,0 +1,266 @@ +name: stream-connectors-dependencies + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - dependencies/** + push: + branches: + - develop + - master + paths: + - dependencies/** + +jobs: + get-environment: + uses: ./.github/workflows/get-environment.yml + + package-rpm: + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + lib: [lua-cffi, lua-tz, lua-curl] + include: + - distrib: el8 + image: packaging-stream-connectors-alma8 + package_extension: rpm + - distrib: el9 + image: packaging-stream-connectors-alma9 + package_extension: rpm + + name: package ${{ matrix.distrib }} ${{ matrix.lib }} + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + steps: + - uses: actions/checkout@v3 + + - name: Install dependencies + run: | + mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} + + yum install -y yum-utils epel-release + yum config-manager --set-enabled crb || true # alma 9 + yum config-manager --set-enabled powertools || true # alma 8 + + yum install -y git make gcc luarocks meson gcc-c++ cmake libffi libffi-devel lua-devel libcurl-devel + shell: bash + + - if: ${{ matrix.lib == 'lua-cffi' }} + run: | + luarocks install cffi-lua + luarocks show cffi-lua | grep "cffi-lua\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt + cat version.txt + + mkdir ${{ matrix.lib }} + find /usr/ -type f -name "cffi.so" -exec cp {} ${{ matrix.lib }}/ \; + tar czf ~/rpmbuild/SOURCES/${{ matrix.lib }}.tar.gz ${{ matrix.lib }} + working-directory: dependencies/${{ matrix.lib }} + shell: bash + + - if: ${{ matrix.lib == 'lua-tz' }} + run: | + luarocks install luatz + luarocks show luatz | grep "luatz\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt + cat version.txt + + mkdir ${{ matrix.lib }} + cp -rp /usr/share/lua/`lua -e "print(string.sub(_VERSION, 5))"`/luatz/* ${{ matrix.lib }}/ + tar czf ~/rpmbuild/SOURCES/${{ matrix.lib }}.tar.gz ${{ matrix.lib }} + working-directory: dependencies/${{ matrix.lib }} + shell: bash + + - if: ${{ matrix.lib == 'lua-curl' }} + run: | + luarocks install lua-curl + luarocks show lua-curl + luarocks show lua-curl | grep "Lua-cURL\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt + cat version.txt + + mkdir -p ${{ matrix.lib }}/cURL + cp -rp /usr/share/lua/`lua -e "print(string.sub(_VERSION, 5))"`/cURL ${{ matrix.lib }}/ + cp -p /usr/share/lua/`lua -e "print(string.sub(_VERSION, 5))"`/cURL.lua ${{ matrix.lib }}/ + tar czf ~/rpmbuild/SOURCES/${{ matrix.lib }}.tar.gz ${{ matrix.lib }} + working-directory: dependencies/${{ matrix.lib }} + shell: bash + + - run: | + rpmbuild -ba packaging/rpm/${{ matrix.lib }}.spec -D "VERSION `cat version.txt`" + + mv ~/rpmbuild/RPMS/**/*.rpm ../../ + working-directory: 
dependencies/${{ matrix.lib }} + shell: bash + + - uses: actions/cache@v3 + with: + path: ./*.${{ matrix.package_extension }} + key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.lib }}-${{ matrix.distrib }} + + package-deb: + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye] + lib: [lua-cffi, lua-tz] + include: + - distrib: bullseye + image: packaging-stream-connectors-bullseye + package_extension: deb + + name: package ${{ matrix.distrib }} ${{ matrix.lib }} + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + steps: + - uses: actions/checkout@v3 + + - name: Install dependencies + run: | + apt update + apt install -y git make gcc luarocks meson cmake libffi7 libffi-dev lua5.3 liblua5.3-dev + shell: bash + + - if: ${{ matrix.lib == 'lua-cffi' }} + name: Package + run: | + luarocks install cffi-lua + luarocks show cffi-lua | grep "cffi-lua\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt + + PACKAGE_NAME="${{ matrix.lib }}" + PACKAGE_VERSION=`cat version.txt` + + mkdir -p $PACKAGE_NAME-$PACKAGE_VERSION/debian + + cp -rp packaging/deb/* $PACKAGE_NAME-$PACKAGE_VERSION/debian/ + + find /usr/ -type f -name "cffi.so" -exec cp {} $PACKAGE_NAME-$PACKAGE_VERSION/ \; + working-directory: dependencies/${{ matrix.lib }} + shell: bash + + - if: ${{ matrix.lib == 'lua-tz' }} + name: Package + run: | + luarocks install luatz + luarocks show luatz | grep "luatz\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt + + PACKAGE_NAME="${{ matrix.lib }}" + PACKAGE_VERSION=`cat version.txt` + + mkdir -p $PACKAGE_NAME-$PACKAGE_VERSION/debian + cp -rp packaging/deb/* $PACKAGE_NAME-$PACKAGE_VERSION/debian/ + + cp -rp /usr/local/share/lua/5.3/luatz $PACKAGE_NAME-$PACKAGE_VERSION/luatz + working-directory: dependencies/${{ matrix.lib }} + shell: bash + + - name: Package + run: | + PACKAGE_NAME="${{ matrix.lib }}" + PACKAGE_VERSION=`cat version.txt` + + tar czf $PACKAGE_NAME-$PACKAGE_VERSION.tar.gz $PACKAGE_NAME-$PACKAGE_VERSION + + cd $PACKAGE_NAME-$PACKAGE_VERSION + + debmake -f "centreon" -e "contact@centreon.com" -y -r ${{ matrix.distrib }} + debuild-pbuilder --no-lintian + + mv ../*.deb ../../../ + working-directory: dependencies/${{ matrix.lib }} + shell: bash + + - uses: actions/cache@v3 + with: + path: ./*.${{ matrix.package_extension }} + key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.lib }}-${{ matrix.distrib }} + + sign-rpm: + needs: [package-rpm] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + lib: [lua-cffi, lua-tz, lua-curl] + name: sign rpm ${{ matrix.distrib }} ${{ matrix.lib }} + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/rpm-signing:ubuntu + options: -t + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + steps: + - run: apt-get install -y zstd + shell: bash + + - uses: actions/checkout@v3 + + - uses: actions/cache@v3 + with: + path: ./*.rpm + key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} + + - run: echo "HOME=/root" >> $GITHUB_ENV + shell: bash + + - run: rpmsign --addsign ./*.rpm + shell: bash + + - uses: actions/cache@v3 + with: + path: ./*.rpm + key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib 
}} + + deliver-rpm: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, sign-rpm] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + lib: [lua-cffi, lua-tz, lua-curl] + name: deliver ${{ matrix.distrib }} ${{ matrix.lib }} + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Publish RPM packages + uses: ./.github/actions/delivery + with: + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} + + deliver-deb: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package-deb] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye] + lib: [lua-cffi, lua-tz] + name: deliver ${{ matrix.distrib }} ${{ matrix.lib }} + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Publish DEB packages + uses: ./.github/actions/delivery + with: + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.lib }}-${{ matrix.distrib }} diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/stream-connectors/.github/workflows/stream-connectors-lib.yml new file mode 100644 index 00000000000..f3035db3602 --- /dev/null +++ b/stream-connectors/.github/workflows/stream-connectors-lib.yml @@ -0,0 +1,167 @@ +name: stream-connectors-lib + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - modules/centreon-stream-connectors-lib/** + push: + branches: + - develop + - master + paths: + - modules/centreon-stream-connectors-lib/** + +jobs: + get-environment: + uses: ./.github/workflows/get-environment.yml + + package: + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9, bullseye] + include: + - distrib: el8 + image: packaging-stream-connectors-alma8 + package_extension: rpm + - distrib: el9 + image: packaging-stream-connectors-alma9 + package_extension: rpm + - distrib: bullseye + image: packaging-stream-connectors-bullseye + package_extension: deb + name: package ${{ matrix.distrib }} + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + steps: + - uses: actions/checkout@v3 + + - if: ${{ matrix.package_extension == 'rpm' }} + run: | + yum install -y yum-utils + yum config-manager --set-enabled crb || true # alma 9 + yum config-manager --set-enabled powertools || true # alma 8 + yum install -y lua lua-devel + mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} + + cd modules + tar czf ~/rpmbuild/SOURCES/centreon-stream-connectors-lib.tar.gz centreon-stream-connectors-lib + cd .. 
+ + rpmbuild -ba packaging/connectors-lib/rpm/centreon-stream-connectors-lib.spec + + mv ~/rpmbuild/RPMS/**/*.rpm ./ + shell: bash + + - if: ${{ matrix.package_extension == 'deb' }} + run: | + PACKAGE_VERSION=`cat packaging/connectors-lib/deb/control | grep "^Version:" | cut -d" " -f2` + mkdir -p centreon-stream-connectors-lib-$PACKAGE_VERSION/debian + + cp -rp modules/centreon-stream-connectors-lib/* centreon-stream-connectors-lib-$PACKAGE_VERSION/ + cp -rp packaging/connectors-lib/deb/* centreon-stream-connectors-lib-$PACKAGE_VERSION/debian/ + + tar czf centreon-stream-connectors-lib-$PACKAGE_VERSION.tar.gz centreon-stream-connectors-lib-$PACKAGE_VERSION + + cd centreon-stream-connectors-lib-$PACKAGE_VERSION + + debmake -f "centreon" -e "contact@centreon.com" -y -r ${{ matrix.distrib }} + debuild-pbuilder --no-lintian + shell: bash + + - if: ${{ matrix.package_extension == 'deb' }} + uses: actions/cache@v3 + with: + path: ./*.${{ matrix.package_extension }} + key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + + - if: ${{ matrix.package_extension == 'rpm' }} + uses: actions/cache@v3 + with: + path: ./*.${{ matrix.package_extension }} + key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + + sign-rpm: + needs: [package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + name: sign rpm ${{ matrix.distrib }} + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/rpm-signing:ubuntu + options: -t + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + steps: + - run: apt-get install -y zstd + shell: bash + + - uses: actions/checkout@v3 + + - uses: actions/cache@v3 + with: + path: ./*.rpm + key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} + + - run: echo "HOME=/root" >> $GITHUB_ENV + shell: bash + + - run: rpmsign --addsign ./*.rpm + shell: bash + + - uses: actions/cache@v3 + with: + path: ./*.rpm + key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} + + deliver-rpm: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, sign-rpm] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Publish RPM packages + uses: ./.github/actions/delivery + with: + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} + + deliver-deb: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Publish DEB packages + uses: ./.github/actions/delivery + with: + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} diff --git a/stream-connectors/.github/workflows/stream-connectors.yml b/stream-connectors/.github/workflows/stream-connectors.yml new file mode 100644 index 00000000000..a49f468b98a --- /dev/null +++ b/stream-connectors/.github/workflows/stream-connectors.yml @@ -0,0 
+1,258 @@ +name: stream-connectors + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - centreon-certified/** + push: + branches: + - develop + - master + paths: + - centreon-certified/** + +jobs: + get-environment: + uses: ./.github/workflows/get-environment.yml + + detect-changes: + runs-on: ubuntu-22.04 + outputs: + connectors: ${{ steps.list-connectors.outputs.connectors }} + steps: + - uses: actions/checkout@v3 + + - uses: dorny/paths-filter@v2 + id: filter + with: + base: ${{ github.ref }} + list-files: shell + filters: | + connectors: + - centreon-certified/** + + - name: transform to directories + id: list-connectors + run: | + folders=() + for f in ${{ steps.filter.outputs.connectors_files }}; do + DIR_NAME=($(dirname $f)) + BASE_NAME=($(basename $DIR_NAME)) + echo "Adding $BASE_NAME to folders" + folders+=($BASE_NAME) + done + unique_folders=($(printf "%s\n" "${folders[@]}" | sort -u | tr '\n' ' ')) + echo $unique_folders + echo "connectors=$(jq --compact-output --null-input '$ARGS.positional' --args -- ${unique_folders[@]})" >> $GITHUB_OUTPUT + shell: bash + + package: + if: ${{ needs.detect-changes.outputs.connectors != '[]' }} + needs: [detect-changes] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9, bullseye] + connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} + include: + - distrib: el8 + image: packaging-stream-connectors-alma8 + package_extension: rpm + - distrib: el9 + image: packaging-stream-connectors-alma9 + package_extension: rpm + - distrib: bullseye + image: packaging-stream-connectors-bullseye + package_extension: deb + + name: package ${{ matrix.distrib }} ${{ matrix.connector_path }} + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + steps: + - uses: actions/checkout@v3 + + - name: Add specific dependencies + id: list-dependencies + run: | + MIN_LIB_VERSION="3.0.0" + DEB_DEPENDENCIES="" + RPM_DEPENDENCIES="" + if [ "${{ matrix.connector_path }}" = "kafka" ]; then + DEB_DEPENDENCIES="librdkafka1,lua-cffi" + RPM_DEPENDENCIES="librdkafka,lua-cffi" + elif [ "${{ matrix.connector_path }}" = "pagerduty" ]; then + DEB_DEPENDENCIES="lua-tz" + RPM_DEPENDENCIES="lua-tz" + elif [ "${{ matrix.connector_path }}" = "splunk" ]; then + DEB_DEPENDENCIES="lua-tz" + RPM_DEPENDENCIES="lua-tz" + fi + echo "min_lib_version=$MIN_LIB_VERSION" >> $GITHUB_OUTPUT + echo "deb_dependencies=$DEB_DEPENDENCIES" >> $GITHUB_OUTPUT + echo "rpm_dependencies=$RPM_DEPENDENCIES" >> $GITHUB_OUTPUT + shell: bash + + - if: ${{ matrix.package_extension == 'rpm' }} + run: | + mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} + + PACKAGE_NAME="centreon-stream-connector-`basename ${{ matrix.connector_path }}`" + PACKAGE_VERSION=`date '+%Y%m%d'` + + mkdir $PACKAGE_NAME + cp centreon-certified/${{ matrix.connector_path }}/*.lua $PACKAGE_NAME/ + + tar czf ~/rpmbuild/SOURCES/$PACKAGE_NAME.tar.gz $PACKAGE_NAME + + touch dependencies.txt + if [ ! 
-z "${{ steps.list-dependencies.outputs.rpm_dependencies }}" ]; then + for dependency in "${{ steps.list-dependencies.outputs.rpm_dependencies }}"; do + echo "Requires: $dependency" >> dependencies.txt + done + fi + sed -i '/Requires:/r dependencies.txt' packaging/connectors/rpm/connector.spec + + touch files.txt + for file in $PACKAGE_NAME/*.lua; do + echo "%{_datadir}/centreon-broker/lua/`basename $file`" >> files.txt + done + sed -i '/%files/r files.txt' packaging/connectors/rpm/connector.spec + + rpmbuild -ba packaging/connectors/rpm/connector.spec -D "PACKAGE_NAME $PACKAGE_NAME" -D "VERSION $PACKAGE_VERSION" -D "MIN_LIB_VERSION ${{ steps.list-dependencies.outputs.min_lib_version }}" + + mv ~/rpmbuild/RPMS/**/*.rpm ./ + shell: bash + + - if: ${{ matrix.package_extension == 'deb' }} + run: | + PACKAGE_NAME="centreon-stream-connector-`basename ${{ matrix.connector_path }}`" + PACKAGE_VERSION=`date '+%Y%m%d'` + + mkdir -p $PACKAGE_NAME-$PACKAGE_VERSION/debian + + cp centreon-certified/${{ matrix.connector_path }}/*.lua $PACKAGE_NAME-$PACKAGE_VERSION/ + + sed -i "s#@PACKAGE_NAME@#$PACKAGE_NAME#g" packaging/connectors/deb/control + sed -i "s#@MIN_LIB_VERSION@#${{ steps.list-dependencies.outputs.min_lib_version }}#g" packaging/connectors/deb/control + + touch dependencies.txt + if [ ! -z "${{ steps.list-dependencies.outputs.deb_dependencies }}" ]; then + for dependency in "${{ steps.list-dependencies.outputs.deb_dependencies }}"; do + echo " $dependency," >> dependencies.txt + done + fi + sed -i '/^Depends:/r dependencies.txt' packaging/connectors/deb/control + + for file in $PACKAGE_NAME-$PACKAGE_VERSION/*.lua; do + echo "`basename $file` /usr/share/centreon-broker/lua/`basename $file`" >> packaging/connectors/deb/install + done + + cp -rp packaging/connectors/deb/* $PACKAGE_NAME-$PACKAGE_VERSION/debian/ + + tar czf $PACKAGE_NAME-$PACKAGE_VERSION.tar.gz $PACKAGE_NAME-$PACKAGE_VERSION + + cd $PACKAGE_NAME-$PACKAGE_VERSION + + debmake -f "centreon" -e "contact@centreon.com" -y -r ${{ matrix.distrib }} + debuild-pbuilder --no-lintian + shell: bash + + - if: ${{ matrix.package_extension == 'deb' }} + uses: actions/cache@v3 + with: + path: ./*.${{ matrix.package_extension }} + key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} + + - if: ${{ matrix.package_extension == 'rpm' }} + uses: actions/cache@v3 + with: + path: ./*.${{ matrix.package_extension }} + key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} + + sign-rpm: + needs: [detect-changes, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} + name: sign rpm ${{ matrix.distrib }} ${{ matrix.connector_path }} + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/rpm-signing:ubuntu + options: -t + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + steps: + - run: apt-get install -y zstd + shell: bash + + - uses: actions/checkout@v3 + + - uses: actions/cache@v3 + with: + path: ./*.rpm + key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} + + - run: echo "HOME=/root" >> $GITHUB_ENV + shell: bash + + - run: rpmsign --addsign ./*.rpm + shell: bash + + - uses: actions/cache@v3 + with: + path: ./*.rpm + key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ 
matrix.connector_path }}-${{ matrix.distrib }} + + deliver-rpm: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, detect-changes, sign-rpm] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} + name: deliver ${{ matrix.distrib }} ${{ matrix.connector_path }} + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Publish RPM packages + uses: ./.github/actions/delivery + with: + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} + + deliver-deb: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, detect-changes, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye] + connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} + name: deliver ${{ matrix.distrib }} ${{ matrix.connector_path }} + + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Publish DEB packages + uses: ./.github/actions/delivery + with: + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.connector_path }}-${{ matrix.distrib }} diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/control b/stream-connectors/dependencies/lua-cffi/packaging/deb/control new file mode 100644 index 00000000000..1ce8a9278d9 --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/deb/control @@ -0,0 +1,17 @@ +Source: lua-cffi +Section: interpreters +Priority: optional +Maintainer: Centreon +Build-Depends: + debhelper-compat (=12), + dh-lua (>= 21) +Standards-Version: 4.5.0 +Homepage: https://wwww.centreon.com + +Package: lua-cffi +Architecture: all +Depends: + libffi7, + libffi-dev, + lua5.3 +Description: lua cffi library diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/copyright b/stream-connectors/dependencies/lua-cffi/packaging/deb/copyright new file mode 100644 index 00000000000..25875f802b4 --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/deb/copyright @@ -0,0 +1,23 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: lua-cffi +Upstream-Contact: Centreon +Source: https://www.centreon.com + +Files: * +Copyright: 2023 Centreon +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + https://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the complete text of the Apache version 2.0 license + can be found in "/usr/share/common-licenses/Apache-2.0". 
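
The lua-cffi dependency packaged here exists so that connectors binding C libraries (notably the kafka connector and its librdkafka dependency, per the workflow above) can declare and call C functions from Lua. As a minimal sketch of what the cffi-lua rock enables, assuming it is installed on the Lua C path (its API is modeled on the LuaJIT FFI; treat the exact calls as illustrative):

    -- minimal sketch: declare a libc function and call it through the FFI
    local cffi = require("cffi")
    cffi.cdef("int abs(int);")   -- make libc's abs() visible to Lua
    print(cffi.C.abs(-42))       --> 42
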
+ diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/install b/stream-connectors/dependencies/lua-cffi/packaging/deb/install new file mode 100644 index 00000000000..5eb40401080 --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/deb/install @@ -0,0 +1 @@ +cffi.so usr/lib64/lua/5.3 diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/rules b/stream-connectors/dependencies/lua-cffi/packaging/deb/rules new file mode 100644 index 00000000000..d8309f67d01 --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/deb/rules @@ -0,0 +1,6 @@ +#!/usr/bin/make -f + +export DEB_BUILD_MAINT_OPTIONS = hardening=+all + +%: + dh $@ diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/source/format b/stream-connectors/dependencies/lua-cffi/packaging/deb/source/format new file mode 100644 index 00000000000..163aaf8d82b --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/deb/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/stream-connectors/dependencies/lua-cffi/packaging/rpm/lua-cffi.spec b/stream-connectors/dependencies/lua-cffi/packaging/rpm/lua-cffi.spec new file mode 100644 index 00000000000..9f5251d6c2f --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/rpm/lua-cffi.spec @@ -0,0 +1,47 @@ +%{!?luaver: %global luaver %(lua -e "print(string.sub(_VERSION, 5))" || echo 0)} +%global luapkgdir %{_datadir}/lua/%{luaver} +%global lualibdir %{_libdir}/lua/%{luaver} +%global debug_package %{nil} + +Name: lua-cffi +Version: %{VERSION} +Release: 1%{?dist} +Summary: lua cffi + +Group: Applications/System +License: Apache-2.0 +URL: https://www.centreon.com +Packager: Centreon +Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ + +Source0: %{name}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: lua +BuildRequires: lua-devel +BuildRequires: libffi +BuildRequires: libffi-devel + +Requires: lua +Requires: libffi +Requires: libffi-devel + +%description +lua cffi library + +%prep +%setup -q -n %{name} + +%build + +%install +%{__install} -d $RPM_BUILD_ROOT%{lualibdir} +%{__cp} -p ./cffi.so $RPM_BUILD_ROOT%{lualibdir}/cffi.so + +%clean +%{__rm} -rf $RPM_BUILD_ROOT + +%files +%{lualibdir}/cffi.so + +%changelog diff --git a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec new file mode 100644 index 00000000000..23e6862d184 --- /dev/null +++ b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec @@ -0,0 +1,44 @@ +%{!?luaver: %global luaver %(lua -e "print(string.sub(_VERSION, 5))" || echo 0)} +%global luapkgdir %{_datadir}/lua/%{luaver} +%global lualibdir %{_libdir}/lua/%{luaver} +%global debug_package %{nil} + +Name: lua-curl +Version: %{VERSION} +Release: 1%{?dist} +Summary: lua curl + +Group: Applications/System +License: Apache-2.0 +URL: https://www.centreon.com +Packager: Centreon +Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ + +Source0: %{name}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: lua +BuildRequires: lua-devel + +Requires: lua + +%description +lua curl library + +%prep +%setup -q -n %{name} + +%build + +%install +%{__install} -d $RPM_BUILD_ROOT%{luapkgdir}/cURL +%{__cp} -rp ./* $RPM_BUILD_ROOT%{luapkgdir}/ + +%clean +%{__rm} -rf $RPM_BUILD_ROOT + +%files +%{luapkgdir}/cURL.lua +%{luapkgdir}/cURL + +%changelog diff 
--git a/stream-connectors/dependencies/lua-tz/packaging/deb/control b/stream-connectors/dependencies/lua-tz/packaging/deb/control new file mode 100644 index 00000000000..54c7a1ed33a --- /dev/null +++ b/stream-connectors/dependencies/lua-tz/packaging/deb/control @@ -0,0 +1,15 @@ +Source: lua-tz +Section: interpreters +Priority: optional +Maintainer: Centreon +Build-Depends: + debhelper-compat (=12), + dh-lua (>= 21) +Standards-Version: 4.5.0 +Homepage: https://wwww.centreon.com + +Package: lua-tz +Architecture: all +Depends: + lua5.3 +Description: lua tz library diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/copyright b/stream-connectors/dependencies/lua-tz/packaging/deb/copyright new file mode 100644 index 00000000000..f495613e125 --- /dev/null +++ b/stream-connectors/dependencies/lua-tz/packaging/deb/copyright @@ -0,0 +1,23 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: lua-tz +Upstream-Contact: Centreon +Source: https://www.centreon.com + +Files: * +Copyright: 2023 Centreon +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + https://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the complete text of the Apache version 2.0 license + can be found in "/usr/share/common-licenses/Apache-2.0". + diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/dirs b/stream-connectors/dependencies/lua-tz/packaging/deb/dirs new file mode 100644 index 00000000000..38d80e9ce7e --- /dev/null +++ b/stream-connectors/dependencies/lua-tz/packaging/deb/dirs @@ -0,0 +1 @@ +/usr/local/share/lua/5.3/luatz diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/install b/stream-connectors/dependencies/lua-tz/packaging/deb/install new file mode 100644 index 00000000000..ac189cc0100 --- /dev/null +++ b/stream-connectors/dependencies/lua-tz/packaging/deb/install @@ -0,0 +1 @@ +luatz/* /usr/local/share/lua/5.3/luatz diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/rules b/stream-connectors/dependencies/lua-tz/packaging/deb/rules new file mode 100644 index 00000000000..4c83552dc55 --- /dev/null +++ b/stream-connectors/dependencies/lua-tz/packaging/deb/rules @@ -0,0 +1,8 @@ +#!/usr/bin/make -f + +export DEB_BUILD_MAINT_OPTIONS = hardening=+all + +%: + dh $@ + +override_dh_usrlocal: diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/source/format b/stream-connectors/dependencies/lua-tz/packaging/deb/source/format new file mode 100644 index 00000000000..163aaf8d82b --- /dev/null +++ b/stream-connectors/dependencies/lua-tz/packaging/deb/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/stream-connectors/dependencies/lua-tz/packaging/rpm/lua-tz.spec b/stream-connectors/dependencies/lua-tz/packaging/rpm/lua-tz.spec new file mode 100644 index 00000000000..8477f42447f --- /dev/null +++ b/stream-connectors/dependencies/lua-tz/packaging/rpm/lua-tz.spec @@ -0,0 +1,43 @@ +%{!?luaver: %global luaver %(lua -e "print(string.sub(_VERSION, 5))" || echo 0)} +%global luapkgdir %{_datadir}/lua/%{luaver} +%global lualibdir %{_libdir}/lua/%{luaver} 
+%global debug_package %{nil} + +Name: lua-tz +Version: %{VERSION} +Release: 1%{?dist} +Summary: lua tz + +Group: Applications/System +License: Apache-2.0 +URL: https://www.centreon.com +Packager: Centreon +Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ + +Source0: %{name}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: lua +BuildRequires: lua-devel + +Requires: lua + +%description +lua tz library + +%prep +%setup -q -n %{name} + +%build + +%install +%{__install} -d $RPM_BUILD_ROOT%{luapkgdir}/luatz +%{__cp} -p ./* $RPM_BUILD_ROOT%{luapkgdir}/luatz + +%clean +%{__rm} -rf $RPM_BUILD_ROOT + +%files +%{luapkgdir}/luatz + +%changelog diff --git a/stream-connectors/packaging/connectors-lib/deb/control b/stream-connectors/packaging/connectors-lib/deb/control new file mode 100644 index 00000000000..8a366c6385e --- /dev/null +++ b/stream-connectors/packaging/connectors-lib/deb/control @@ -0,0 +1,18 @@ +Source: centreon-stream-connectors-lib +Section: interpreters +Priority: optional +Maintainer: Centreon +Version: 3.6.0 +Build-Depends: + debhelper-compat (=12), + dh-lua (>= 21) +Standards-Version: 4.5.0 +Homepage: https://wwww.centreon.com + +Package: centreon-stream-connectors-lib +Architecture: all +Depends: + centreon-broker-core (>= 22.04.0), + lua-socket (>= 3.0~), + lua-curl +Description: Centreon stream connectors lib for lua modules diff --git a/stream-connectors/packaging/connectors-lib/deb/copyright b/stream-connectors/packaging/connectors-lib/deb/copyright new file mode 100644 index 00000000000..e874fbccb2b --- /dev/null +++ b/stream-connectors/packaging/connectors-lib/deb/copyright @@ -0,0 +1,23 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: centreon-stream-connectors-lib +Upstream-Contact: Centreon +Source: https://www.centreon.com + +Files: * +Copyright: 2023 Centreon +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + https://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the complete text of the Apache version 2.0 license + can be found in "/usr/share/common-licenses/Apache-2.0". 
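
For reference, once the centreon-stream-connectors-lib package described above is installed in the system Lua path, a stream connector loads the packaged modules with plain require calls. A minimal sketch, assuming the modules resolve on package.path and using the constructor signatures seen in the certified connectors below:

    -- module names as declared in the packaging above
    local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
    local sc_common = require("centreon-stream-connectors-lib.sc_common")

    -- sc_logger.new(logfile, log_level)
    local logger = sc_logger.new("/var/log/centreon-broker/test.log", 1)
    local common = sc_common.new(logger)
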
+ diff --git a/stream-connectors/packaging/connectors-lib/deb/rules b/stream-connectors/packaging/connectors-lib/deb/rules new file mode 100644 index 00000000000..edf5c6cdafc --- /dev/null +++ b/stream-connectors/packaging/connectors-lib/deb/rules @@ -0,0 +1,5 @@ +#!/usr/bin/make -f + +%: + dh $@ --buildsystem=lua --with lua + diff --git a/stream-connectors/packaging/connectors-lib/deb/source/format b/stream-connectors/packaging/connectors-lib/deb/source/format new file mode 100644 index 00000000000..163aaf8d82b --- /dev/null +++ b/stream-connectors/packaging/connectors-lib/deb/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/stream-connectors/packaging/connectors-lib/rpm/centreon-stream-connectors-lib.spec b/stream-connectors/packaging/connectors-lib/rpm/centreon-stream-connectors-lib.spec new file mode 100644 index 00000000000..e07fde0e8d2 --- /dev/null +++ b/stream-connectors/packaging/connectors-lib/rpm/centreon-stream-connectors-lib.spec @@ -0,0 +1,44 @@ +%{!?luaver: %global luaver %(lua -e "print(string.sub(_VERSION, 5))" || echo 0)} +%global luapkgdir %{_datadir}/lua/%{luaver} + +Name: centreon-stream-connectors-lib +Version: 3.6.0 +Release: 1%{?dist} +Summary: Centreon stream connectors lua modules + +Group: Applications/System +License: Apache-2.0 +URL: https://www.centreon.com +Packager: Centreon +Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ + +Source0: %{name}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +BuildArch: noarch + +BuildRequires: lua +BuildRequires: lua-devel + +Requires: centreon-broker-core >= 22.04.0 +Requires: lua-socket >= 3.0 +Requires: lua-curl + +%description +Those modules provides helpful methods to create stream connectors for Centreon + +%prep +%setup -q -n %{name} + +%build + +%install +%{__install} -d $RPM_BUILD_ROOT%{luapkgdir}/centreon-stream-connectors-lib +%{__cp} -pr ./* $RPM_BUILD_ROOT%{luapkgdir}/centreon-stream-connectors-lib + +%clean +%{__rm} -rf $RPM_BUILD_ROOT + +%files +%{luapkgdir}/centreon-stream-connectors-lib + +%changelog diff --git a/stream-connectors/packaging/connectors/deb/control b/stream-connectors/packaging/connectors/deb/control new file mode 100644 index 00000000000..a407c3e92eb --- /dev/null +++ b/stream-connectors/packaging/connectors/deb/control @@ -0,0 +1,14 @@ +Source: @PACKAGE_NAME@ +Section: interpreters +Priority: optional +Maintainer: Centreon +Build-Depends: + debhelper-compat (=12) +Standards-Version: 4.5.0 +Homepage: https://wwww.centreon.com + +Package: @PACKAGE_NAME@ +Architecture: all +Depends: + centreon-stream-connectors-lib (>=@MIN_LIB_VERSION@) +Description: Centreon stream connectors lua modules diff --git a/stream-connectors/packaging/connectors/deb/copyright b/stream-connectors/packaging/connectors/deb/copyright new file mode 100644 index 00000000000..e874fbccb2b --- /dev/null +++ b/stream-connectors/packaging/connectors/deb/copyright @@ -0,0 +1,23 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: centreon-stream-connectors-lib +Upstream-Contact: Centreon +Source: https://www.centreon.com + +Files: * +Copyright: 2023 Centreon +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + https://www.apache.org/licenses/LICENSE-2.0 + . 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the complete text of the Apache version 2.0 license + can be found in "/usr/share/common-licenses/Apache-2.0". + diff --git a/stream-connectors/packaging/connectors/deb/install b/stream-connectors/packaging/connectors/deb/install new file mode 100644 index 00000000000..e69de29bb2d diff --git a/stream-connectors/packaging/connectors/deb/rules b/stream-connectors/packaging/connectors/deb/rules new file mode 100644 index 00000000000..d1cbe832789 --- /dev/null +++ b/stream-connectors/packaging/connectors/deb/rules @@ -0,0 +1,7 @@ +#!/usr/bin/make -f + +export DEB_BUILD_MAINT_OPTIONS = hardening=+all + +%: + dh $@ + diff --git a/stream-connectors/packaging/connectors/deb/source/format b/stream-connectors/packaging/connectors/deb/source/format new file mode 100644 index 00000000000..163aaf8d82b --- /dev/null +++ b/stream-connectors/packaging/connectors/deb/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/stream-connectors/packaging/connectors/rpm/connector.spec b/stream-connectors/packaging/connectors/rpm/connector.spec new file mode 100644 index 00000000000..e27629187f9 --- /dev/null +++ b/stream-connectors/packaging/connectors/rpm/connector.spec @@ -0,0 +1,35 @@ +Name: %{PACKAGE_NAME} +Version: %{VERSION} +Release: 1%{?dist} +Summary: Centreon stream connectors lua modules + +Group: Applications/System +License: Apache-2.0 +URL: https://www.centreon.com +Packager: Centreon +Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ + +Source0: %{name}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +BuildArch: noarch + +Requires: centreon-stream-connectors-lib >= %{MIN_LIB_VERSION} + +%description +Those modules provides helpful methods to create stream connectors for Centreon + +%prep +%setup -q -n %{name} + +%build + +%install +%{__install} -d $RPM_BUILD_ROOT%{_datadir}/centreon-broker/lua +%{__cp} -pr ./*.lua $RPM_BUILD_ROOT%{_datadir}/centreon-broker/lua + +%clean +%{__rm} -rf $RPM_BUILD_ROOT + +%files + +%changelog From b456abe63fb0dd800f6b084aa25e290fe44d6f3a Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 2 Mar 2023 17:03:15 +0100 Subject: [PATCH 163/219] add opsgenie apiv2 (#139) --- .../opsgenie/opsgenie-events-apiv2.lua | 487 ++++++++++++++++++ 1 file changed, 487 insertions(+) create mode 100644 stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua diff --git a/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua new file mode 100644 index 00000000000..43b8d0df4b0 --- /dev/null +++ b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua @@ -0,0 +1,487 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Opsgenie Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = 
require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "app_api_token" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/opsgenie-events.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + --params.max_buffer_size = 1 + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.app_api_token = params.app_api_token + self.sc_params.params.integration_api_token = params.integration_api_token + self.sc_params.params.api_url = params.api_url or "https://api.opsgenie.com" + self.sc_params.params.alerts_api_endpoint = params.alerts_api_endpoint or "/v2/alerts" + self.sc_params.params.incident_api_endpoint = params.incident_api_endpoint or "/v1/incidents/create" + self.sc_params.params.ba_incident_tags = params.ba_incident_tags or "centreon,application" + self.sc_params.params.enable_incident_tags = params.enable_incident_tags or 1 + self.sc_params.params.get_bv = params.get_bv or 1 + self.sc_params.params.enable_severity = params.enable_severity or 0 + self.sc_params.params.default_priority = params.default_priority + self.sc_params.params.priority_mapping = params.priority_mapping or "P1=1,P2=2,P3=3,P4=4,P5=5" + self.sc_params.params.opsgenie_priorities = params.opsgenie_priorities or "P1,P2,P3,P4,P5" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.timestamp_conversion_format = params.timestamp_conversion_format or "%Y-%m-%d %H:%M:%S" + + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- need a queue for each type of event because ba status aren't sent on the same endpoint + self.sc_params.params.send_mixed_events = 0 + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the 
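
For orientation, here is a hypothetical Broker output configuration for this connector, written as the Lua table that init() would hand to this constructor. Only app_api_token is mandatory; every other key falls back to the defaults set above (names as defined in this file, token value is a placeholder):

    -- hypothetical configuration sketch, not a working token
    local conf = {
      app_api_token = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
      api_url = "https://api.opsgenie.com",           -- default, shown for clarity
      enable_severity = 1,                            -- opt in to severity -> priority mapping
      priority_mapping = "P1=1,P2=2,P3=3,P4=4,P5=5",  -- default mapping string
      logfile = "/var/log/centreon-broker/opsgenie-events.log"
    }
    -- queue = EventQueue.new(conf)
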
+
+  -- need a queue for each type of event because ba statuses aren't sent to the same endpoint
+  self.sc_params.params.send_mixed_events = 0
+
+  self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger)
+  self.format_template = self.sc_params:load_event_format_file(true)
+
+  -- only load the custom code file, it is not executed yet
+  if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then
+    self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file))
+  end
+
+  self.sc_params:build_accepted_elements_info()
+  self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger)
+
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.state_to_alert_type_mapping = {
+    [categories.neb.id] = {
+      [elements.host_status.id] = {
+        [0] = "info",
+        [1] = "error",
+        [2] = "warning"
+      },
+      [elements.service_status.id] = {
+        [0] = "info",
+        [1] = "warning",
+        [2] = "error",
+        [3] = "warning"
+      }
+    }
+  }
+
+  self.format_event = {
+    [categories.neb.id] = {
+      [elements.host_status.id] = function () return self:format_event_host() end,
+      [elements.service_status.id] = function () return self:format_event_service() end
+    },
+    [categories.bam.id] = {
+      [elements.ba_status.id] = function () return self:format_event_ba() end
+    }
+  }
+
+  self.send_data_method = {
+    [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end
+  }
+
+  self.build_payload_method = {
+    [1] = function (payload, event) return self:build_payload(payload, event) end
+  }
+
+  -- handle metadata for queues
+  self.sc_flush:add_queue_metadata(
+    categories.neb.id,
+    elements.host_status.id,
+    {
+      api_endpoint = self.sc_params.params.alerts_api_endpoint,
+      token = self.sc_params.params.app_api_token
+    }
+  )
+  self.sc_flush:add_queue_metadata(
+    categories.neb.id,
+    elements.service_status.id,
+    {
+      api_endpoint = self.sc_params.params.alerts_api_endpoint,
+      token = self.sc_params.params.app_api_token
+    }
+  )
+
+  -- handle opsgenie priority mapping
+  local severity_to_priority = {}
+  self.priority_mapping = {}
+
+  if self.sc_params.params.enable_severity == 1 then
+    self.priority_matching_list = self.sc_common:split(self.sc_params.params.priority_mapping, ',')
+
+    for _, priority_group in ipairs(self.priority_matching_list) do
+      severity_to_priority = self.sc_common:split(priority_group, '=')
+
+      -- reject the mapping if the parsed priority is not a known Opsgenie priority
+      if string.match(self.sc_params.params.opsgenie_priorities, severity_to_priority[1]) == nil then
+        self.sc_logger:error("[EventQueue.new]: severity is enabled but the priority configuration is wrong. configured matching: "
+          .. self.sc_params.params.priority_mapping .. ", invalid parsed priority: " .. severity_to_priority[1]
+          .. ", known Opsgenie priorities: " .. self.sc_params.params.opsgenie_priorities
+          .. ". Consider adding your priority to the opsgenie_priorities list if the parsed priority is valid")
+        break
+      end
+
+      self.priority_mapping[severity_to_priority[2]] = severity_to_priority[1]
+    end
+  end
+
+  -- return EventQueue object
+  setmetatable(self, { __index = EventQueue })
+  return self
+end
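
To illustrate the severity handling above: with the default priority_mapping string, the loop builds a lookup table keyed by the Centreon severity (as a string) whose values are Opsgenie priorities. A self-contained sketch of the same parsing, using string.gmatch in place of sc_common:split:

    -- "P1=1,P2=2,..." becomes { ["1"] = "P1", ["2"] = "P2", ... }
    local priority_mapping = {}
    for group in string.gmatch("P1=1,P2=2,P3=3,P4=4,P5=5", "[^,]+") do
      local priority, severity = string.match(group, "^(.+)=(.+)$")
      priority_mapping[severity] = priority
    end
    assert(priority_mapping["3"] == "P3")
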
+
+function EventQueue:get_priority()
+  local severity = nil
+  local event = self.sc_event.event
+  local params = self.sc_params.params
+
+  -- get the appropriate severity depending on the event type (service|host)
+  if event.service_id == nil then
+    self.sc_logger:debug("[EventQueue:get_priority]: getting severity for host: " .. event.host_id)
+    severity = event.cache.severity.host
+  else
+    self.sc_logger:debug("[EventQueue:get_priority]: getting severity for service: " .. event.service_id)
+    severity = event.cache.severity.service
+  end
+
+  -- find the opsgenie priority depending on the found severity
+  local matching_priority = self.priority_mapping[tostring(severity)]
+
+  if not matching_priority then
+    return params.default_priority
+  end
+
+  return matching_priority
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event method
+--------------------------------------------------------------------------------
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  local template = self.sc_params.params.format_template[category][element]
+
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+  self.sc_event.event.formated_event = {}
+
+  if self.format_template and template ~= nil and template ~= "" then
+    self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true)
+  else
+    -- can't format the event if the stream connector does not handle this kind of event and it is not handled with a template file
+    if not self.format_event[category][element] then
+      self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+        .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+        .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+-- https://docs.opsgenie.com/docs/alert-api#create-alert
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+  local state = self.sc_params.params.status_mapping[event.category][event.element][event.state]
+
+  self.sc_event.event.formated_event = {
+    message = string.sub(os.date(self.sc_params.params.timestamp_conversion_format, event.last_update)
+      .. " " .. event.cache.host.name .. " is " .. state, 1, 130),
+    description = string.sub(event.output, 1, 15000),
+    alias = string.sub(event.cache.host.name .. "_" .. state, 1, 512)
+  }
+
+  local priority = self:get_priority()
+  if priority then
+    self.sc_event.event.formated_event.priority = priority
+  end
+end
+
+-- https://docs.opsgenie.com/docs/alert-api#create-alert
+function EventQueue:format_event_service()
+  local event = self.sc_event.event
+  local state = self.sc_params.params.status_mapping[event.category][event.element][event.state]
+
+  self.sc_event.event.formated_event = {
+    message = string.sub(os.date(self.sc_params.params.timestamp_conversion_format, event.last_update)
+      .. " " .. event.cache.host.name .. " // " .. event.cache.service.description .. " is " .. state, 1, 130),
+    description = string.sub(event.output, 1, 15000),
+    alias = string.sub(event.cache.host.name .. "_" .. event.cache.service.description .. "_" .. state, 1, 512)
+  }
+
+  local priority = self:get_priority()
+  if priority then
+    self.sc_event.event.formated_event.priority = priority
+  end
+end
+
+-- https://docs.opsgenie.com/docs/incident-api#create-incident
+function EventQueue:format_event_ba()
+  local event = self.sc_event.event
+  local state = self.sc_params.params.status_mapping[event.category][event.element][event.state]
+
+  self.sc_event.event.formated_event = {
+    message = string.sub(event.cache.ba.ba_name .. " is " .. state .. ", health level reached " .. event.level_nominal, 1, 130)
+  }
+
+  if self.sc_params.params.enable_incident_tags == 1 then
+    local tags = {}
+
+    for _, bv_info in ipairs(event.cache.bvs) do
+      -- an incident can't have more than 20 tags
+      if #tags < 20 then
+        self.sc_logger:info("[EventQueue:format_event_ba]: add bv name: " .. tostring(bv_info.bv_name) .. " to list of tags")
+        table.insert(tags, string.sub(bv_info.bv_name, 1, 50))
+      end
+    end
+
+    local custom_tags = self.sc_common:split(self.sc_params.params.ba_incident_tags, ",")
+    for _, tag_name in ipairs(custom_tags) do
+      -- an incident can't have more than 20 tags
+      if #tags < 20 then
+        self.sc_logger:info("[EventQueue:format_event_ba]: add custom tag: " .. tostring(tag_name) .. " to list of tags")
+        table.insert(tags, string.sub(tag_name, 1, 50))
+      end
+    end
+
+    self.sc_event.event.formated_event.tags = tags
+  end
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {string} json encoded string
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {string} json encoded string
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = broker.json_encode(event)
+  else
+    payload = payload .. "," .. broker.json_encode(event)
+  end
+
+  return payload
+end
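
To make the truncation limits in the formatting methods concrete, here is roughly the table format_event_service would build for a hypothetical CRITICAL service check (host, service and output are invented; the length caps follow the Opsgenie create-alert documentation linked above):

    -- illustrative only: a formated_event table before JSON encoding
    local formated_event = {
      message = "2023-03-02 17:03:15 srv-db-01 // MySQL is CRITICAL",  -- string.sub(..., 1, 130)
      description = "CRITICAL: connection refused",                    -- string.sub(..., 1, 15000)
      alias = "srv-db-01_MySQL_CRITICAL",                              -- string.sub(..., 1, 512)
      priority = "P2"  -- only present when get_priority() found a match
    }

Also worth noting: build_payload joins successive JSON objects with a bare comma instead of wrapping them in an array, so a payload holding more than one event is not valid JSON by itself; in practice the connector is meant to post one alert per request (see the commented-out max_buffer_size = 1 in the constructor). A sketch of that behaviour, assuming broker.json_encode as provided by the Broker Lua runtime:

    -- two queued events serialize to: {"alias":"a"},{"alias":"b"}
    local payload = nil
    for _, event in ipairs({ { alias = "a" }, { alias = "b" } }) do
      local encoded = broker.json_encode(event)
      payload = payload and (payload .. "," .. encoded) or encoded
    end
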
+  self.sc_logger:info("[EventQueue:send_data]: Opsgenie address is: " .. tostring(url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(payload)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+
+  -- according to opsgenie documentation "Create alert requests are processed asynchronously, therefore valid requests are responded with HTTP status 202 - Accepted"
+  -- https://docs.opsgenie.com/docs/alert-api#create-alert
+  if http_response_code == 202 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function, called by broker with the output configuration
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_event:is_valid_element() then
+      -- format event if it is validated
+      if queue.sc_event:is_valid_event() then
+        queue:format_accepted_event()
+      end
+    -- log why the event has been dropped
+    else
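+      -- [editor's note] A condensed sketch of the retention contract between
+      -- write() above and the flush() function below (broker keeps re-offering
+      -- events as long as flush() returns false):
+      --
+      --   queues empty                               -> true  (nothing to do)
+      --   last global flush older than
+      --   max_all_queues_age                         -> flush all, true/false
+      --   queues_size > max_buffer_size              -> flush all, true/false
+      --   otherwise                                  -> false (keep buffering)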
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end

From 85e52c1cfb2617b1a7cbd914a042ff95713cc0d7 Mon Sep 17 00:00:00 2001
From: Kevin Duret
Date: Fri, 24 Mar 2023 10:44:50 +0100
Subject: [PATCH 164/219] fix(delivery): restore release with timestamp/hash on unstable (#141)

---
 .../.github/actions/deb-delivery/action.yml  | 43 +++++++++++
 .../.github/actions/delivery/action.yml      | 50 -------------
 .../.github/actions/rpm-delivery/action.yml  | 72 +++++++++++++++++++
 .../stream-connectors-dependencies.yml       |  7 +-
 .../workflows/stream-connectors-lib.yml      |  7 +-
 .../.github/workflows/stream-connectors.yml  |  7 +-
 6 files changed, 130 insertions(+), 56 deletions(-)
 create mode 100644 stream-connectors/.github/actions/deb-delivery/action.yml
 delete mode 100644 stream-connectors/.github/actions/delivery/action.yml
 create mode 100644 stream-connectors/.github/actions/rpm-delivery/action.yml

diff --git a/stream-connectors/.github/actions/deb-delivery/action.yml b/stream-connectors/.github/actions/deb-delivery/action.yml
new file mode 100644
index 00000000000..6b3ef265a1e
--- /dev/null
+++ b/stream-connectors/.github/actions/deb-delivery/action.yml
@@ -0,0 +1,43 @@
+name: "deb-delivery"
+description: "Deliver deb packages"
+inputs:
+  distrib:
+    description: "The distribution used for packaging"
+    required: true
+  cache_key:
+    description: "The cached package key"
+    required: true
+  stability:
+    description: "The package stability (stable, testing, unstable)"
+    required: true
+  artifactory_token:
+    description: "Artifactory token"
+    required: true
+
+runs:
+  using: "composite"
+  steps:
+    - name: Use cache DEB files
+      uses: actions/cache@v3
+      with:
+        path: ./*.deb
+        key: ${{ inputs.cache_key }}
+        fail-on-cache-miss: true
+
+    - uses: jfrog/setup-jfrog-cli@v3
+      env:
+        JF_URL: https://centreon.jfrog.io
+        JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }}
+
+    - name: Publish DEBs
+      run: |
+        FILES="*.deb"
+
+        for FILE in $FILES; do
+          echo "[DEBUG] - File: $FILE"
+
+          ARCH=$(echo $FILE | cut -d '_' -f3 | cut -d '.'
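+          # [editor's note] e.g. with a hypothetical FILE="lua-curl_0.3.13-5_amd64.deb",
+          # the 3rd '_'-separated field is "amd64.deb" and the 1st '.'-separated
+          # field of that is "amd64", which is what this command extracts: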
-f1) + + jf rt upload "$FILE" "apt-plugins-${{ inputs.stability }}/pool/" --deb "${{ inputs.distrib }}/main/$ARCH" + done + shell: bash diff --git a/stream-connectors/.github/actions/delivery/action.yml b/stream-connectors/.github/actions/delivery/action.yml deleted file mode 100644 index 1ddc4278a2e..00000000000 --- a/stream-connectors/.github/actions/delivery/action.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: "delivery" -description: "RPM and DEB packages delivery" -inputs: - distrib: - description: "The distribution used for packaging" - required: true - cache_key: - description: "The cached package key" - required: true - artifactory_token: - description: "The token for artifactory" - required: true - stability: - description: "branch stability (stable, testing, unstable, canary)" - required: true - -runs: - using: "composite" - steps: - - name: Build name for RPM - shell: bash - if: ${{ startsWith(inputs.distrib, 'el') }} - run: | - echo "extfile=rpm" >> $GITHUB_ENV - - - name: Build name for DEB - shell: bash - if: ${{ inputs.distrib == 'bullseye' }} - run: | - echo "extfile=deb" >> $GITHUB_ENV - - - name: Use cache files - uses: actions/cache@v3 - with: - path: ./*.${{ env.extfile }} - key: ${{ inputs.cache_key }} - - - uses: jfrog/setup-jfrog-cli@v3 - env: - JF_URL: https://centreon.jfrog.io - JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} - - - name: Deliver packages to artifactory - run: | - if [[ "${{ env.extfile }}" == "rpm" ]] ; then - jf rt upload "*.rpm" "rpm-connector-packs/${{ inputs.distrib }}/${{ inputs.stability }}/noarch/" - elif [[ "${{ env.extfile }}" == "deb" ]] ; then - jf rt upload "*.deb" "apt-connector-packs-${{ inputs.stability }}/pool/" --deb "${{ inputs.distrib }}/main/all" - fi - shell: bash diff --git a/stream-connectors/.github/actions/rpm-delivery/action.yml b/stream-connectors/.github/actions/rpm-delivery/action.yml new file mode 100644 index 00000000000..17ae85ca21a --- /dev/null +++ b/stream-connectors/.github/actions/rpm-delivery/action.yml @@ -0,0 +1,72 @@ +name: "rpm-delivery" +description: "Deliver rpm packages" +inputs: + module_name: + description: "The package module name" + required: true + distrib: + description: "The distribution used for packaging" + required: true + cache_key: + description: "The cached package key" + required: true + stability: + description: "The package stability (stable, testing, unstable)" + required: true + artifactory_token: + description: "Artifactory token" + required: true + +runs: + using: "composite" + steps: + - name: Use cache RPM files + uses: actions/cache@v3 + with: + path: ./*.rpm + key: ${{ inputs.cache_key }} + fail-on-cache-miss: true + + - uses: jfrog/setup-jfrog-cli@v3 + env: + JF_URL: https://centreon.jfrog.io + JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} + + - name: Publish RPMs + run: | + FILES="*.rpm" + + echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" + + if [ -z "${{ inputs.module_name }}" ]; then + echo "module name is required" + exit 1 + fi + + if [ -z "${{ inputs.distrib }}" ]; then + echo "distrib is required" + exit 1 + fi + + mkdir noarch x86_64 + + for FILE in $FILES; do + echo "[DEBUG] - File: $FILE" + + ARCH=$(echo $FILE | grep -oP '(x86_64|noarch)') + + echo "[DEBUG] - Arch: $ARCH" + + mv "$FILE" "$ARCH" + done + + for ARCH in "noarch" "x86_64"; do + if [ "$(ls -A $ARCH)" ]; then + if [ "${{ inputs.stability }}" == "stable" ]; then + jf rt upload "$ARCH/*.rpm" "rpm-plugins/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/" --flat + else + jf rt upload "$ARCH/*.rpm" 
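+              # [editor's note] with hypothetical inputs distrib=el9,
+              # stability=unstable and module_name=stream-connectors, a noarch
+              # RPM is uploaded by the command below to:
+              #   rpm-plugins/el9/unstable/noarch/stream-connectors/
+              # whereas the stable branch above omits the module_name subfolder.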
"rpm-plugins/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --flat + fi + fi + done + shell: bash diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml index 74db8b6b019..606750b948c 100644 --- a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml +++ b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml @@ -238,11 +238,13 @@ jobs: uses: actions/checkout@v3 - name: Publish RPM packages - uses: ./.github/actions/delivery + uses: ./.github/actions/rpm-delivery with: + module_name: stream-connectors-dependencies distrib: ${{ matrix.distrib }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-deb: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} @@ -259,8 +261,9 @@ jobs: uses: actions/checkout@v3 - name: Publish DEB packages - uses: ./.github/actions/delivery + uses: ./.github/actions/deb-delivery with: distrib: ${{ matrix.distrib }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.lib }}-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/stream-connectors/.github/workflows/stream-connectors-lib.yml index f3035db3602..93f47575cba 100644 --- a/stream-connectors/.github/workflows/stream-connectors-lib.yml +++ b/stream-connectors/.github/workflows/stream-connectors-lib.yml @@ -140,11 +140,13 @@ jobs: uses: actions/checkout@v3 - name: Publish RPM packages - uses: ./.github/actions/delivery + uses: ./.github/actions/rpm-delivery with: + module_name: stream-connectors-lib distrib: ${{ matrix.distrib }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-deb: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} @@ -160,8 +162,9 @@ jobs: uses: actions/checkout@v3 - name: Publish DEB packages - uses: ./.github/actions/delivery + uses: ./.github/actions/deb-delivery with: distrib: ${{ matrix.distrib }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} diff --git a/stream-connectors/.github/workflows/stream-connectors.yml b/stream-connectors/.github/workflows/stream-connectors.yml index a49f468b98a..9fc33dc28e1 100644 --- a/stream-connectors/.github/workflows/stream-connectors.yml +++ b/stream-connectors/.github/workflows/stream-connectors.yml @@ -230,11 +230,13 @@ jobs: uses: actions/checkout@v3 - name: Publish RPM packages - uses: ./.github/actions/delivery + uses: ./.github/actions/rpm-delivery with: + module_name: stream-connectors distrib: ${{ matrix.distrib }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-deb: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), 
needs.get-environment.outputs.stability) }}
@@ -251,8 +253,9 @@ jobs:
         uses: actions/checkout@v3
 
       - name: Publish DEB packages
-        uses: ./.github/actions/delivery
+        uses: ./.github/actions/deb-delivery
         with:
           distrib: ${{ matrix.distrib }}
           artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }}
           cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.connector_path }}-${{ matrix.distrib }}
+          stability: ${{ needs.get-environment.outputs.stability }}

From bd7b445c7ce28c02525f7743cc926e24a0ad002e Mon Sep 17 00:00:00 2001
From: Kevin Duret
Date: Mon, 3 Apr 2023 14:53:33 +0200
Subject: [PATCH 165/219] fix(ci): fix branch name in get environment workflow (#142)

---
 stream-connectors/.github/workflows/get-environment.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/stream-connectors/.github/workflows/get-environment.yml b/stream-connectors/.github/workflows/get-environment.yml
index dcdcb547fd4..8717e0ba891 100644
--- a/stream-connectors/.github/workflows/get-environment.yml
+++ b/stream-connectors/.github/workflows/get-environment.yml
@@ -16,6 +16,12 @@ jobs:
       - id: get_environment
         run: |
+          if [[ -z "$GITHUB_HEAD_REF" ]]; then
+            BRANCHNAME="$GITHUB_REF_NAME"
+          else
+            BRANCHNAME="$GITHUB_HEAD_REF"
+          fi
+
           case "$BRANCHNAME" in
             develop | dev-[2-9][0-9].[0-9][0-9].x)
               STABILITY="unstable"

From 4dd6f3176c58226288e32f6d5f2176bec94e9e47 Mon Sep 17 00:00:00 2001
From: interstar001
Date: Wed, 19 Apr 2023 09:29:37 +0200
Subject: [PATCH 166/219] Update servicenow-em-events-apiv2.lua (#138)

Add an http_server_url parameter so that self-hosted ServiceNow platforms can
be used, use http_server_url when building the endpoint string, and change
self.proxy_username to self.sc_params.params.proxy_username so that a proxy
server with authentication works.
---
 .../servicenow/servicenow-em-events-apiv2.lua | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua
index 3eac03dc8e7..28b98029d9a 100644
--- a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua
+++ b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua
@@ -61,6 +61,7 @@ function EventQueue.new (params)
   self.sc_params.params.client_secret = params.client_secret
   self.sc_params.params.username = params.username
   self.sc_params.params.password = params.password
+  self.sc_params.params.http_server_url = params.http_server_url or "service-now.com"
 
   self.sc_params.params.accepted_categories = params.accepted_categories or "neb"
   self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status"
@@ -237,7 +238,7 @@ function EventQueue:call(url, method, data, authToken)
     }
   end
 
-  local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. ".service-now.com/" .. tostring(url)
+  local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. "." .. self.sc_params.params.http_server_url .. "/" .. tostring(url)
 
   self.sc_logger:debug("EventQueue:call: Prepare url " .. endpoint)
   self.sc_logger:log_curl_command(endpoint, queue_metadata, self.sc_params.params, data)
@@ -271,7 +272,7 @@ function EventQueue:call(url, method, data, authToken)
   -- set proxy user configuration
   if (self.sc_params.params.proxy_username ~= '') then
     if (self.sc_params.params.proxy_password ~= '') then
-      request:setopt(curl.OPT_PROXYUSERPWD, self.proxy_username .. ':' ..
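-- [editor's note] With the new parameter (hypothetical values), instance
-- "mycompany" and http_server_url "snow.example.com" now yield
-- https://mycompany.snow.example.com/<url>, while the default keeps the
-- previous behaviour: https://mycompany.service-now.com/<url>.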
self.sc_params.params.proxy_password) + request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) else self.sc_logger:error("EventQueue:call: proxy_password parameter is not set but proxy_username is used") end From 27ad4245358e40c57cc3bf9c2bb2084e8859de24 Mon Sep 17 00:00:00 2001 From: tcharles Date: Thu, 20 Apr 2023 10:02:59 +0200 Subject: [PATCH 167/219] fix bad url handling for log curl command (#143) --- .../centreon-certified/logstash/logstash-events-apiv2.lua | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua index 18203d988a5..46bfdd090ef 100644 --- a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -193,6 +193,7 @@ end function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local url = self.sc_params.params.http_server_url .. ":" .. self.sc_params.params.port queue_metadata.headers = {"accept: application/json"} queue_metadata.method = "PUT" self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) @@ -204,11 +205,11 @@ function EventQueue:send_data(payload, queue_metadata) end self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) - self.sc_logger:info("[EventQueue:send_data]: Logstash address is: " .. tostring(self.sc_params.params.http_server_url .. ":" .. self.sc_params.params.port)) + self.sc_logger:info("[EventQueue:send_data]: Logstash address is: " .. tostring(url)) local http_response_body = "" local http_request = curl.easy() - :setopt_url(self.sc_params.params.http_server_url .. ":" .. self.sc_params.params.port) + :setopt_url(url) :setopt_writefunction( function (response) http_response_body = http_response_body .. 
tostring(response) From 757dfd5adbbeb56e67ea4a63eb1fb31a3b270ef2 Mon Sep 17 00:00:00 2001 From: omercier <32134301+omercier@users.noreply.github.com> Date: Thu, 20 Apr 2023 17:24:40 +0200 Subject: [PATCH 168/219] build(void): making a neutral change to force build and delivery (#144) --- .../centreon-certified/capensis/canopsis2-events-apiv2.lua | 1 + .../centreon-certified/capensis/canopsis4-events-apiv2.lua | 1 + .../centreon-certified/datadog/datadog-events-apiv2.lua | 1 + .../centreon-certified/datadog/datadog-metrics-apiv2.lua | 1 + .../centreon-certified/elasticsearch/elastic-events-apiv2.lua | 3 ++- .../centreon-certified/elasticsearch/elastic-metrics-apiv1.lua | 1 + .../centreon-certified/elasticsearch/elastic-neb-apiv1.lua | 1 + .../centreon-certified/google/bigquery-events-apiv2.lua | 1 + .../centreon-certified/influxdb/influxdb-metrics-apiv1.lua | 1 + .../centreon-certified/influxdb/influxdb-neb-apiv1.lua | 1 + .../centreon-certified/kafka/kafka-events-apiv2.lua | 1 + .../centreon-certified/logstash/logstash-events-apiv2.lua | 3 ++- stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua | 1 + stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua | 1 + .../centreon-certified/omi/omi_connector-apiv1.lua | 1 + stream-connectors/centreon-certified/omi/omi_events-apiv2.lua | 3 ++- .../centreon-certified/opsgenie/opsgenie-apiv1.lua | 3 ++- .../centreon-certified/opsgenie/opsgenie-events-apiv2.lua | 1 + .../centreon-certified/pagerduty/pagerduty-apiv1.lua | 1 + .../centreon-certified/pagerduty/pagerduty-events-apiv2.lua | 1 + .../centreon-certified/prometheus/prometheus-gateway-apiv1.lua | 1 + .../centreon-certified/servicenow/servicenow-apiv1.lua | 1 + .../servicenow/servicenow-em-events-apiv2.lua | 1 + .../servicenow/servicenow-incident-events-apiv2.lua | 1 + .../centreon-certified/signl4/signl4-events-apiv2.lua | 3 ++- .../centreon-certified/splunk/splunk-events-apiv2.lua | 3 ++- .../centreon-certified/splunk/splunk-events-http-apiv1.lua | 1 + .../centreon-certified/splunk/splunk-events-luacurl-apiv1.lua | 1 + .../centreon-certified/splunk/splunk-metrics-apiv2.lua | 1 + .../centreon-certified/splunk/splunk-metrics-http-apiv1.lua | 1 + .../centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua | 1 + .../centreon-certified/splunk/splunk-states-http-apiv1.lua | 1 + .../centreon-certified/warp10/export-warp10-apiv1.lua | 1 + 33 files changed, 39 insertions(+), 6 deletions(-) diff --git a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua index 9fd8612ca0a..5d24c041a3c 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua @@ -534,3 +534,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false end + diff --git a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua index 11146251346..c29fa9de9ee 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua @@ -548,3 +548,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false end + diff --git a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua 
b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua index 264ab1a0b75..6a6bba15f63 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua @@ -358,3 +358,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false end + diff --git a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua index 19e002e12a8..287e9101caa 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -407,3 +407,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false end + diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua index 79bbc212aff..f4088871334 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -346,4 +346,5 @@ function flush() -- there are events in the queue but they were not ready to be send return false -end \ No newline at end of file +end + diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua index 46ee6edeec5..d98ac8e0748 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua @@ -190,3 +190,4 @@ function filter(category, element) end return false end + diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua index a1c453a4bf1..d7b91f0f932 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua @@ -343,3 +343,4 @@ end function filter(category, element) return category == 1 and (element == 14 or element == 24) end + diff --git a/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua index fc6fab554b5..205afc4e746 100644 --- a/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua +++ b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua @@ -430,3 +430,4 @@ function write(event) return true end + diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua index d001c76e9a3..4ce1fd861e1 100644 --- a/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua +++ b/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua @@ -171,3 +171,4 @@ function filter(category, element) end return false end + diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua index c0bf7dbb379..84b2898ee41 100644 --- a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua +++ b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua @@ -282,3 +282,4 @@ end function filter(category, 
element) return category == 1 and (element == 14 or element == 24) end + diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 5509ea79945..4f80b72979b 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -308,3 +308,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false end + diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua index 46bfdd090ef..869f2464ddb 100644 --- a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -343,4 +343,5 @@ function flush() -- there are events in the queue but they were not ready to be send return false -end \ No newline at end of file +end + diff --git a/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua b/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua index 91f22e49bc4..b0176269aa7 100644 --- a/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua +++ b/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua @@ -330,3 +330,4 @@ ndo.data = { } return ndo + diff --git a/stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua b/stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua index 65d4cd8a50f..40a1849d2a1 100644 --- a/stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua +++ b/stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua @@ -720,3 +720,4 @@ function write(d) end return true end + diff --git a/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua b/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua index c299f307fae..8c7d0c9de11 100644 --- a/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua +++ b/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua @@ -149,3 +149,4 @@ function write(d) end return true end + diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua index 801b9ce18d4..71bb9afb7be 100644 --- a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua +++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua @@ -368,4 +368,5 @@ function flush() -- there are events in the queue but they were not ready to be send return false -end \ No newline at end of file +end + diff --git a/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua index 3a360ee53dc..6e922cb0a99 100644 --- a/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua +++ b/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua @@ -1035,4 +1035,5 @@ function EventQueue:is_event_duplicated() end return false -end \ No newline at end of file +end + diff --git a/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua index 43b8d0df4b0..b4f98820a41 100644 --- a/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua +++ b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua @@ -485,3 +485,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false end + diff 
--git a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua index 546cfcb2882..350764f8888 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua @@ -449,3 +449,4 @@ function write(e) return true end + diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index 7d7cf4911a7..775e8b90534 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -480,3 +480,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false end + diff --git a/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua index c0a66e4f965..6f87cd14618 100644 --- a/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua +++ b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua @@ -947,3 +947,4 @@ function write (event) return true end + diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua b/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua index 10cd3c44076..eba8755917d 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua @@ -896,3 +896,4 @@ function EventQueue:is_event_duplicated() return false end + diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua index 28b98029d9a..17566923121 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua @@ -505,3 +505,4 @@ function flush() return false end + diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua index f184df5f354..d3ea96e5164 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua @@ -506,3 +506,4 @@ function flush() return false end + diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua index f2b312bcf69..216fc81aa7e 100644 --- a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua @@ -342,4 +342,5 @@ function flush() -- there are events in the queue but they were not ready to be send return false -end \ No newline at end of file +end + diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index 450f232da8e..f7b3043af56 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -339,4 +339,5 @@ function flush() -- there are events in the queue but they were not ready to be 
send
   return false
-end
\ No newline at end of file
+end
+

diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua
index a15509552f2..e3a14ceb4b0 100755
--- a/stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua
+++ b/stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua
@@ -318,3 +318,4 @@ function write(e)
 
   return true
 end
+

diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua
index 8c56a9a7dd8..7e9a4c5f946 100755
--- a/stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua
+++ b/stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua
@@ -336,3 +336,4 @@ function write(e)
 
   return true
 end
+

diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
index 2af12d2664f..9c1c5186b9c 100644
--- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
+++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua
@@ -398,3 +398,4 @@ function flush()
   -- there are events in the queue but they were not ready to be send
   return false
 end
+

diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua
index 551d86ba581..6bf55dd63b6 100755
--- a/stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua
+++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua
@@ -289,3 +289,4 @@ function write(e)
 
   return true
 end
+

diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua
index 3ff1f91d7ce..02f7c2162e3 100755
--- a/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua
+++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua
@@ -310,3 +310,4 @@ function write(e)
 
   return true
 end
+

diff --git a/stream-connectors/centreon-certified/splunk/splunk-states-http-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-states-http-apiv1.lua
index 2afb289c434..9d2732cc3c3 100644
--- a/stream-connectors/centreon-certified/splunk/splunk-states-http-apiv1.lua
+++ b/stream-connectors/centreon-certified/splunk/splunk-states-http-apiv1.lua
@@ -135,3 +135,4 @@ function filter(category, element)
   end
   return false
 end
+

diff --git a/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua
index 9103aa4745d..2990b28ab90 100644
--- a/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua
+++ b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua
@@ -113,3 +113,4 @@ function write(d)
   end
   return false
 end
+

From 9c5a9f7640c86ad8a4bbad36589f02aca9990488 Mon Sep 17 00:00:00 2001
From: omercier <32134301+omercier@users.noreply.github.com>
Date: Fri, 21 Apr 2023 16:32:08 +0200
Subject: [PATCH 169/219] enh(pack): two connectors were missing in the builds (#145)

---
 .../logstash/logstash-events-apiv2.lua | 1 -
 .../warp10/export-warp10-apiv1.lua     | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua
b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua index 869f2464ddb..c7fc5ee21a5 100644 --- a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -189,7 +189,6 @@ function EventQueue:build_payload(payload, event) return payload end - function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") diff --git a/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua index 2990b28ab90..ac2d9cea029 100644 --- a/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua +++ b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua @@ -23,6 +23,7 @@ -- token (string): the Warp10 write token -- max_size (number): how many queries to store before sending them to the server. -- + local curl = require "cURL" local my_data = { From 7ac821d30977e9001de7e4374a4f2bad9e6b2eae Mon Sep 17 00:00:00 2001 From: tuntoja <58987095+tuntoja@users.noreply.github.com> Date: Fri, 23 Jun 2023 11:45:54 +0200 Subject: [PATCH 170/219] fix(packaging): add missing lcurl.so to package (#147) * fix(packaging): add missing lcurl.so * update path * update path * update path * update path * update path * update workflow and spec * add missing path * Update dependencies/lua-curl/packaging/rpm/lua-curl.spec Co-authored-by: Kevin Duret * add cp * add lualibdir to install * update install * update install spectemplate * update files section spectemplate * fix install section --------- Co-authored-by: Kevin Duret --- .../workflows/stream-connectors-dependencies.yml | 1 + .../lua-curl/packaging/rpm/lua-curl.spec | 14 ++++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml index 606750b948c..6a3e894e743 100644 --- a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml +++ b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml @@ -89,6 +89,7 @@ jobs: mkdir -p ${{ matrix.lib }}/cURL cp -rp /usr/share/lua/`lua -e "print(string.sub(_VERSION, 5))"`/cURL ${{ matrix.lib }}/ cp -p /usr/share/lua/`lua -e "print(string.sub(_VERSION, 5))"`/cURL.lua ${{ matrix.lib }}/ + cp -p /usr/lib64/lua/`lua -e "print(string.sub(_VERSION, 5))"`/lcurl.so ${{ matrix.lib }}/ tar czf ~/rpmbuild/SOURCES/${{ matrix.lib }}.tar.gz ${{ matrix.lib }} working-directory: dependencies/${{ matrix.lib }} shell: bash diff --git a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec index 23e6862d184..a9872840d03 100644 --- a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec +++ b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec @@ -31,14 +31,20 @@ lua curl library %build %install -%{__install} -d $RPM_BUILD_ROOT%{luapkgdir}/cURL -%{__cp} -rp ./* $RPM_BUILD_ROOT%{luapkgdir}/ +%{__install} -Dp -m0755 ./lcurl.so $RPM_BUILD_ROOT%{lualibdir}/lcurl.so +%{__install} -Dp -m0644 ./cURL.lua $RPM_BUILD_ROOT%{lualibdir}/cURL.lua +%{__install} -d -m 0755 $RPM_BUILD_ROOT%{luapkgdir}/cURL +%{__install} -Dp -m0644 ./cURL/safe.lua $RPM_BUILD_ROOT%{luapkgdir}/cURL/safe.lua +%{__install} -Dp -m0644 ./cURL/utils.lua $RPM_BUILD_ROOT%{luapkgdir}/cURL/utils.lua +%{__install} -d -m 0755 
$RPM_BUILD_ROOT%{luapkgdir}/cURL/impl +%{__install} -Dp -m0644 ./cURL/impl/cURL.lua $RPM_BUILD_ROOT%{luapkgdir}/cURL/impl/cURL.lua %clean %{__rm} -rf $RPM_BUILD_ROOT %files -%{luapkgdir}/cURL.lua -%{luapkgdir}/cURL +%defattr(-, root, root, 0755) +%{lualibdir}/* +%{luapkgdir}/* %changelog From fc3463dc8578b054ca412040c0d7f2485ac755f8 Mon Sep 17 00:00:00 2001 From: tuntoja <58987095+tuntoja@users.noreply.github.com> Date: Fri, 23 Jun 2023 14:26:27 +0200 Subject: [PATCH 171/219] chore(ci): bump release number for lua-curl (#148) --- .../dependencies/lua-curl/packaging/rpm/lua-curl.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec index a9872840d03..bd677b05dbb 100644 --- a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec +++ b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec @@ -5,7 +5,7 @@ Name: lua-curl Version: %{VERSION} -Release: 1%{?dist} +Release: 2%{?dist} Summary: lua curl Group: Applications/System From 1e0cac59206f39f9bc4507f52bf290fa77d7104e Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Fri, 28 Jul 2023 09:29:05 +0200 Subject: [PATCH 172/219] fix(packaging): fix lua-curl and curl compat on el9 (#151) Refs: MON-20735 --- .../stream-connectors-dependencies.yml | 55 ++++++++++++++----- .../lua-curl/packaging/rpm/lua-curl.spec | 2 +- 2 files changed, 42 insertions(+), 15 deletions(-) diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml index 6a3e894e743..ee6c6a6719a 100644 --- a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml +++ b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml @@ -42,7 +42,8 @@ jobs: password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} steps: - - uses: actions/checkout@v3 + - name: Checkout sources + uses: actions/checkout@v3 - name: Install dependencies run: | @@ -56,6 +57,7 @@ jobs: shell: bash - if: ${{ matrix.lib == 'lua-cffi' }} + name: Prepare packaging of lua-cffi run: | luarocks install cffi-lua luarocks show cffi-lua | grep "cffi-lua\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt @@ -68,6 +70,7 @@ jobs: shell: bash - if: ${{ matrix.lib == 'lua-tz' }} + name: Prepare packaging of lua-tz run: | luarocks install luatz luarocks show luatz | grep "luatz\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt @@ -80,21 +83,45 @@ jobs: shell: bash - if: ${{ matrix.lib == 'lua-curl' }} + name: Checkout sources of lua-curl + uses: actions/checkout@v3 + with: + repository: Lua-cURL/Lua-cURLv3 + path: lua-curl-src + ref: v0.3.13 + + - if: ${{ matrix.lib == 'lua-curl' }} + name: Compile lua-curl and prepare packaging run: | - luarocks install lua-curl - luarocks show lua-curl - luarocks show lua-curl | grep "Lua-cURL\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt - cat version.txt + dnf install -y openssl-devel openssl libcurl-devel python3-pip perl-IPC-Cmd perl-Digest-SHA perl-Thread-Queue perl-IO-Socket-SSL + + cd lua-curl-src + + pip3 install conan + conan profile detect + conan install --requires=libcurl/8.1.2 --build=missing -g CMakeToolchain --deployer=full_deploy + conan install --requires=openssl/3.1.1 --build=missing -g CMakeToolchain --deployer=full_deploy + conan install --requires=zlib/1.2.13 --build=missing -g 
CMakeToolchain --deployer=full_deploy + + sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.1.2/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/3.1.1/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -L full_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a#" Makefile + make + + cd .. + + mkdir -p dependencies/${{ matrix.lib }}/${{ matrix.lib }}/ + cp -p lua-curl-src/lcurl.so dependencies/${{ matrix.lib }}/${{ matrix.lib }}/ + cp -rp lua-curl-src/src/lua/cURL dependencies/${{ matrix.lib }}/${{ matrix.lib }}/ + cp -p lua-curl-src/src/lua/cURL.lua dependencies/${{ matrix.lib }}/${{ matrix.lib }}/ + + cd dependencies/${{ matrix.lib }} + + echo "0.3.13" >> version.txt - mkdir -p ${{ matrix.lib }}/cURL - cp -rp /usr/share/lua/`lua -e "print(string.sub(_VERSION, 5))"`/cURL ${{ matrix.lib }}/ - cp -p /usr/share/lua/`lua -e "print(string.sub(_VERSION, 5))"`/cURL.lua ${{ matrix.lib }}/ - cp -p /usr/lib64/lua/`lua -e "print(string.sub(_VERSION, 5))"`/lcurl.so ${{ matrix.lib }}/ tar czf ~/rpmbuild/SOURCES/${{ matrix.lib }}.tar.gz ${{ matrix.lib }} - working-directory: dependencies/${{ matrix.lib }} shell: bash - - run: | + - name: Package ${{ matrix.lib }} + run: | rpmbuild -ba packaging/rpm/${{ matrix.lib }}.spec -D "VERSION `cat version.txt`" mv ~/rpmbuild/RPMS/**/*.rpm ../../ @@ -134,7 +161,7 @@ jobs: shell: bash - if: ${{ matrix.lib == 'lua-cffi' }} - name: Package + name: Prepare packaging of lua-cffi run: | luarocks install cffi-lua luarocks show cffi-lua | grep "cffi-lua\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt @@ -151,7 +178,7 @@ jobs: shell: bash - if: ${{ matrix.lib == 'lua-tz' }} - name: Package + name: Prepare packaging of lua-tz run: | luarocks install luatz luarocks show luatz | grep "luatz\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt @@ -166,7 +193,7 @@ jobs: working-directory: dependencies/${{ matrix.lib }} shell: bash - - name: Package + - name: Package ${{ matrix.lib }} run: | PACKAGE_NAME="${{ matrix.lib }}" PACKAGE_VERSION=`cat version.txt` diff --git a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec index bd677b05dbb..a2c9cf9f1e5 100644 --- a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec +++ b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec @@ -5,7 +5,7 @@ Name: lua-curl Version: %{VERSION} -Release: 2%{?dist} +Release: 3%{?dist} Summary: lua curl Group: Applications/System From db2ba35e73cab11bbc765b5dee409f22a3e77f5e Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Mon, 31 Jul 2023 09:38:16 +0200 Subject: [PATCH 173/219] fix(packaging): set path to ca certificate (#152) Refs: MON-20735 --- .../stream-connectors-dependencies.yml | 32 ++++++++++++++++--- .../lua-curl/packaging/rpm/lua-curl.spec | 2 +- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml index ee6c6a6719a..d4395c7cf10 100644 --- a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml +++ b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml @@ -93,17 +93,39 @@ jobs: - if: ${{ matrix.lib == 'lua-curl' }} name: Compile lua-curl and prepare packaging run: | - dnf install -y openssl-devel openssl libcurl-devel python3-pip perl-IPC-Cmd perl-Digest-SHA perl-Thread-Queue perl-IO-Socket-SSL + dnf 
install -y openssl-devel openssl libcurl-devel python3-pip cpanminus + + cpanm \ + IPC::Cmd \ + Digest::SHA \ + Thread::Queue \ + IO::Socket::SSL \ + File::Copy \ + File::Compare cd lua-curl-src pip3 install conan conan profile detect - conan install --requires=libcurl/8.1.2 --build=missing -g CMakeToolchain --deployer=full_deploy - conan install --requires=openssl/3.1.1 --build=missing -g CMakeToolchain --deployer=full_deploy - conan install --requires=zlib/1.2.13 --build=missing -g CMakeToolchain --deployer=full_deploy - sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.1.2/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/3.1.1/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -L full_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a#" Makefile + cat <<'EOF' >> conanfile.txt + [requires] + libcurl/8.1.2 + openssl/3.1.1 + zlib/1.2.13 + + [generators] + CMakeToolchain + + [options] + libcurl/*:with_ca_bundle=/etc/ssl/certs/ca-bundle.crt + libcurl/*:with_ca_fallback=False + libcurl/*:with_ca_path=/etc/ssl/certs/ + EOF + + conan install . --build=missing --deployer=full_deploy + + sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.1.2/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/3.1.1/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread#" Makefile make cd .. diff --git a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec index a2c9cf9f1e5..28910a4219b 100644 --- a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec +++ b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec @@ -5,7 +5,7 @@ Name: lua-curl Version: %{VERSION} -Release: 3%{?dist} +Release: 4%{?dist} Summary: lua curl Group: Applications/System From 8fb7a6cbd9377854d17bff5e7cfff200a26beba5 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Mon, 31 Jul 2023 15:00:26 +0200 Subject: [PATCH 174/219] fix(packaging): force openssl 1.1.1 in lua-curl binary (#153) Refs: MON-20735 --- .../.github/workflows/stream-connectors-dependencies.yml | 6 +++--- .../dependencies/lua-curl/packaging/rpm/lua-curl.spec | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml index d4395c7cf10..31e1521efe6 100644 --- a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml +++ b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml @@ -110,8 +110,8 @@ jobs: cat <<'EOF' >> conanfile.txt [requires] - libcurl/8.1.2 - openssl/3.1.1 + libcurl/8.0.1 + openssl/1.1.1t zlib/1.2.13 [generators] @@ -125,7 +125,7 @@ jobs: conan install . --build=missing --deployer=full_deploy - sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.1.2/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/3.1.1/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread#" Makefile + sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.0.1/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/1.1.1t/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread#" Makefile make cd .. 
diff --git a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec
index 28910a4219b..4243291cf57 100644
--- a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec
+++ b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec
@@ -5,7 +5,7 @@
 Name: lua-curl
 Version: %{VERSION}
-Release: 4%{?dist}
+Release: 5%{?dist}
 Summary: lua curl
 
 Group: Applications/System

From 94a728761d7ec00ab3f71d9a23f150bd405bfb0b Mon Sep 17 00:00:00 2001
From: tcharles
Date: Mon, 31 Jul 2023 16:10:36 +0200
Subject: [PATCH 175/219] Mon 16176 add elasticsearch metricv2 (#150)

add elasticsearch metric v2 stream connector
add new parameter to filter on metric name
---
 .../elasticsearch/elastic-metrics-apiv2.lua   | 693 ++++++++++++++++++
 .../sc_logger.lua                             |  27 +-
 .../sc_metrics.lua                            |  13 +-
 .../sc_params.lua                             |   1 +
 stream-connectors/modules/docs/sc_metrics.md  |  48 +-
 stream-connectors/modules/docs/sc_param.md    |   1 +
 ...eon-stream-connectors-lib-3.7.0-1.rockspec |  39 +
 7 files changed, 783 insertions(+), 39 deletions(-)
 create mode 100644 stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua
 create mode 100644 stream-connectors/modules/specs/3.7.x copy/centreon-stream-connectors-lib-3.7.0-1.rockspec

diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua
new file mode 100644
index 00000000000..fd25b126d9e
--- /dev/null
+++ b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua
@@ -0,0 +1,693 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Elastic Connector Metrics
+--------------------------------------------------------------------------------
+
+
+-- Libraries
+local curl = require "cURL"
+local mime = require("mime")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics")
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param params The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+--------------------------------------------------------------------------------
+
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    "elastic_username",
+    "elastic_password",
+    "http_server_url"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/elastic-metrics.log"
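+  -- [editor's note] the three mandatory parameters above must be provided in
+  -- the broker output configuration; a hypothetical example:
+  --
+  --   elastic_username = "centreon"
+  --   elastic_password = "strong_password"
+  --   http_server_url  = "https://elastic.example.com:9200"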
+  local log_level = params.log_level or 1
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  -- overriding default parameters for this stream connector if the default values don't suit the basic needs
+  self.sc_params.params.elastic_username = params.elastic_username
+  self.sc_params.params.elastic_password = params.elastic_password
+  self.sc_params.params.http_server_url = params.http_server_url
+  self.sc_params.params.accepted_categories = params.accepted_categories or "neb"
+  self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status"
+  self.sc_params.params.max_buffer_size = params.max_buffer_size or 30
+  self.sc_params.params.hard_only = params.hard_only or 0
+  self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0
+  self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0
+  self.sc_params.params.metric_name_regex = params.metric_name_regex or "[^a-zA-Z0-9_%.]"
+  self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "_"
+
+  -- elasticsearch index parameters
+  self.sc_params.params.index_template_api_endpoint = params.index_template_api_endpoint or "/_index_template"
+  self.sc_params.params.index_name = params.index_name or "centreon-metrics"
+  self.sc_params.params.index_pattern = params.index_pattern or self.sc_params.params.index_name .. "*"
+  self.sc_params.params.index_priority = params.index_priority or 200
+  self.sc_params.params.create_datastream_index_template = params.create_datastream_index_template or 1
+  self.sc_params.params.update_datastream_index_template = params.update_datastream_index_template or 0
+
+  -- index dimensions parameters
+  self.sc_params.params.add_hostgroups_dimension = params.add_hostgroups_dimension or 1
+  self.sc_params.params.add_poller_dimension = params.add_poller_dimension or 0
+  self.sc_params.params.add_servicegroups_dimension = params.add_servicegroups_dimension or 0
+  -- can't get geo coords from cache nor event
+  -- self.sc_params.params.add_geocoords_dimension = params.add_geocoords_dimension or 0
+
+  -- apply user params and check the syntax of standard ones
+  self.sc_params:param_override(params)
+  self.sc_params:check_params()
+  self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger)
+
+  -- only load the custom code file; it is not executed yet
+  if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then
+    self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file))
+  end
+
+  self.sc_params:build_accepted_elements_info()
+  self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger)
+
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  local queue_metadata = {
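+    -- [editor's note] metric_name_regex / metric_replacement_character above
+    -- drive the sanitization of metric names (presumably applied with
+    -- string.gsub by the sc_metrics module); with the defaults, a hypothetical
+    -- metric name would be rewritten like this:
+    --
+    --   ("used memory #1"):gsub("[^a-zA-Z0-9_%.]", "_")  --> "used_memory__1"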
"/_bulk", + method = "PUT" + } + + self.sc_flush.queues.global_queues_metadata = queue_metadata + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + } + } + + self.format_metric = { + [categories.neb.id] = { + [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end, + [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + self:build_index_template(self.sc_params.params) + self:handle_index(self.sc_params.params) + return self +end + +function EventQueue:build_index_template(params) + self.index_template_meta = { + description = "Timeseries index template for Centreon metrics", + created_by_centreon = true + } + + self.index_routing_path = { + "host.name", + "service.description", + "metric.name", + "metric.instance", + -- "metric.subinstances" + } + + self.elastic_index_template = { + index_patterns = {params.index_pattern}, + priority = params.index_priority, + _meta = self.index_template_meta, + template = { + settings = { + ["index.mode"] = "time_series" + }, + mappings = { + properties = { + ["host.name"] = { + type = "keyword", + time_series_dimension = true + }, + ["service.description"] = { + type = "keyword", + time_series_dimension = true + }, + ["metric.name"] = { + type = "keyword", + time_series_dimension = true + }, + ["metric.unit"] = { + type = "keyword", + time_series_dimension = false + }, + ["metric.instance"] = { + type = "keyword", + time_series_dimension = true + }, + ["metric.subinstances"] = { + type = "keyword", + time_series_dimension = false + }, + ["metric.value"] = { + type = "double", + time_series_metric = gauge + }, + ["@timestamp"] = { + type = "date", + format = "epoch_second" + } + } + } + } + } + + -- add hostgroup property in the template + if params.add_hostgroups_dimension == 1 then + self.elastic_index_template.template.mappings.properties["host.groups"] = { + type = "keyword", + time_series_dimension = false + } + + -- table.insert(self.index_routing_path, "host.groups") + end + + -- add servicegroup property in the template + if params.add_servicegroups_dimension == 1 then + self.elastic_index_template.template.mappings.properties["service.groups"] = { + type = "keyword", + time_series_dimension = false + } + + -- table.insert(self.index_routing_path, "service.groups") + end + + -- add poller property in the template + if params.add_poller_dimension == 1 then + self.elastic_index_template.template.mappings.properties["poller"] = { + type = "keyword", + time_series_dimension = false + } + + -- table.insert(self.index_routing_path, "poller") + end + + + self.elastic_index_template.template.settings["index.routing_path"] = self.index_routing_path + -- add geocoords property in the template + -- can't get geo coords from cache nor event + --[[ + if params.add_geocoords_dimension == 1 then + self.elastic_index_template.mappings.properties["host.geocoords"] = { + type = "geo_point" + } + end + ]]-- + self.sc_logger:notice("[EventQueue:build_index_template]: The following index template is going 
to be created: " .. self.sc_common:dumper(self.elastic_index_template)) +end + +function EventQueue:handle_index(params) + local index_state = self:check_index_template(params) + + if (not index_state.is_created or not index_state.is_up_to_date) then + self.sc_logger:error("[EventQueue:handle_index]: It will not be possible to send data to elasticsearch because of an invalid index template structure") + self.fail = true + end +end + +function EventQueue:check_index_template(params) + local metadata = { + method = "GET", + endpoint = params.index_template_api_endpoint .. "/" .. params.index_name + } + local payload = nil + local index_state = { + is_created = false, + is_up_to_date = false, + } + + local return_code = self:send_data(payload, metadata) + if return_code then + self.sc_logger:notice("[EventQueue:check_index_template]: Elasticsearch index template " .. tostring(params.index_name) .. " has been found") + index_state.is_created = true + index_state.is_up_to_date = self:validate_index_template(params) + return index_state + end + + if (not return_code and params.create_datastream_index_template == 1) then + self.sc_logger:notice("[EventQueue:check_index_template]: Elasticsearch index template " .. tostring(params.index_name) .. " has not been found" + .. ". Trying to create it because create_datastream_index_template parameter is set to 1...") + + if self:create_index_template(params) then + index_state.is_created = true + -- it has just been created so obviously, it is up to date + index_state.is_up_to_date = true + return index_state + end + end + + self.sc_logger:error("[EventQueue:check_index_template]: Elasticsearch index template " .. tostring(params.index_name) .. " has not been found" + .. " and could not be created.") + + return index_state +end + +function EventQueue:create_index_template(params) + local metadata = { + endpoint = params.index_template_api_endpoint .. "/" .. params.index_name, + method = "PUT" + } + + if not self:send_data(broker.json_encode(self.elastic_index_template), metadata) then + self.sc_logger:error("[EventQueue:create_index_template]: Index template " .. tostring(params.index_name) .. " could not be created." + .. ". Error is: " .. tostring(self.elastic_result)) + return false + end + + self.sc_logger:notice("[EventQueue:create_index_template]: Index template " .. tostring(params.index_name) .. " successfully created") + return true +end + +function EventQueue:validate_index_template(params) + local index_template_structure, error = broker.json_decode(self.elastic_result) + + if error then + self.sc_logger:error("[EventQueue:validate_index_template]: Could not decode json: " .. tostring(self.elastic_result) .. ". Error message is: " .. 
+
+function EventQueue:validate_index_template(params)
+  local index_template_structure, error = broker.json_decode(self.elastic_result)
+
+  if error then
+    self.sc_logger:error("[EventQueue:validate_index_template]: Could not decode json: " .. tostring(self.elastic_result) .. ". Error message is: " .. tostring(error))
+    return false
+  end
+
+  local required_index_mapping_properties = {
+    "host.name",
+    "service.description",
+    "metric.name",
+    "metric.unit",
+    "metric.value",
+    "metric.instance",
+    "metric.subinstances"
+  }
+
+  if params.add_hostgroups_dimension == 1 then
+    table.insert(required_index_mapping_properties, "host.groups")
+  end
+
+  if params.add_servicegroups_dimension == 1 then
+    table.insert(required_index_mapping_properties, "service.groups")
+  end
+
+  -- can't get geo coords from cache nor event
+  --[[
+  if params.add_geocoords_dimension == 1 then
+    table.insert(required_index_mapping_properties, "host.geocoords")
+  end
+  ]]--
+
+  if params.add_poller_dimension == 1 then
+    table.insert(required_index_mapping_properties, "poller")
+  end
+
+  local return_code = true
+  local update_template = false
+
+  -- this double for loop only does two things: it logs every missing property in the index template for the sake of verbosity
+  -- and it changes the two flags above
+  for _, index_information in ipairs(index_template_structure.index_templates) do
+    if index_information.name == params.index_name then
+      for _, index_mapping_property_name in pairs(required_index_mapping_properties) do
+        -- check if all the mappings are created in the index template
+        if not index_information.index_template.template.mappings.properties[index_mapping_property_name] then
+          if (params.update_datastream_index_template == 1 and index_information.index_template["_meta"].created_by_centreon) then
+            self.sc_logger:notice("[EventQueue:validate_index_template]: Elastic index template is not valid. Missing mapping property: "
+              .. tostring(index_mapping_property_name) .. ". Template is going to be automatically updated")
+            update_template = true
+          else
+            -- we do not return at the first missing property because we want to display all the missing ones in one go instead.
+            self.sc_logger:error("[EventQueue:validate_index_template]: Elastic index template is not valid. Missing mapping property: "
+              .. tostring(index_mapping_property_name))
+            return_code = false
+          end
+        end
+      end
+    end
+  end
+
+  if update_template then
+    self.sc_logger:notice("[EventQueue:validate_index_template]: Going to update index template with the following structure: " .. self.sc_common:dumper(self.elastic_index_template))
+    return_code = self:create_index_template(params)
+  end
+
+  return return_code
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_accepted_event method
+--------------------------------------------------------------------------------
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+
+  -- the event can't be formatted if the stream connector does not handle this kind of event and it is not handled with a template file
+  if not self.format_event[category][element] then
+    self.sc_logger:error("[EventQueue:format_event]: You are trying to format an event with category: "
+      .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+      .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+      .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+  else
+    self.format_event[category][element]()
+  end
+
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_host method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+  self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric ")
+  self.sc_event.event.formated_event = {}
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_service method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_service()
+  self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric ")
+  local event = self.sc_event.event
+  self.sc_event.event.formated_event = {}
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_host method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_host(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_host]: call format_metric host")
+  self:add_generic_information(metric)
+  self:add_generic_optional_information()
+  self:add()
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_service method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_service(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_service]: call format_metric service")
+
+  -- add_generic_information() replaces the whole formated_event table, so the
+  -- service description must be set afterwards or it would be lost
+  self:add_generic_information(metric)
+  self.sc_event.event.formated_event["service.description"] = tostring(self.sc_event.event.cache.service.description)
+  self:add_generic_optional_information()
+  self:add_service_optional_information()
+  self:add()
+end
+
+function EventQueue:add_generic_information(metric)
+  local event = self.sc_event.event
+  self.sc_event.event.formated_event = {
+    ["@timestamp"] = event.last_check,
+    ["host.name"] = tostring(event.cache.host.name),
+    ["metric.name"] = tostring(metric.metric_name),
+    ["metric.value"] = metric.value,
+    ["metric.instance"] = metric.instance,
+    ["metric.subinstances"] = metric.subinstances,
+    ["metric.unit"] = metric.unit
+  }
+end
+
+function EventQueue:add_generic_optional_information()
+  local params = self.sc_event.params
+  local event = self.sc_event.event
+
+  -- add hostgroups
+  if params.add_hostgroups_dimension == 1 then
+    local hostgroups = {}
+
+    for _, hg_info in ipairs(event.cache.hostgroups) do
+      table.insert(hostgroups, hg_info.group_name)
+    end
+
+    self.sc_event.event.formated_event["host.groups"] = hostgroups
+  end
+
+  -- add poller
+  if params.add_poller_dimension == 1 then
+    self.sc_event.event.formated_event.poller = event.cache.poller
+  end
+end
+
+function EventQueue:add_service_optional_information()
+  -- add servicegroups
+  if self.sc_params.params.add_servicegroups_dimension == 1 then
+    local servicegroups = {}
+
+    for _, sg_info in ipairs(self.sc_event.event.cache.servicegroups) do
+      table.insert(servicegroups, sg_info.group_name)
+    end
+
+    self.sc_event.event.formated_event["service.groups"] = servicegroups
+  end
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {string} json encoded string
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {string} json encoded string
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = '{"index":{}}\n' .. broker.json_encode(event) .. "\n"
+  else
+    payload = payload .. '{"index":{}}\n' .. broker.json_encode(event) .. "\n"
+  end
+
+  return payload
+end
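build_payload serializes each queued event into Elasticsearch bulk (NDJSON) form: an `{"index":{}}` action line followed by the JSON document, each terminated by a newline. A two-event payload therefore looks roughly like this (field values are illustrative):

```lua
-- Illustrative two-event bulk payload as produced by successive
-- build_payload() calls; each {"index":{}} action line precedes one document.
local example_payload =
  '{"index":{}}\n' ..
  '{"@timestamp":1690812636,"host.name":"central","metric.name":"rta","metric.value":0.8}\n' ..
  '{"index":{}}\n' ..
  '{"@timestamp":1690812636,"host.name":"central","metric.name":"pl","metric.value":0}\n'
```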
+
+function EventQueue:send_data(payload, queue_metadata)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+  local params = self.sc_params.params
+  local url = params.http_server_url .. queue_metadata.endpoint
+  queue_metadata.headers = {
+    "Authorization: Basic " .. mime.b64(params.elastic_username .. ":" .. params.elastic_password),
+    "Content-type: application/json"
+  }
+
+  if payload then
+    -- write payload in the logfile for test purpose
+    if params.send_data_test == 1 then
+      self.sc_logger:notice("[send_data]: " .. tostring(payload))
+      self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload))
+      return true
+    end
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Elastic address is: " .. tostring(url))
+  -- credentials handed to the curl logger so the logged command includes the -u option
+  local basic_auth = { username = params.elastic_username, password = params.elastic_password }
+  self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload, basic_auth)
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, params.allow_insecure_connection)
+    :setopt(curl.OPT_SSL_VERIFYHOST, params.allow_insecure_connection)
+    :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
+
+  -- set proxy address configuration
+  if (params.proxy_address ~= '') then
+    if (params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, params.proxy_address .. ':' .. params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (params.proxy_username ~= '') then
+    if (params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, params.proxy_username .. ':' .. params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- set the HTTP method when it is not the default POST
+  if queue_metadata.method and queue_metadata.method == "PUT" then
+    http_request:setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method)
+  end
+
+  -- adding the HTTP POST data
+  if payload then
+    http_request:setopt_postfields(payload)
+  end
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- the gsub function is here to fix a bug with the broker method json_decode that crashes when a value is null. Internal issue: MON-20481
+  self.elastic_result = string.gsub(http_response_body, "null", "false")
+  local decoded_elastic_result, error_json = broker.json_decode(self.elastic_result)
+
+  if error_json then
+    self.sc_logger:error("[EventQueue:send_data]: Couldn't decode json from elasticsearch. Error is: " .. tostring(error_json)
+      .. ". Received json is: " .. tostring(http_response_body))
+    return false
+  end
+
+  if (http_response_code == 200 and not decoded_elastic_result.errors) then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    return true
+  end
+
+  self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  return false
+end
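When the `send_data_test` option is set to 1, send_data logs the payload and returns before any HTTP call, which makes dry runs easy. A sketch of a test-oriented parameter set (values are placeholders; `send_data_test` is assumed here to be the generic sc_params test switch used by the branch above):

```lua
-- Illustrative stream-connector parameters for a dry run: nothing is sent,
-- payloads are only written to the log file. The three mandatory parameters
-- are still required by EventQueue.new().
local test_params = {
  elastic_username = "centreon",
  elastic_password = "secret",
  http_server_url  = "http://elastic.example.com:9200",
  send_data_test   = 1,
  log_level        = 3
}
```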
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+-- --------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set or elastic index is not valid")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger)
+  queue.sc_event = queue.sc_metrics.sc_event
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_metrics:is_valid_bbdo_element() then
+      -- format event if it is validated
+      if queue.sc_metrics:is_valid_metric_event() then
+        queue:format_accepted_event()
+      end
+    --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua index 523e6b7f074..a41f7320eec 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua @@ -93,16 +93,18 @@ end -- @param metadata (table) a table that contains headers information and http method for curl -- @param params (table) the stream connector params table -- @param data (string) [opt] the data that must be send by curl -function ScLogger:log_curl_command(url, metadata, params, data) +-- @param basic_auth (table) [opt] a table that contains the username and the password if using basic auth ({"username" = username, "password" = password}) +function ScLogger:log_curl_command(url, metadata, params, data, basic_auth) if params.log_curl_commands == 1 then self:debug("[sc_logger:log_curl_command]: starting computing curl command") - local curl_string = "curl " + local curl_string = "curl" -- handle proxy self:debug("[sc_looger:log_curl_command]: proxy information: protocol: " .. params.proxy_protocol .. ", address: " .. params.proxy_address .. ", port: " .. params.proxy_port .. ", user: " .. params.proxy_username .. ", password: " .. tostring(params.proxy_password)) local proxy_url + if params.proxy_address ~= "" then if params.proxy_username ~= "" then proxy_url = params.proxy_protocol .. "://" .. params.proxy_username .. ":" .. params.proxy_password @@ -111,35 +113,40 @@ function ScLogger:log_curl_command(url, metadata, params, data) proxy_url = params.proxy_protocol .. "://" .. params.proxy_address .. ":" .. params.proxy_port end - curl_string = curl_string .. "--proxy '" .. proxy_url .. "' " + curl_string = curl_string .. " --proxy '" .. proxy_url .. "'" end -- handle certificate verification if params.allow_insecure_connection == 1 then - curl_string = curl_string .. "-k " + curl_string = curl_string .. " -k" end -- handle http method if metadata.method then - curl_string = curl_string .. "-X " .. metadata.method .. " " + curl_string = curl_string .. " -X " .. metadata.method elseif data then - curl_string = curl_string .. "-X POST " + curl_string = curl_string .. " -X POST" else - curl_string = curl_string .. "-X GET " + curl_string = curl_string .. 
" -X GET" end -- handle headers if metadata.headers then for _, header in ipairs(metadata.headers) do - curl_string = curl_string .. "-H '" .. tostring(header) .. "' " + curl_string = curl_string .. " -H '" .. tostring(header) .. "'" end end - curl_string = curl_string .. "'" .. tostring(url) .. "' " + curl_string = curl_string .. " '" .. tostring(url) .. "'" -- handle curl data if data and data ~= "" then - curl_string = curl_string .. "-d '" .. data .. "'" + curl_string = curl_string .. " -d '" .. data .. "'" + end + + -- handle http basic auth + if basic_auth then + curl_string = curl_string .. " -u '" .. basic_auth.username .. ":" .. basic_auth.password .. "'" end self:notice("[sc_logger:log_curl_command]: " .. curl_string) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua index 453260f8c26..3fc65356b37 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua @@ -264,16 +264,19 @@ end -- dynatrace matric name [a-zA-Z0-9-_.] https://dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol#metric-key -- metric 2.0 (carbon/grafite/grafana) [a-zA-Z0-9-_./] http://metrics20.org/spec/ (see Data Model section) ---- build_metric: use the stream connector format method to parse every metric in the event +--- build_metric: use the stream connector format method to parse every metric in the event and remove unwanted metrics based on their name -- @param format_metric (function) the format method from the stream connector function ScMetrics:build_metric(format_metric) local metrics_info = self.metrics_info - self.sc_logger:debug("perfdata: " .. self.sc_common:dumper(metrics_info)) for metric, metric_data in pairs(self.metrics_info) do - metrics_info[metric].metric_name = string.gsub(metric_data.metric_name, self.params.metric_name_regex, self.params.metric_replacement_character) - -- use stream connector method to format the metric event - format_metric(metrics_info[metric]) + if string.match(metric_data.metric_name, self.params.accepted_metrics) then + metrics_info[metric].metric_name = string.gsub(metric_data.metric_name, self.params.metric_name_regex, self.params.metric_replacement_character) + -- use stream connector method to format the metric event + format_metric(metrics_info[metric]) + else + self.sc_logger:debug("[ScMetric:build_metric]: metric name is filtered out: " .. tostring(metric_data.metric_name) .. ". Metric name filter is: " .. 
tostring(self.params.accepted_metrics))
+    end
   end
 end
 
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
index 6732ddd18c5..1791520ae90 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
@@ -61,6 +61,7 @@ function sc_params.new(common, logger)
     accepted_bvs = "",
     accepted_pollers = "",
     accepted_authors = "",
+    accepted_metrics = ".*",
     service_severity_threshold = nil,
     service_severity_operator = ">=",
     host_severity_threshold = nil,
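Because build_metric applies `accepted_metrics` through `string.match`, any Lua pattern works as a whitelist; the `.*` default keeps every metric. A quick illustration with hypothetical metric names:

```lua
-- string.match() against a Lua pattern is how build_metric() filters metrics.
-- Hypothetical names; "^traffic_" keeps only metrics whose name starts with traffic_.
local accepted_metrics = "^traffic_"

for _, name in ipairs({ "traffic_in", "traffic_out", "cpu_load" }) do
  if string.match(name, accepted_metrics) then
    print(name .. " is kept")          -- traffic_in, traffic_out
  else
    print(name .. " is filtered out")  -- cpu_load
  end
end
```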

diff --git a/stream-connectors/modules/docs/sc_metrics.md b/stream-connectors/modules/docs/sc_metrics.md
index ef183c49ba3..cac7e0ad81c 100644
--- a/stream-connectors/modules/docs/sc_metrics.md
+++ b/stream-connectors/modules/docs/sc_metrics.md
@@ -1,32 +1,32 @@
 # Documentation of the sc_metrics module
 
-- [Documentation of the sc_flush module](#documentation-of-the-sc_flush-module)
+- [Documentation of the sc\_metrics module](#documentation-of-the-sc_metrics-module)
   - [Introduction](#introduction)
   - [Module initialization](#module-initialization)
   - [module constructor](#module-constructor)
   - [constructor: Example](#constructor-example)
-  - [is_valid_bbdo_element method](#is_valid_bbdo_element-method)
-  - [is_valid_bbdo_element: returns](#is_valid_bbdo_element-returns)
-  - [is_valid_bbdo_element: example](#is_valid_bbdo_element-example)
-  - [is_valid_metric_event method](#is_valid_metric_event-method)
-  - [is_valid_metric_event: returns](#is_valid_metric_event-returns)
-  - [is_valid_metric_event: example](#is_valid_metric_event-example)
-  - [is_valid_host_metric_event method](#is_valid_host_metric_event-method)
-  - [is_valid_host_metric_event: returns](#is_valid_host_metric_event-returns)
-  - [is_valid_host_metric_event: example](#is_valid_host_metric_event-example)
-  - [is_valid_service_metric_event method](#is_valid_service_metric_event-method)
-  - [is_valid_service_metric_event: returns](#is_valid_service_metric_event-returns)
-  - [is_valid_service_metric_event: example](#is_valid_service_metric_event-example)
-  - [is_valid_kpi_metric_event method](#is_valid_kpi_metric_event-method)
-  - [is_valid_kpi_metric_event: returns](#is_valid_kpi_metric_event-returns)
-  - [is_valid_kpi_metric_event: example](#is_valid_kpi_metric_event-example)
-  - [is_valid_perfdata method](#is_valid_perfdata-method)
-  - [is_valid_perfdata parameters](#is_valid_perfdata-parameters)
-  - [is_valid_perfdata: returns](#is_valid_perfdata-returns)
-  - [is_valid_perfdata: example](#is_valid_perfdata-example)
-  - [build_metric method](#build_metric-method)
-  - [build_metric parameters](#build_metric-parameters)
-  - [build_metric: example](#build_metric-example)
+  - [is\_valid\_bbdo\_element method](#is_valid_bbdo_element-method)
+  - [is\_valid\_bbdo\_element: returns](#is_valid_bbdo_element-returns)
+  - [is\_valid\_bbdo\_element: example](#is_valid_bbdo_element-example)
+  - [is\_valid\_metric\_event method](#is_valid_metric_event-method)
+  - [is\_valid\_metric\_event: returns](#is_valid_metric_event-returns)
+  - [is\_valid\_metric\_event: example](#is_valid_metric_event-example)
+  - [is\_valid\_host\_metric\_event method](#is_valid_host_metric_event-method)
+  - [is\_valid\_host\_metric\_event: returns](#is_valid_host_metric_event-returns)
+  - [is\_valid\_host\_metric\_event: example](#is_valid_host_metric_event-example)
+  - [is\_valid\_service\_metric\_event method](#is_valid_service_metric_event-method)
+  - [is\_valid\_service\_metric\_event: returns](#is_valid_service_metric_event-returns)
+  - [is\_valid\_service\_metric\_event: example](#is_valid_service_metric_event-example)
+  - [is\_valid\_kpi\_metric\_event method](#is_valid_kpi_metric_event-method)
+  - [is\_valid\_kpi\_metric\_event: returns](#is_valid_kpi_metric_event-returns)
+  - [is\_valid\_kpi\_metric\_event: example](#is_valid_kpi_metric_event-example)
+  - [is\_valid\_perfdata method](#is_valid_perfdata-method)
+  - [is\_valid\_perfdata parameters](#is_valid_perfdata-parameters)
+  - [is\_valid\_perfdata: returns](#is_valid_perfdata-returns)
+  - [is\_valid\_perfdata: example](#is_valid_perfdata-example)
+  - [build\_metric method](#build_metric-method)
+  - [build\_metric parameters](#build_metric-parameters)
+  - [build\_metric: example](#build_metric-example)
 
 ## Introduction
 
@@ -242,7 +242,7 @@ local result = test_metrics:is_valid_perfdata(perfdata)
 
 ## build_metric method
 
-The **build_metric** method uses the provided stream connector format method to parse every metric in the event
+The **build_metric** method uses the provided stream connector format method to parse every metric in the event. It also filters out metrics based on their name and the **[accepted_metrics](sc_param.md#default-parameters)** parameter
 
 ### build_metric parameters
 
diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md
index 878bdeedfd8..5d5d8a78e57 100644
--- a/stream-connectors/modules/docs/sc_param.md
+++ b/stream-connectors/modules/docs/sc_param.md
@@ -46,6 +46,7 @@ The sc_param module provides methods to help you handle parameters for your stre
 | accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | |
 | accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | |
 | accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | |
+| accepted_metrics | string | `.*` | filter metrics based on their name. Use a Lua pattern to filter | metrics stream connectors | [lua pattern documentation](https://www.lua.org/pil/20.2.html) |
 | skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | |
 | skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | |
 | max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them. (bulk send is made using a value above 1). 
| | | diff --git a/stream-connectors/modules/specs/3.7.x copy/centreon-stream-connectors-lib-3.7.0-1.rockspec b/stream-connectors/modules/specs/3.7.x copy/centreon-stream-connectors-lib-3.7.0-1.rockspec new file mode 100644 index 00000000000..81d9db31cb6 --- /dev/null +++ b/stream-connectors/modules/specs/3.7.x copy/centreon-stream-connectors-lib-3.7.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.7.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.7.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} From 7986ed57937b5dfe8c6eb758d58d9b704afd6243 Mon Sep 17 00:00:00 2001 From: David Boucher Date: Wed, 2 Aug 2023 11:16:26 +0200 Subject: [PATCH 176/219] chore(doc): typo in modules/docs/sc_param.md (#154) --- stream-connectors/modules/docs/sc_param.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index 5d5d8a78e57..82d2235a6ee 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -83,7 +83,7 @@ The sc_param module provides methods to help you handle parameters for your stre | metric_name_regex | string | "" | the regex that will be used to transform the metric name to a compatible name for the software that will receive the data | service_status(neb), host_status(neb) | | | metric_replacement_character | string | "_" 
| the character that will be used to replace invalid characters in the metric name | service_status(neb), host_status(neb) | | | logfile | string | **check the stream connector documentation** | the logfile that will be used for the stream connector | any | | -| log_level | number | 1 | the verbosity level for the logs. 1 = error + notice, 2 = error + warning + notice, 3 = error + warning + notice + debug (you should avoir using level 3) | any | | +| log_level | number | 1 | the verbosity level for the logs. 1 = error + notice, 2 = error + warning + notice, 3 = error + warning + notice + debug (you should avoid using level 3) | any | | | log_curl_commands | number | 0 | log ready to use curl commands when enabled (0 = disabled, 1 = enabled) | any | | ## Module initialization From d5d945b00f97694a9da3489771176ad7ac1e5dc8 Mon Sep 17 00:00:00 2001 From: Lucie Dubrunfaut Date: Fri, 4 Aug 2023 14:51:23 +0200 Subject: [PATCH 177/219] Add function to handle with deprecated parameters --- .../sc_params.lua | 31 ++++++++++++++++--- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 1791520ae90..a3153796f05 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -738,6 +738,22 @@ function sc_params.new(common, logger) return self end +--- deprecated_params: check if param_name provides from the web configuration is deprecated or not +-- @param param_name (string) the name of a parameter from the web interface +-- @return final_param_name (string) the right name of the parameter to avoid deprecated ones. +function deprecated_params(param_name) + local final_param_name + + -- max_buffer_age param had been replace by max_all_queues_age + if param_name == "max_buffer_age" then + final_param_name = "max_all_queues_age" + else + final_param_name = param_name + end + + return final_param_name +end + --- param_override: change default param values with the one provides from the web configuration -- @param user_params (table) the table of all parameters from the web interface function ScParams:param_override(user_params) @@ -748,11 +764,18 @@ function ScParams:param_override(user_params) for param_name, param_value in pairs(user_params) do if self.params[param_name] or string.find(param_name, "^_sc") ~= nil then - self.params[param_name] = param_value - self.logger:notice("[sc_params:param_override]: overriding parameter: " .. tostring(param_name) .. " with value: " .. tostring(param_value)) - else + + -- Check if the param is deprecated + local param_name_verified = deprecated_params(param_name) + if param_name_verified ~= param_name then + self.logger:notice("[sc_params:param_override]: following parameter: " .. tostring(param_name) .. " is deprecated and had been replace by : " .. tostring(param_name_verified)) + end + + self.params[param_name_verified] = param_value + self.logger:notice("[sc_params:param_override]: overriding parameter: " .. tostring(param_name) .. " with value: " .. tostring(param_value)) + else self.logger:notice("[sc_params:param_override]: User parameter: " .. tostring(param_name) .. 
" is not handled by this stream connector") - end + end end end From 1e73c4d528c0b51c709dd6e9dd3270e96f467a48 Mon Sep 17 00:00:00 2001 From: Lucie Dubrunfaut Date: Fri, 4 Aug 2023 14:53:16 +0200 Subject: [PATCH 178/219] Work in progress --- .../modules/centreon-stream-connectors-lib/sc_params.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index a3153796f05..9d629920ca0 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -775,7 +775,7 @@ function ScParams:param_override(user_params) self.logger:notice("[sc_params:param_override]: overriding parameter: " .. tostring(param_name) .. " with value: " .. tostring(param_value)) else self.logger:notice("[sc_params:param_override]: User parameter: " .. tostring(param_name) .. " is not handled by this stream connector") - end + end end end From 6da89f250f547a97cda415194bb566b856fd35cb Mon Sep 17 00:00:00 2001 From: Lucie Dubrunfaut Date: Fri, 4 Aug 2023 15:04:38 +0200 Subject: [PATCH 179/219] Work in progress --- .../centreon-stream-connectors-lib/sc_params.lua | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 9d629920ca0..c59d5895381 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -746,12 +746,12 @@ function deprecated_params(param_name) -- max_buffer_age param had been replace by max_all_queues_age if param_name == "max_buffer_age" then - final_param_name = "max_all_queues_age" + return "max_all_queues_age" else - final_param_name = param_name + return param_name end - return final_param_name + end --- param_override: change default param values with the one provides from the web configuration @@ -768,14 +768,14 @@ function ScParams:param_override(user_params) -- Check if the param is deprecated local param_name_verified = deprecated_params(param_name) if param_name_verified ~= param_name then - self.logger:notice("[sc_params:param_override]: following parameter: " .. tostring(param_name) .. " is deprecated and had been replace by : " .. tostring(param_name_verified)) + self.logger:notice("[sc_params:param_override]: following parameter: " .. tostring(param_name) .. " is deprecated and had been replace by: " .. tostring(param_name_verified)) end self.params[param_name_verified] = param_value - self.logger:notice("[sc_params:param_override]: overriding parameter: " .. tostring(param_name) .. " with value: " .. tostring(param_value)) + self.logger:notice("[sc_params:param_override]: overriding parameter: " .. tostring(param_name_verified) .. " with value: " .. tostring(param_value)) else - self.logger:notice("[sc_params:param_override]: User parameter: " .. tostring(param_name) .. " is not handled by this stream connector") - end + self.logger:notice("[sc_params:param_override]: User parameter: " .. tostring(param_name_verified) .. 
" is not handled by this stream connector") + end end end From 1dc4b2bb5e7db12b1625d979d5f0ef0ae9d0eb24 Mon Sep 17 00:00:00 2001 From: Lucie Dubrunfaut Date: Fri, 4 Aug 2023 15:18:01 +0200 Subject: [PATCH 180/219] Work in progress --- .../modules/centreon-stream-connectors-lib/sc_params.lua | 1 - 1 file changed, 1 deletion(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index c59d5895381..49a34bf3722 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -751,7 +751,6 @@ function deprecated_params(param_name) return param_name end - end --- param_override: change default param values with the one provides from the web configuration From 09fc3f57fafc765174b5b65245bafc3d77e19cb1 Mon Sep 17 00:00:00 2001 From: tcharles Date: Sun, 10 Sep 2023 20:38:54 +0200 Subject: [PATCH 181/219] add influxdb2 metrics (#155) --- .../influxdb/influxdb2-metrics-apiv2.lua | 413 ++++++++++++++++++ 1 file changed, 413 insertions(+) create mode 100644 stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua diff --git a/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua b/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua new file mode 100644 index 00000000000..2e2eb6aae97 --- /dev/null +++ b/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua @@ -0,0 +1,413 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker influxdb Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") +local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "bucket_id", + "bucket_api_key", + "org_name", + "http_server_url" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/infuxdb2-metrics.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = 
sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.bucket_api_key = params.bucket_api_key + self.sc_params.params.bucket_id = params.bucket_id + self.sc_params.params.org_name = params.org_name + self.sc_params.params.http_server_url = params.http_server_url + self.sc_params.params.influxdb2_api_endpoint = params.influxdb2_api_endpoint or "/api/v2/write" + self.sc_params.params.influxdb2_precision = params.influxdb2_precision or "s" -- can be ms, s, us, ns [default] + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + -- according to https://docs.influxdata.com/influxdb/cloud/write-data/best-practices/optimize-writes/#batch-writes best practice is 5000 lines + self.sc_params.params.max_buffer_size = params.max_buffer_size or 5000 + self.sc_params.params.hard_only = params.hard_only or 0 + self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 + self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 + -- https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#special-characters + self.sc_params.params.metric_name_regex = params.metric_name_regex or "([, =])" + self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "\\%1" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + } + } + + self.format_metric = { + [categories.neb.id] = { + [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end, + [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_accepted_event method +-------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is not a misconfiguration, you should create a format file to handle this kind of element")
+  else
+    self.format_event[category][element]()
+  end
+
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+--- escape_special_characters: escape influxdb2 characters according to https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#special-characters
+-- @param string (string) the string that probably contains special characters
+-- @return (string) the string with escaped special characters
+function EventQueue:escape_special_characters(string)
+  local params = self.sc_params.params
+  return string.gsub(tostring(string), params.metric_name_regex, params.metric_replacement_character)
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_host method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+  self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric ")
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_service method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_service()
+  self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric ")
+  local event = self.sc_event.event
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_host method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_host(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_host]: start format_metric host")
+  self.sc_event.event.formated_event = metric.metric_name .. ",type=host," .. self:build_generic_tags(metric) .. " value=" .. metric.value .. " " .. self.sc_event.event.last_check
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_metric_host]: end format_metric host")
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_service method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_service(metric)
+  local params = self.sc_params.params
+  self.sc_logger:debug("[EventQueue:format_metric_service]: start format_metric service")
+  self.sc_event.event.formated_event = metric.metric_name .. ",type=service,service.name="
+    .. self:escape_special_characters(self.sc_event.event.cache.service.description)
+    .. "," .. self:build_generic_tags(metric) .. " value=" .. metric.value .. " " .. self.sc_event.event.last_check
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric service")
+end
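Each formatted event is a single InfluxDB line-protocol record: the measurement (the metric name), comma-separated tags, a single `value` field and an epoch timestamp whose unit must match `influxdb2_precision` (seconds by default). Illustrative records for the two formatters above, combined with the tag builder that follows (host, poller and service names are examples only):

```lua
-- Illustrative line-protocol records as built by format_metric_service()
-- and format_metric_host(); tag and field values are examples.
local service_line = "rta,type=service,service.name=Ping,host.name=central,poller=Central value=0.8 1690812636"
local host_line    = "rta,type=host,host.name=central,poller=Central value=0.8 1690812636"
```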
+
+--------------------------------------------------------------------------------
+---- EventQueue:build_generic_tags method
+-- @param metric {table} a single metric data
+-- @return tags {string} a comma-separated string of formatted tag pairs
+--------------------------------------------------------------------------------
+function EventQueue:build_generic_tags(metric)
+  local event = self.sc_event.event
+  local tags = 'host.name=' .. event.cache.host.name .. ',poller=' .. self:escape_special_characters(event.cache.poller)
+
+  -- add metric instance in tags
+  if metric.instance ~= "" then
+    tags = tags .. ',metric.instance=' .. self:escape_special_characters(metric.instance)
+  end
+
+  if metric.uom ~= "" then
+    tags = tags .. ',metric.unit=' .. metric.uom
+  end
+
+  -- add metric subinstances in tags
+  if metric.subinstance[1] then
+    for subinstance_name, subinstance_value in ipairs(metric.subinstance) do
+      tags = tags .. ',' .. self.sc_common:trim(subinstance_name, "_") .. '=' .. self:escape_special_characters(subinstance_value)
+    end
+  end
+
+  return tags
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {string} the payload under construction (line-protocol records)
+-- @param event {string} the formatted line-protocol record that is going to be added to the payload
+-- @return payload {string} the payload with the new record appended
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = event
+  else
+    payload = payload .. "\n" .. event
+  end
+
+  return payload
+end
+
+function EventQueue:send_data(payload, queue_metadata)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+  local params = self.sc_params.params
+
+  local url = params.http_server_url .. tostring(params.influxdb2_api_endpoint)
+    .. "?bucket=" .. tostring(params.bucket_id) .. "&org=" .. tostring(params.org_name)
+    .. "&precision=" .. tostring(params.influxdb2_precision)
+
+  queue_metadata.headers = {
+    "content-type: text/plain; charset=utf-8",
+    "accept: application/json",
+    "Authorization: Token " .. 
+
+function EventQueue:send_data(payload, queue_metadata)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+  local params = self.sc_params.params
+
+  local url = params.http_server_url .. tostring(params.influxdb2_api_endpoint)
+    .. "?bucket=" .. tostring(params.bucket_id) .. "&org=" .. tostring(params.org_name)
+    .. "&precision=" .. tostring(params.influxdb2_precision)
+
+  queue_metadata.headers = {
+    "content-type: text/plain; charset=utf-8",
+    "accept: application/json",
+    "Authorization: Token " .. tostring(params.bucket_api_key)
+  }
+
+  self.sc_logger:log_curl_command(url, queue_metadata, params, payload)
+
+  -- write payload in the logfile for testing purposes
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(payload))
+    return true
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following payload " .. tostring(payload))
+  self.sc_logger:info("[EventQueue:send_data]: Influxdb address is: " .. tostring(url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(payload)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  -- https://docs.influxdata.com/influxdb/cloud/api/#operation/PostWrite other than 204 is not good
+  if http_response_code == 204 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  end
+
+  return retval
+end
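
For readers unfamiliar with the lua-curl binding used above, here is a stripped-down sketch of the same POST pattern outside the EventQueue class. It assumes the Lua-cURLv3 binding is loadable as cURL; the URL, bucket, org and token are placeholders, not values from this connector:

```lua
local curl = require("cURL")

-- Placeholder endpoint, point and credentials (assumptions for the sketch).
local url = "http://localhost:8086/api/v2/write?bucket=mybucket&org=myorg&precision=s"
local payload = "cpu,type=host,host.name=srv-01 value=12 1699999999"
local http_response_body = ""

local http_request = curl.easy()
  :setopt_url(url)
  :setopt_writefunction(
    function (response)
      http_response_body = http_response_body .. tostring(response)
    end
  )
  :setopt(curl.OPT_HTTPHEADER, {
    "content-type: text/plain; charset=utf-8",
    "Authorization: Token my-api-token"
  })

http_request:setopt_postfields(payload)
http_request:perform()

-- InfluxDB v2 answers 204 No Content on a successful write.
local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
http_request:close()
print(http_response_code, http_response_body)
```
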
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger)
+  queue.sc_event = queue.sc_metrics.sc_event
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_metrics:is_valid_bbdo_element() then
+      -- format event if it is validated
+      if queue.sc_metrics:is_valid_metric_event() then
+        queue:format_accepted_event()
+      end
+    --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
\ No newline at end of file
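
The write/flush split above is the standard stream-connector pattern: write only validates, formats and queues, while flush decides when the queues actually leave the machine. The decision boils down to two triggers, a global age limit and a size limit; a toy sketch of the same logic under the parameter names used above:

```lua
-- Toy re-implementation of the two flush triggers used by flush().
local function must_flush(queues_size, last_global_flush, params)
  if queues_size == 0 then
    return false -- nothing to flush
  end
  -- trigger 1: the last global flush is too old
  if last_global_flush < os.time() - params.max_all_queues_age then
    return true
  end
  -- trigger 2: too many events are waiting in the queues
  return queues_size > params.max_buffer_size
end

print(must_flush(10, os.time() - 120, { max_all_queues_age = 60, max_buffer_size = 1000 })) --> true
print(must_flush(10, os.time(), { max_all_queues_age = 60, max_buffer_size = 1000 }))       --> false
```
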
From e3e45ec6957775a973b01d2b5b5c0e53b07bb4a8 Mon Sep 17 00:00:00 2001
From: Lucie Dubrunfaut
Date: Wed, 20 Sep 2023 14:51:47 +0200
Subject: [PATCH 182/219] Set deprecated_params as a local function and convert
 the list of deprecated params into a table to avoid too heavy an if-else
 chain

---
 .../sc_params.lua | 21 ++++++++++++-------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
index 49a34bf3722..e65c2b55855 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
@@ -732,7 +732,6 @@ function sc_params.new(common, logger)
   -- acknowledgement status mapping
   self.params.status_mapping[categories.neb.id][elements.acknowledgement.id].host_status = self.params.status_mapping[categories.neb.id][elements.host_status.id]
   self.params.status_mapping[categories.neb.id][elements.acknowledgement.id].service_status = self.params.status_mapping[categories.neb.id][elements.service_status.id]
-
   setmetatable(self, { __index = ScParams })
 
   return self
@@ -740,16 +739,22 @@ end
 
 --- deprecated_params: check if a param_name provided by the web configuration is deprecated or not
 -- @param param_name (string) the name of a parameter from the web interface
---- @return final_param_name (string) the right name of the parameter to avoid deprecated ones.
-function deprecated_params(param_name)
+-- @return new_param_name (string) the right name of the parameter if a match is found with a deprecated one. Otherwise, param_name is returned.
+local function deprecated_params(param_name)
   local final_param_name
 
-  -- max_buffer_age param had been replace by max_all_queues_age
-  if param_name == "max_buffer_age" then
-    return "max_all_queues_age"
-  else
-    return param_name
+  -- initiate deprecated parameters table
+  local deprecated_params = {
+    -- max_buffer_age param had been replace by max_all_queues_age
+    ["max_buffer_age"] = "max_all_queues_age"
+  }
+
+  for deprecated_param_name, new_param_name in pairs(deprecated_params) do
+    if param_name == deprecated_param_name then
+      return new_param_name
+    end
   end
+
+  return param_name
 end

From edc8b803a3c411fa83bea4a9cd93401e3498278f Mon Sep 17 00:00:00 2001
From: Lucie Dubrunfaut <123162035+lucie-dubrunfaut@users.noreply.github.com>
Date: Fri, 22 Sep 2023 09:51:59 +0200
Subject: [PATCH 183/219] Update modules/centreon-stream-connectors-lib/sc_params.lua

Co-authored-by: tcharles
---
 .../modules/centreon-stream-connectors-lib/sc_params.lua | 2 --
 1 file changed, 2 deletions(-)

diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
index e65c2b55855..274c22769c0 100644
--- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua
@@ -741,8 +741,6 @@ end
 -- @param param_name (string) the name of a parameter from the web interface
 -- @return new_param_name (string) the right name of the parameter if a match is found with a deprecated one. Otherwise, param_name is returned.
local function deprecated_params(param_name) - local final_param_name - -- initiate deprecated parameters table local deprecated_params = { -- max_buffer_age param had been replace by max_all_queues_age From cccf0b390609aabf71ce63251e0d023bdd8c0d4f Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Mon, 9 Oct 2023 15:18:05 +0200 Subject: [PATCH 184/219] enh(ci): use new structure to deliver on artifactory (#159) --- .../.github/actions/deb-delivery/action.yml | 7 +++++-- .../.github/actions/rpm-delivery/action.yml | 4 ++-- .../.github/workflows/docker-packaging.yml | 2 +- .../.github/workflows/get-environment.yml | 2 +- .../workflows/stream-connectors-dependencies.yml | 13 +++++++------ .../.github/workflows/stream-connectors-lib.yml | 9 +++++---- .../.github/workflows/stream-connectors.yml | 11 ++++++----- 7 files changed, 27 insertions(+), 21 deletions(-) diff --git a/stream-connectors/.github/actions/deb-delivery/action.yml b/stream-connectors/.github/actions/deb-delivery/action.yml index 6b3ef265a1e..602e2a906b4 100644 --- a/stream-connectors/.github/actions/deb-delivery/action.yml +++ b/stream-connectors/.github/actions/deb-delivery/action.yml @@ -1,6 +1,9 @@ name: "deb-delivery" description: "Deliver deb packages" inputs: + module_name: + description: "The package module name" + required: true distrib: description: "The distribution used for packaging" required: true @@ -18,7 +21,7 @@ runs: using: "composite" steps: - name: Use cache DEB files - uses: actions/cache@v3 + uses: actions/cache/restore@v3 with: path: ./*.deb key: ${{ inputs.cache_key }} @@ -38,6 +41,6 @@ runs: ARCH=$(echo $FILE | cut -d '_' -f3 | cut -d '.' -f1) - jf rt upload "$FILE" "apt-plugins-${{ inputs.stability }}/pool/" --deb "${{ inputs.distrib }}/main/$ARCH" + jf rt upload "$FILE" "apt-plugins-${{ inputs.stability }}/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" done shell: bash diff --git a/stream-connectors/.github/actions/rpm-delivery/action.yml b/stream-connectors/.github/actions/rpm-delivery/action.yml index 17ae85ca21a..4a8343d6549 100644 --- a/stream-connectors/.github/actions/rpm-delivery/action.yml +++ b/stream-connectors/.github/actions/rpm-delivery/action.yml @@ -21,7 +21,7 @@ runs: using: "composite" steps: - name: Use cache RPM files - uses: actions/cache@v3 + uses: actions/cache/restore@v3 with: path: ./*.rpm key: ${{ inputs.cache_key }} @@ -63,7 +63,7 @@ runs: for ARCH in "noarch" "x86_64"; do if [ "$(ls -A $ARCH)" ]; then if [ "${{ inputs.stability }}" == "stable" ]; then - jf rt upload "$ARCH/*.rpm" "rpm-plugins/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/" --flat + jf rt upload "$ARCH/*.rpm" "rpm-plugins/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/RPMS/${{ inputs.module_name }}/" --flat else jf rt upload "$ARCH/*.rpm" "rpm-plugins/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --flat fi diff --git a/stream-connectors/.github/workflows/docker-packaging.yml b/stream-connectors/.github/workflows/docker-packaging.yml index b2dd069ba37..8b2952c0bba 100644 --- a/stream-connectors/.github/workflows/docker-packaging.yml +++ b/stream-connectors/.github/workflows/docker-packaging.yml @@ -24,7 +24,7 @@ jobs: distrib: [alma8, alma9, bullseye] steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Login to registry uses: docker/login-action@v2 diff --git a/stream-connectors/.github/workflows/get-environment.yml b/stream-connectors/.github/workflows/get-environment.yml index 
8717e0ba891..7d4cb1079de 100644 --- a/stream-connectors/.github/workflows/get-environment.yml +++ b/stream-connectors/.github/workflows/get-environment.yml @@ -12,7 +12,7 @@ jobs: stability: ${{ steps.get_environment.outputs.stability }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - id: get_environment run: | diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml index 31e1521efe6..a310ccfec13 100644 --- a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml +++ b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml @@ -43,7 +43,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install dependencies run: | @@ -84,7 +84,7 @@ jobs: - if: ${{ matrix.lib == 'lua-curl' }} name: Checkout sources of lua-curl - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: repository: Lua-cURL/Lua-cURLv3 path: lua-curl-src @@ -174,7 +174,7 @@ jobs: password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install dependencies run: | @@ -255,7 +255,7 @@ jobs: - run: apt-get install -y zstd shell: bash - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/cache@v3 with: @@ -285,7 +285,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Publish RPM packages uses: ./.github/actions/rpm-delivery @@ -308,11 +308,12 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Publish DEB packages uses: ./.github/actions/deb-delivery with: + module_name: stream-connectors-dependencies distrib: ${{ matrix.distrib }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.lib }}-${{ matrix.distrib }} diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/stream-connectors/.github/workflows/stream-connectors-lib.yml index 93f47575cba..3abdfa73a5b 100644 --- a/stream-connectors/.github/workflows/stream-connectors-lib.yml +++ b/stream-connectors/.github/workflows/stream-connectors-lib.yml @@ -43,7 +43,7 @@ jobs: password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - if: ${{ matrix.package_extension == 'rpm' }} run: | @@ -108,7 +108,7 @@ jobs: - run: apt-get install -y zstd shell: bash - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/cache@v3 with: @@ -137,7 +137,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Publish RPM packages uses: ./.github/actions/rpm-delivery @@ -159,11 +159,12 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Publish DEB packages uses: ./.github/actions/deb-delivery with: + module_name: stream-connectors-lib distrib: ${{ matrix.distrib }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} diff --git a/stream-connectors/.github/workflows/stream-connectors.yml b/stream-connectors/.github/workflows/stream-connectors.yml index 9fc33dc28e1..3c398c77a06 100644 --- a/stream-connectors/.github/workflows/stream-connectors.yml +++ b/stream-connectors/.github/workflows/stream-connectors.yml @@ -25,7 +25,7 @@ jobs: outputs: connectors: 
${{ steps.list-connectors.outputs.connectors }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - uses: dorny/paths-filter@v2
         id: filter
@@ -78,7 +78,7 @@
       password: ${{ secrets.DOCKER_REGISTRY_PASSWD }}
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: Add specific dependencies
         id: list-dependencies
@@ -197,7 +197,7 @@
       - run: apt-get install -y zstd
         shell: bash
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - uses: actions/cache@v3
         with:
@@ -227,7 +227,7 @@
 
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Publish RPM packages
         uses: ./.github/actions/rpm-delivery
@@ -250,11 +250,12 @@
 
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Publish DEB packages
         uses: ./.github/actions/deb-delivery
         with:
+          module_name: stream-connectors
           distrib: ${{ matrix.distrib }}
           artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }}
           cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.connector_path }}-${{ matrix.distrib }}

From 96de7da640fe84ece3db7c2f439abe62fba80827 Mon Sep 17 00:00:00 2001
From: hamzabessa
Date: Fri, 27 Oct 2023 12:35:37 +0100
Subject: [PATCH 185/219] feat: add linting to workflow and actions files

---
 .../.github/workflows/actionlint.yml | 70 +++++++++++++++++++
 1 file changed, 70 insertions(+)
 create mode 100644 stream-connectors/.github/workflows/actionlint.yml

diff --git a/stream-connectors/.github/workflows/actionlint.yml b/stream-connectors/.github/workflows/actionlint.yml
new file mode 100644
index 00000000000..8ae2a172240
--- /dev/null
+++ b/stream-connectors/.github/workflows/actionlint.yml
@@ -0,0 +1,70 @@
+name: actionlint
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+on:
+  pull_request:
+    branches:
+      - develop
+      - dev-[2-9][0-9].[0-9][0-9].x
+      - master
+      - "[2-9][0-9].[0-9][0-9].x"
+      - hotfix-*
+      - release-*
+    paths:
+      - ".github/**"
+
+jobs:
+  actionlint:
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
+
+      - name: Download actionlint
+        id: get_actionlint
+        run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)
+        shell: bash
+
+      - name: Check workflow files
+        run: |
+          ${{ steps.get_actionlint.outputs.executable }} \
+            -ignore 'label "common" is unknown' \
+            -ignore 'label "veracode" is unknown' \
+            -ignore '"github.head_ref" is potentially untrusted' \
+            -shellcheck= \
+            -pyflakes= \
+            -color
+        shell: bash
+  yaml-lint:
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
+
+      - name: Install Yaml
+        run: |
+          pip install yamllint==1.32.0
+
+      - name: Add Yaml Lint Rules
+        run: |
+          cat <<EOF >> ./yamllint_rules.yml
+          extends: default
+
+          rules:
+            document-start: disable
+            line-length: disable
+            truthy:
+              check-keys: false
+              level: error
+            indentation:
+              spaces: 2
+              indent-sequences: true
+              check-multi-line-strings: false
+          EOF
+
+      - name: Lint YAML files
+        run: |
+          yamllint -c ./yamllint_rules.yml ./.github/actions/ ./.github/workflows/

From 3b0159bfa5f1a0753ff4d48f56e96b8bfb0542cf Mon Sep 17 00:00:00 2001
From: Kevin Duret
Date: Tue, 31 Oct 2023 10:12:23 +0100
Subject: [PATCH 186/219] Update .github/workflows/actionlint.yml

---
 stream-connectors/.github/workflows/actionlint.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/stream-connectors/.github/workflows/actionlint.yml
b/stream-connectors/.github/workflows/actionlint.yml index 8ae2a172240..c1c365d446a 100644 --- a/stream-connectors/.github/workflows/actionlint.yml +++ b/stream-connectors/.github/workflows/actionlint.yml @@ -8,9 +8,7 @@ on: pull_request: branches: - develop - - dev-[2-9][0-9].[0-9][0-9].x - master - - "[2-9][0-9].[0-9][0-9].x" - hotfix-* - release-* paths: From 8a74ebe9ede83bac44761a5293b98f0bb8bc2582 Mon Sep 17 00:00:00 2001 From: tcharles Date: Mon, 6 Nov 2023 14:32:31 +0100 Subject: [PATCH 187/219] fix deb packaging (#164) --- stream-connectors/.github/workflows/stream-connectors-lib.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/stream-connectors/.github/workflows/stream-connectors-lib.yml index 3abdfa73a5b..db14c314d4f 100644 --- a/stream-connectors/.github/workflows/stream-connectors-lib.yml +++ b/stream-connectors/.github/workflows/stream-connectors-lib.yml @@ -74,6 +74,7 @@ jobs: cd centreon-stream-connectors-lib-$PACKAGE_VERSION + apt-get update debmake -f "centreon" -e "contact@centreon.com" -y -r ${{ matrix.distrib }} debuild-pbuilder --no-lintian shell: bash From 0e9747061b3e28a2ed2404275ac10fe56d24ab8e Mon Sep 17 00:00:00 2001 From: sdepassio <114986849+sdepassio@users.noreply.github.com> Date: Wed, 8 Nov 2023 14:15:09 +0100 Subject: [PATCH 188/219] Mon 20969 - have the possibility to invert filters accepted hostgroups rejected hostgroups (#158) * Adding rejected_hostgroups parameter * Adding rejected_servicegroups parameter * update * update * update doc * fix * Update modules/centreon-stream-connectors-lib/sc_event.lua Co-authored-by: tcharles * Update modules/centreon-stream-connectors-lib/sc_event.lua Co-authored-by: tcharles * Update modules/centreon-stream-connectors-lib/sc_event.lua Co-authored-by: tcharles * Update modules/centreon-stream-connectors-lib/sc_event.lua Co-authored-by: tcharles * Update modules/centreon-stream-connectors-lib/sc_event.lua Co-authored-by: tcharles * Update modules/centreon-stream-connectors-lib/sc_event.lua Co-authored-by: tcharles * Update modules/centreon-stream-connectors-lib/sc_event.lua Co-authored-by: tcharles * Update modules/docs/sc_event.md Co-authored-by: tcharles * Update modules/docs/sc_event.md Co-authored-by: tcharles * Update modules/docs/sc_event.md Co-authored-by: tcharles * Update modules/docs/sc_event.md Co-authored-by: tcharles * Update modules/docs/sc_event.md Co-authored-by: tcharles * update (indentation) --------- Co-authored-by: tcharles --- .../sc_event.lua | 251 ++++++++++++------ .../sc_params.lua | 25 ++ stream-connectors/modules/docs/sc_event.md | 93 +++++-- stream-connectors/modules/docs/sc_param.md | 8 +- 4 files changed, 278 insertions(+), 99 deletions(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index 1b0e75dd93c..658b9560e38 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -446,45 +446,67 @@ end function ScEvent:is_valid_hostgroup() self.event.cache.hostgroups = self.sc_broker:get_hostgroups(self.event.host_id) - -- return true if option is not set - if self.params.accepted_hostgroups == "" then + -- return true if options are not set or if both options are set + local accepted_hostgroups_isnotempty = self.params.accepted_hostgroups ~= "" + local rejected_hostgroups_isnotempty = self.params.rejected_hostgroups ~= 
"" + if (not accepted_hostgroups_isnotempty and not rejected_hostgroups_isnotempty) or (accepted_hostgroups_isnotempty and rejected_hostgroups_isnotempty) then return true end -- return false if no hostgroups were found if not self.event.cache.hostgroups then - self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) - .. " is not linked to a hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups) - return false + if accepted_hostgroups_isnotempty then + self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) + .. " is not linked to a hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups ..".") + return false + elseif rejected_hostgroups_isnotempty then + self.sc_logger:debug("[sc_event:is_valid_hostgroup]: accepting event because host with id: " .. tostring(self.event.host_id) + .. " is not linked to a hostgroup. Rejected hostgroups are: " .. self.params.rejected_hostgroups ..".") + return true + end end - local accepted_hostgroup_name = self:find_hostgroup_in_list() + local accepted_hostgroup_name = self:find_hostgroup_in_list(self.params.accepted_hostgroups) + local rejected_hostgroup_name = self:find_hostgroup_in_list(self.params.rejected_hostgroups) -- return false if the host is not in a valid hostgroup - if not accepted_hostgroup_name then + if accepted_hostgroups_isnotempty and not accepted_hostgroup_name then self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) .. " is not in an accepted hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups) return false + elseif rejected_hostgroups_isnotempty and rejected_hostgroup_name then + self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) + .. " is in a rejected hostgroup. Rejected hostgroups are: " .. self.params.rejected_hostgroups) + return false else - self.sc_logger:debug("[sc_event:is_valid_hostgroup]: event for host with id: " .. tostring(self.event.host_id) - .. " matched hostgroup: " .. accepted_hostgroup_name) + local debug_msg = "[sc_event:is_valid_hostgroup]: event for host with id: " .. tostring(self.event.host_id) + if accepted_hostgroups_isnotempty then + debug_msg = debug_msg .. " matched hostgroup: " .. tostring(accepted_hostgroup_name) + elseif rejected_hostgroups_isnotempty then + debug_msg = debug_msg .. " did not match hostgroup: " .. 
tostring(rejected_hostgroup_name) + end + self.sc_logger:debug(debug_msg) end return true end --- find_hostgroup_in_list: compare accepted hostgroups from parameters with the event hostgroups --- @return accepted_name (string) the name of the first matching hostgroup +-- @param hostgroups_list (string) a coma separated list of hostgroup name +-- @return hostgroup_name (string) the name of the first matching hostgroup -- @return false (boolean) if no matching hostgroup has been found -function ScEvent:find_hostgroup_in_list() - for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_hostgroups, ",")) do - for _, event_hostgroup in pairs(self.event.cache.hostgroups) do - if accepted_name == event_hostgroup.group_name then - return accepted_name +function ScEvent:find_hostgroup_in_list(hostgroups_list) + if hostgroups_list == nil or hostgroups_list == "" then + return false + else + for _, hostgroup_name in ipairs(self.sc_common:split(hostgroups_list, ",")) do + for _, event_hostgroup in pairs(self.event.cache.hostgroups) do + if hostgroup_name == event_hostgroup.group_name then + return hostgroup_name + end end end - end - + end return false end @@ -492,45 +514,67 @@ end -- @return true|false (boolean) function ScEvent:is_valid_servicegroup() self.event.cache.servicegroups = self.sc_broker:get_servicegroups(self.event.host_id, self.event.service_id) - - -- return true if option is not set - if self.params.accepted_servicegroups == "" then + + -- return true if options are not set or if both options are set + local accepted_servicegroups_isnotempty = self.params.accepted_servicegroups ~= "" + local rejected_servicegroups_isnotempty = self.params.rejected_servicegroups ~= "" + if (not accepted_servicegroups_isnotempty and not rejected_servicegroups_isnotempty) or (accepted_servicegroups_isnotempty and rejected_servicegroups_isnotempty) then return true end -- return false if no servicegroups were found if not self.event.cache.servicegroups then - self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id) - .. " is not linked to a servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups) - return false + if accepted_servicegroups_isnotempty then + self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id) + .. " is not linked to a servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups ..".") + return false + elseif rejected_servicegroups_isnotempty then + self.sc_logger:debug("[sc_event:is_valid_servicegroup]: accepting event because service with id: " .. tostring(self.event.service_id) + .. " is not linked to a servicegroup. Rejected servicegroups are: " .. self.params.rejected_servicegroups ..".") + return true + end end - local accepted_servicegroup_name = self:find_servicegroup_in_list() + local accepted_servicegroup_name = self:find_servicegroup_in_list(self.params.accepted_servicegroups) + local rejected_servicegroup_name = self:find_servicegroup_in_list(self.params.rejected_servicegroups) - -- return false if the host is not in a valid servicegroup - if not accepted_servicegroup_name then + -- return false if the service is not in a valid servicegroup + if accepted_servicegroups_isnotempty and not accepted_servicegroup_name then self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id) .. 
" is not in an accepted servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups)
     return false
+  elseif rejected_servicegroups_isnotempty and rejected_servicegroup_name then
+    self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id)
+      .. " is in a rejected servicegroup. Rejected servicegroups are: " .. self.params.rejected_servicegroups)
+    return false
   end
-
-  self.sc_logger:debug("[sc_event:is_valid_servicegroup]: event for service with id: " .. tostring(self.event.service_id)
-    .. "matched servicegroup: " .. accepted_servicegroup_name)
+
+  local debug_msg = "[sc_event:is_valid_servicegroup]: event for service with id: " .. tostring(self.event.service_id)
+  if accepted_servicegroups_isnotempty then
+    debug_msg = debug_msg .. " matched servicegroup: " .. tostring(accepted_servicegroup_name)
+  elseif rejected_servicegroups_isnotempty then
+    debug_msg = debug_msg .. " did not match servicegroup: " .. tostring(rejected_servicegroup_name)
+  end
+  self.sc_logger:debug(debug_msg)
 
   return true
 end
 
 --- find_servicegroup_in_list: compare accepted servicegroups from parameters with the event servicegroups
---- @return accepted_name or false (string|boolean) the name of the first matching servicegroup if found or false if not found
-function ScEvent:find_servicegroup_in_list()
-  for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_servicegroups, ",")) do
-    for _, event_servicegroup in pairs(self.event.cache.servicegroups) do
-      if accepted_name == event_servicegroup.group_name then
-        return accepted_name
+-- @param servicegroups_list (string) a comma-separated list of servicegroup names
+-- @return servicegroup_name or false (string|boolean) the name of the first matching servicegroup if found or false if not found
+function ScEvent:find_servicegroup_in_list(servicegroups_list)
+  if servicegroups_list == nil or servicegroups_list == "" then
+    return false
+  else
+    for _, servicegroup_name in ipairs(self.sc_common:split(servicegroups_list, ",")) do
+      for _, event_servicegroup in pairs(self.event.cache.servicegroups) do
+        if servicegroup_name == event_servicegroup.group_name then
+          return servicegroup_name
+        end
       end
     end
-  end
-
+  end
   return false
 end
 
@@ -634,26 +678,39 @@ end
 -- @return true|false (boolean)
 function ScEvent:is_valid_bv()
   self.event.cache.bvs = self.sc_broker:get_bvs_infos(self.event.host_id)
-
-  -- return true if option is not set
-  if self.params.accepted_bvs == "" then
+
+  -- return true if options are not set or if both options are set
+  local accepted_bvs_isnotempty = self.params.accepted_bvs ~= ""
+  local rejected_bvs_isnotempty = self.params.rejected_bvs ~= ""
+  if (not accepted_bvs_isnotempty and not rejected_bvs_isnotempty) or (accepted_bvs_isnotempty and rejected_bvs_isnotempty) then
     return true
   end
 
-  -- return false if no bvs were found
+  -- return false if no bvs were found
   if not self.event.cache.bvs then
-    self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id)
-      .. " is not linked to a BV. Accepted BVs are: " .. self.params.accepted_bvs)
-    return false
+    if accepted_bvs_isnotempty then
+      self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because host with id: " .. tostring(self.event.host_id)
+        .. " is not linked to a BV. Accepted BVs are: " ..
self.params.accepted_bvs ..".") + return false + elseif rejected_bvs_isnotempty then + self.sc_logger:debug("[sc_event:is_valid_bv]: accepting event because host with id: " .. tostring(self.event.host_id) + .. " is not linked to a BV. Rejected BVs are: " .. self.params.rejected_bvs ..".") + return true + end end - local accepted_bv_name = self:find_bv_in_list() + local accepted_bv_name = self:find_bv_in_list(self.params.accepted_bvs) + local rejected_bv_name = self:find_bv_in_list(self.params.rejected_bvs) -- return false if the BA is not in a valid BV - if not accepted_bv_name then - self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id) + if accepted_bvs_isnotempty and not accepted_bv_name then + self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id) .. " is not in an accepted BV. Accepted BVs are: " .. self.params.accepted_bvs) return false + elseif rejected_bvs_isnotempty and rejected_bv_name then + self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id) + .. " is in a rejected BV. Rejected BVs are: " .. self.params.rejected_bvs) + return false else self.sc_logger:debug("[sc_event:is_valid_bv]: event for BA with id: " .. tostring(self.event.ba_id) .. "matched BV: " .. accepted_bv_name) @@ -663,17 +720,21 @@ function ScEvent:is_valid_bv() end --- find_bv_in_list: compare accepted BVs from parameters with the event BVs --- @return accepted_name (string) the name of the first matching BV +-- @param bvs_list (string) a coma separated list of BV name +-- @return bv_name (string) the name of the first matching BV -- @return false (boolean) if no matching BV has been found -function ScEvent:find_bv_in_list() - for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_bvs,",")) do - for _, event_bv in pairs(self.event.cache.bvs) do - if accepted_name == event_bv.bv_name then - return accepted_name +function ScEvent:find_bv_in_list(bvs_list) + if bvs_list == nil or bvs_list == "" then + return false + else + for _, bv_name in ipairs(self.sc_common:split(bvs_list,",")) do + for _, event_bv in pairs(self.event.cache.bvs) do + if bv_name == event_bv.bv_name then + return bv_name + end end end - end - + end return false end @@ -693,27 +754,39 @@ function ScEvent:is_valid_poller() id = self.event.cache.host.instance_id, name = self.event.cache.poller } - - -- return true if option is not set - if self.params.accepted_pollers == "" then + + -- return true if options are not set or if both options are set + local accepted_pollers_isnotempty = self.params.accepted_pollers ~= "" + local rejected_pollers_isnotempty = self.params.rejected_pollers ~= "" + if (not accepted_pollers_isnotempty and not rejected_pollers_isnotempty) or (accepted_pollers_isnotempty and rejected_pollers_isnotempty) then return true end - -- return false if no poller found in cache if not self.event.cache.poller then - self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id) - .. " is not linked to an accepted poller (no poller found in cache). Accepted pollers are: " .. self.params.accepted_pollers) - return false + if accepted_pollers_isnotempty then + self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id) + .. " is not linked to an accepted poller (no poller found in cache). Accepted pollers are: " .. 
self.params.accepted_pollers) + return false + elseif rejected_pollers_isnotempty then + self.sc_logger:debug("[sc_event:is_valid_poller]: accepting event because host with id: " .. tostring(self.event.host_id) + .. " is not linked to a rejected poller (no poller found in cache). Rejected pollers are: " .. self.params.rejected_pollers) + return true + end end - local accepted_poller_name = self:find_poller_in_list() + local accepted_poller_name = self:find_poller_in_list(self.params.accepted_pollers) + local rejected_poller_name = self:find_poller_in_list(self.params.rejected_pollers) -- return false if the host is not monitored from a valid poller - if not accepted_poller_name then + if accepted_pollers_isnotempty and not accepted_poller_name then self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id) .. " is not linked to an accepted poller. Host is monitored from: " .. tostring(self.event.cache.poller) .. ". Accepted pollers are: " .. self.params.accepted_pollers) return false + elseif rejected_pollers_isnotempty and rejected_poller_name then + self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id) + .. " is linked to a rejected poller. Host is monitored from: " .. tostring(self.event.cache.poller) .. ". Rejected pollers are: " .. self.params.rejected_pollers) + return false else self.sc_logger:debug("[sc_event:is_valid_poller]: event for host with id: " .. tostring(self.event.host_id) .. "matched poller: " .. accepted_poller_name) @@ -723,14 +796,18 @@ function ScEvent:is_valid_poller() end --- find_poller_in_list: compare accepted pollers from parameters with the event poller --- @return accepted_name or false (string|boolean) the name of the first matching poller if found or false if not found -function ScEvent:find_poller_in_list() - for _, accepted_name in ipairs(self.sc_common:split(self.params.accepted_pollers, ",")) do - if accepted_name == self.event.cache.poller then - return accepted_name +-- @param pollers_list (string) a coma separated list of poller name +-- @return poller_name or false (string|boolean) the name of the first matching poller if found or false if not found +function ScEvent:find_poller_in_list(pollers_list) + if pollers_list == nil or pollers_list == "" then + return false + else + for _, poller_name in ipairs(self.sc_common:split(pollers_list, ",")) do + if poller_name == self.event.cache.poller then + return poller_name + end end - end - + end return false end @@ -960,15 +1037,23 @@ end --- is_valid_author: check if the author of a comment is valid based on contact alias in Centreon -- return true|false (boolean) function ScEvent:is_valid_author() - -- do not handle authors if it is not configured - if self.params.accepted_authors == "" then + -- return true if options are not set or if both options are set + local accepted_authors_isnotempty = self.params.accepted_authors ~= "" + local rejected_authors_isnotempty = self.params.rejected_authors ~= "" + if (not accepted_authors_isnotempty and not rejected_authors_isnotempty) or (accepted_authors_isnotempty and rejected_authors_isnotempty) then return true end -- check if author is accepted - if not self:find_author_in_list() then + local accepted_author_name = self:find_author_in_list(self.params.accepted_authors) + local rejected_author_name = self:find_author_in_list(self.params.rejected_authors) + if accepted_authors_isnotempty and not accepted_author_name then 
self.sc_logger:debug("[sc_event:is_valid_author]: dropping event because author: " .. tostring(self.event.author) - .. " is not in an accepted authors list. Accepted authorss are: " .. self.params.accepted_authors) + .. " is not in an accepted authors list. Accepted authors are: " .. self.params.accepted_authors) + return false + elseif rejected_authors_isnotempty and rejected_author_name then + self.sc_logger:debug("[sc_event:is_valid_author]: dropping event because author: " .. tostring(self.event.author) + .. " is in a rejected authors list. Rejected authors are: " .. self.params.rejected_authors) return false end @@ -976,14 +1061,18 @@ function ScEvent:is_valid_author() end --- find_author_in_list: compare accepted authors from parameters with the event author +-- @param authors_list (string) a coma separeted list of author name -- @return accepted_alias or false (string|boolean) the alias of the first matching author if found or false if not found -function ScEvent:find_author_in_list() - for _, accepted_alias in ipairs(self.sc_common:split(self.params.accepted_authors, ",")) do - if accepted_alias == self.event.author then - return accepted_alias +function ScEvent:find_author_in_list(authors_list) + if authors_list == nil or authors_list == "" then + return false + else + for _, author_alias in ipairs(self.sc_common:split(authors_list, ",")) do + if author_alias == self.event.author then + return author_alias + end end end - return false end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 274c22769c0..7bebd803dc0 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -57,10 +57,15 @@ function sc_params.new(common, logger) -- objects filter accepted_hostgroups = "", + rejected_hostgroups = "", accepted_servicegroups = "", + rejected_servicegroups = "", accepted_bvs = "", + rejected_bvs = "", accepted_pollers = "", + rejected_pollers = "", accepted_authors = "", + rejected_authors = "", accepted_metrics = ".*", service_severity_threshold = nil, service_severity_operator = ">=", @@ -790,10 +795,15 @@ function ScParams:check_params() self.params.skip_anon_events = self.common:check_boolean_number_option_syntax(self.params.skip_anon_events, 1) self.params.skip_nil_id = self.common:check_boolean_number_option_syntax(self.params.skip_nil_id, 1) self.params.accepted_authors = self.common:if_wrong_type(self.params.accepted_authors, "string", "") + self.params.rejected_authors = self.common:if_wrong_type(self.params.rejected_authors, "string", "") self.params.accepted_hostgroups = self.common:if_wrong_type(self.params.accepted_hostgroups, "string", "") + self.params.rejected_hostgroups = self.common:if_wrong_type(self.params.rejected_hostgroups, "string", "") self.params.accepted_servicegroups = self.common:if_wrong_type(self.params.accepted_servicegroups, "string", "") + self.params.rejected_servicegroups = self.common:if_wrong_type(self.params.rejected_servicegroups, "string", "") self.params.accepted_bvs = self.common:if_wrong_type(self.params.accepted_bvs, "string", "") + self.params.rejected_bvs = self.common:if_wrong_type(self.params.rejected_bvs, "string", "") self.params.accepted_pollers = self.common:if_wrong_type(self.params.accepted_pollers, "string", "") + self.params.rejected_pollers = self.common:if_wrong_type(self.params.rejected_pollers, "string", "") 
self.params.host_severity_threshold = self.common:if_wrong_type(self.params.host_severity_threshold, "number", nil) self.params.service_severity_threshold = self.common:if_wrong_type(self.params.service_severity_threshold, "number", nil) self.params.host_severity_operator = self.common:if_wrong_type(self.params.host_severity_operator, "string", ">=") @@ -821,6 +831,21 @@ function ScParams:check_params() self.params.metric_name_regex = self.common:if_wrong_type(self.params.metric_name_regex, "string", "") self.params.metric_replacement_character = self.common:ifnil_or_empty(self.params.metric_replacement_character, "_") self.params.output_size_limit = self.common:if_wrong_type(self.params.output_size_limit, "number", "") + if self.params.accepted_hostgroups ~= '' and self.params.rejected_hostgroups ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_hostgroups and rejected_hostgroups cannot be used together. None will be used.") + end + if self.params.accepted_servicegroups ~= '' and self.params.rejected_servicegroups ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_servicegroups and rejected_servicegroups cannot be used together. None will be used.") + end + if self.params.accepted_bvs ~= '' and self.params.rejected_bvs ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_bvs and rejected_bvs cannot be used together. None will be used.") + end + if self.params.accepted_pollers ~= '' and self.params.rejected_pollers ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_pollers and rejected_pollers cannot be used together. None will be used.") + end + if self.params.accepted_authors ~= '' and self.params.rejected_authors ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_authors and rejected_authors cannot be used together. 
None will be used.") + end end --- get_kafka_params: retrieve the kafka parameters and store them the self.params.kafka table diff --git a/stream-connectors/modules/docs/sc_event.md b/stream-connectors/modules/docs/sc_event.md index 3fbc7ff09a5..2d44affbf62 100644 --- a/stream-connectors/modules/docs/sc_event.md +++ b/stream-connectors/modules/docs/sc_event.md @@ -73,9 +73,11 @@ - [is\_valid\_bv: returns](#is_valid_bv-returns) - [is\_valid\_bv: example](#is_valid_bv-example) - [find\_hostgroup\_in\_list method](#find_hostgroup_in_list-method) + - [find\_hostgroup\_in\_list: parameters](#find_hostgroup_in_list-parameters) - [find\_hostgroup\_in\_list: returns](#find_hostgroup_in_list-returns) - [find\_hostgroup\_in\_list: example](#find_hostgroup_in_list-example) - [find\_servicegroup\_in\_list method](#find_servicegroup_in_list-method) + - [find\_servicegroup\_in\_list: parameters](#find_servicegroup_in_list-parameters) - [find\_servicegroup\_in\_list: returns](#find_servicegroup_in_list-returns) - [find\_servicegroup\_in\_list: example](#find_servicegroup_in_list-example) - [find\_bv\_in\_list method](#find_bv_in_list-method) @@ -493,9 +495,9 @@ local result = test_event:is_valid_event_flapping_state() ## is_valid_hostgroup method -The **is_valid_hostgroup** method checks if the event is in a valid hostgroup based on [**accepted_hostgroups**](sc_param.md#default-parameters) in the **host_status or service_status** scope +The **is_valid_hostgroup** method checks if the event is in a valid hostgroup based on [**accepted_hostgroups or rejected_hostgroups**](sc_param.md#default-parameters) in the **host_status or service_status** scope -If the **accepted_hostgroup** is configured, all broker cache information regarding the hostgroups linked to a host will be added to the event in a cache.hostgroups table. More details about this cache table [**here**](sc_broker.md#get_hostgroups-example) +If the **accepted_hostgroups or rejected_hostgroups** is configured, all broker cache information regarding the hostgroups linked to a host will be added to the event in a cache.hostgroups table. More details about this cache table [**here**](sc_broker.md#get_hostgroups-example) ### is_valid_hostgroup: returns @@ -524,9 +526,9 @@ local result = test_event:is_valid_hostgroup() ## is_valid_servicegroup method -The **is_valid_servicegroup** method checks if the event is in a valid servicegroup based on [**accepted_servicegroups**](sc_param.md#default-parameters) in the **service_status** scope +The **is_valid_servicegroup** method checks if the event is in a valid servicegroup based on [**accepted_servicegroups or rejected_servicegroups**](sc_param.md#default-parameters) in the **service_status** scope -If the **accepted_servicegroup** is configured, all broker cache information regarding the servicegroups linked to a service will be added to the event in a cache.servicegroups table. More details about this cache table [**here**](sc_broker.md#get_servicegroups-example) +If the **accepted_servicegroup or rejected_servicegroups** is configured, all broker cache information regarding the servicegroups linked to a service will be added to the event in a cache.servicegroups table. 
More details about this cache table [**here**](sc_broker.md#get_servicegroups-example) ### is_valid_servicegroup: returns @@ -666,7 +668,7 @@ local result = test_event:is_valid_ba_acknowledge_state() ## is_valid_bv method -The **is_valid_bv** method checks if the event is linked to a valid BV based on [**accepted_bvs**](sc_param.md#default-parameters) in the **ba_status** scope +The **is_valid_bv** method checks if the event is linked to a valid BV based on [**accepted_bvs or rejected_bvs**](sc_param.md#default-parameters) in the **ba_status** scope If the **accepted_bvs** is configured, all broker cache information regarding the BVs linked to a service will be added to the event in a cache.bvs table. More details about this cache table [**here**](sc_broker.md#get_bvs_infos-example) @@ -697,7 +699,13 @@ local result = test_event:is_valid_bv() ## find_hostgroup_in_list method -The **find_hostgroup_in_list** method checks if one of the hostgroup in [**accepted_hostgroups**](sc_param.md#default-parameters) is linked to the host. +The **find_hostgroup_in_list** method checks if one of the hostgroup in the hostgroups list parameter ([**accepted_hostgroups or rejected_hostgroups parameters**](sc_param.md#default-parameters)) is linked to the host. + +### find_hostgroup_in_list: parameters + +| parameter | type | optional | default value | +|-------------------------------------| ------ | -------- | ------------- | +| a coma separated list of hostgroups | string | no | | ### find_hostgroup_in_list: returns @@ -712,19 +720,25 @@ The **find_hostgroup_in_list** method checks if one of the hostgroup in [**accep -- accepted_hostgroups are my_hostgroup_1 and my_hostgroup_2 -- host from event is linked to my_hostgroup_2 -local result = test_event:find_hostgroup_in_list() +local result = test_event:find_hostgroup_in_list(accepted_hostgroups) --> result is: "my_hostgroup_2" -- accepted_hostgroups are my_hostgroup_1 and my_hostgroup_2 -- host from is linked to my_hostgroup_2712 -result = test_event:find_hostgroup_in_list() +result = test_event:find_hostgroup_in_list(accepted_hostgroups) --> result is: false ``` ## find_servicegroup_in_list method -The **find_servicegroup_in_list** method checks if one of the servicegroup in [**accepted_servicegroups**](sc_param.md#default-parameters) is linked to the service. +The **find_servicegroup_in_list** method checks if one of the servicegroup in the servicegroups list parameter ([**accepted_servicegroups or rejected_servicegroups**](sc_param.md#default-parameters)) is linked to the service. 
+ +### find_servicegroup_in_list: parameters + +| parameter | type | optional | default value | +|----------------------------------------| ------ | -------- | ------------- | +| a coma separated list of servicegroups | string | no | | ### find_servicegroup_in_list: returns @@ -739,19 +753,25 @@ The **find_servicegroup_in_list** method checks if one of the servicegroup in [* -- accepted_servicegroups are my_servicegroup_1 and my_servicegroup_2 -- service from event is linked to my_servicegroup_2 -local result = test_event:find_servicegroup_in_list() +local result = test_event:find_servicegroup_in_list(accepted_servicegroups) --> result is: "my_servicegroup_2" -- accepted_servicegroups are my_servicegroup_1 and my_servicegroup_2 -- service from is linked to my_servicegroup_2712 -result = test_event:find_servicegroup_in_list() +result = test_event:find_servicegroup_in_list(accepted_servicegroups) --> result is: false ``` ## find_bv_in_list method -The **find_bv_in_list** method checks if one of the BV in [**accepted_bvs**](sc_param.md#default-parameters) is linked to the BA. +The **find_bv_in_list** method checks if one of the BV in the bvs list parameter ([**accepted_bvs or rejected_bvs**](sc_param.md#default-parameters)) is linked to the BA. + +### find_bv_in_list: parameters + +| parameter | type | optional | default value | +|------------------------------| ------ | -------- | ------------- | +| a coma separated list of bvs | string | no | | ### find_bv_in_list: returns @@ -766,19 +786,19 @@ The **find_bv_in_list** method checks if one of the BV in [**accepted_bvs**](sc_ -- accepted_bvs are my_bv_1 and my_bv_2 -- BA from event is linked to my_bv_2 -local result = test_event:find_bv_in_list() +local result = test_event:find_bv_in_list(accepted_bvs) --> result is: "my_bv_2" -- accepted_bvs are my_bv_1 and my_bv_2 -- BA from is linked to my_bv_2712 -result = test_event:find_bv_in_list() +result = test_event:find_bv_in_list(accepted_bvs) --> result is: false ``` ## is_valid_poller method -The **is_valid_poller** method checks if the event is monitored from an accepted poller based on [**accepted_pollers**](sc_param.md#default-parameters) in the **host_status or service_status** scope +The **is_valid_poller** method checks if the event is monitored from an accepted poller based on [**accepted_pollers or rejected_pollers**](sc_param.md#default-parameters) in the **host_status or service_status** scope If the **accepted_pollers** is configured, all broker cache information regarding the poller linked to a host will be added to the event in a cache.poller index. More details about this cache index [**here**](sc_broker.md#get_instance-example) @@ -807,7 +827,13 @@ local result = test_event:is_valid_poller() ## find_poller_in_list method -The **find_poller_in_list** method checks if one of the pollers in [**accepted_pollers**](sc_param.md#default-parameters) is monitoring the host. +The **find_poller_in_list** method checks if one of the pollers in the pollers list parameter ([**accepted_pollers or rejected_pollers**](sc_param.md#default-parameters)) is monitoring the host. 
+ +### find_poller_in_list: parameters + +| parameter | type | optional | default value | +|----------------------------------| ------ | -------- | ------------- | +| a coma separated list of pollers | string | no | | ### find_poller_in_list: returns @@ -1024,7 +1050,7 @@ The **get_downtime_service_status** method retrieve the status of the host in a ## is_valid_author method -The **is_valid_author** method checks if the author of a comment is valid according to the [**accepted_authors parameter**](sc_param.md#default-parameters). +The **is_valid_author** method checks if the author of a comment is valid according to the [**accepted_authors or rejected_authors parameter**](sc_param.md#default-parameters). ### is_valid_author: returns @@ -1039,6 +1065,39 @@ local result = test_event:is_valid_author() --> result is true or false ``` +## find_author_in_list method + +The **find_author_in_list** method checks if one of the author in the authors list parameter ([**accepted_authors or rejected_authors**](sc_param.md#default-parameters)) is the author of a comment. + +### find_author_in_list: parameters + +| parameter | type | optional | default value | +|----------------------------------| ------ | -------- | ------------- | +| a coma separated list of authors | string | no | | + +### find_author_in_list: returns + +| return | type | always | condition | +|--------------------------------------------| ------- | ------ |----------------------| +| the name of the first author that is found | string | no | an author must match | +| false | boolean | no | if no author matched | + +### find_author_in_list: example + +```lua +-- accepted_authors are author_1 and author_2 +-- author_1 is the author of the comment + +local result = test_event:find_author_in_list(accepted_authors) +--> result is: "author_1" + +-- accepted_authors are author_1 and author_2 +-- author_3 is the author of the comment + +result = test_event:find_author_in_list(accepted_authors) +--> result is: false +``` + ## is_downtime_event_useless method The **is_downtime_event_useless** method checks if the downtime event is a true start or end of a downtime. 
diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index 82d2235a6ee..137e07da610 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -32,7 +32,7 @@ The sc_param module provides methods to help you handle parameters for your stre ### Default parameters | Parameter name | type | default value | description | default scope | additional information | -| --------------------------------------- | ------ | ----------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +|-----------------------------------------| ------ | ----------------------------------------------------------------------------- |----------------------------------------------------------------------------------------------------------------------------------------------| -------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | | accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a comma-separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) | | accepted_elements | string | host_status,service_status,ba_status | each event is linked to a broker element that we can use to filter events | | it is a comma-separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore. "Host status" becomes "host_status") | | host_status | string | 0,1,2 | comma-separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | @@ -43,9 +43,15 @@ The sc_param module provides methods to help you handle parameters for your stre | in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | | flapping | number | 0 | accept only events that aren't flapping (use 1 to accept flapping events too) | host_status(neb), service_status(neb) | | | accepted_hostgroups | string | | comma-separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| rejected_hostgroups | string | | comma-separated list of hostgroups that are rejected (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | | accepted_servicegroups | string | | comma-separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | +| rejected_servicegroups | string | | comma-separated list of servicegroups that are rejected (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | | accepted_bvs | string | | comma-separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | +| rejected_bvs | string | | comma-separated list of BVs that are rejected (for example: my_bv_1,my_bv_2) | ba_status(bam) | | | accepted_pollers | string | | comma-separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | +| rejected_pollers | string | | comma-separated list of pollers that are rejected (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | +| accepted_authors | string | | comma-separated list of authors that are accepted (for example: author_1,author_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | +| rejected_authors | string | | comma-separated list of authors that are rejected (for example: author_1,author_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | | accepted_metrics | string | `.*` | filter metrics based on their name. Use lua pattern to filter | metrics stream connectors | [lua pattern documentation](https://www.lua.org/pil/20.2.html) | | skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | | skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. 
YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | | From 7b80ef672e432c3166c0dc82c3678d1221ad5066 Mon Sep 17 00:00:00 2001 From: tcharles Date: Wed, 8 Nov 2023 14:36:07 +0100 Subject: [PATCH 189/219] fix downtime ack ba_status bbdo3 (#163) * start elastic metric v2 * handle downtime event bbdo 2 and 3 * forgot compat for ba_status * forgot a very important detail * fix typo * another typo --------- Co-authored-by: Kevin Duret --- .../sc_common.lua | 14 ++ .../sc_event.lua | 39 +++- .../sc_params.lua | 209 +++++++++++++++--- stream-connectors/modules/docs/README.md | 1 + stream-connectors/modules/docs/sc_common.md | 114 ++++++---- 5 files changed, 297 insertions(+), 80 deletions(-) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index 663a7544f5d..be537c679b3 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -339,4 +339,18 @@ function ScCommon:trim(string, character) return result end +--- get the first digit of the bbdo protocol version +-- @return bbdo_version (number) the first digit of the bbdo version +function ScCommon:get_bbdo_version() + local bbdo_version + + if broker.bbdo_version ~= nil then + _, _, bbdo_version = string.find(broker.bbdo_version(), "(%d+).%d+.%d+") + else + bbdo_version = 2 + end + + return tonumber(bbdo_version) +end + return sc_common diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index 658b9560e38..c0aae805e30 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -25,6 +25,7 @@ function sc_event.new(event, params, common, logger, broker) self.params = params self.event = event self.sc_broker = broker + self.bbdo_version = self.sc_common:get_bbdo_version() self.event.cache = {} @@ -1236,18 +1237,30 @@ end --- is_valid_downtime_event_start: make sure that the event is the one notifying us that a downtime has just started -- @return true|false (boolean) function ScEvent:is_valid_downtime_event_start() - -- event is about the end of the downtime (actual_end_time key is not present in a start downtime event) - if self.event.actual_end_time then - self.sc_logger:debug("[sc_event:is_valid_downtime_event_start]: actual_end_time found in the downtime event. It can't be a downtime start event") + -- event is about the end of the downtime (actual_end_time key is not present in a start downtime bbdo2 event) + -- with bbdo3 the value is set to -1 + if (self.bbdo_version > 2 and self.event.actual_end_time ~= -1) or (self.bbdo_version == 2 and self.event.actual_end_time) then + self.sc_logger:debug("[sc_event:is_valid_downtime_event_start]: actual_end_time found in the downtime event (bbdo v2) or its value is not -1 (bbdo v3). It can't be a downtime start event") return false end - -- event hasn't actually started until the actual_start_time key is present in the start downtime event - if not self.event.actual_start_time then - self.sc_logger:debug("[sc_event:is_valid_downtime_event_start]: actual_start_time not found in the downtime event. The downtime hasn't yet started") + -- event hasn't actually started until the actual_start_time key is present in the start downtime bbdo 2 event + -- with bbdo3 the downtime is not started until the value is a valid timestamp + if (not self.event.actual_start_time and self.bbdo_version == 2) or (self.event.actual_start_time == -1 and self.bbdo_version > 2) then + self.sc_logger:debug("[sc_event:is_valid_downtime_event_start]: actual_start_time not found in the downtime event (or value set to -1). The downtime hasn't yet started") return false end + -- start compat patch bbdo2 => bbdo 3 + if (not self.event.internal_id and self.event.id) then + self.event.internal_id = self.event.id + end + + if (not self.event.id and self.event.internal_id) then + self.event.id = self.event.internal_id + end + -- end compat patch + return true end @@ -1255,12 +1268,22 @@ end -- @return true|false (boolean) function ScEvent:is_valid_downtime_event_end() -- event is about the end of the downtime (deletion_time key is only present in an end downtime event) - if self.event.deletion_time then + if (self.bbdo_version == 2 and self.event.deletion_time) or (self.bbdo_version > 2 and self.event.deletion_time ~= -1) then + -- start compat patch bbdo2 => bbdo 3 + if (not self.event.internal_id and self.event.id) then + self.event.internal_id = self.event.id + end + + if (not self.event.id and self.event.internal_id) then + self.event.id = self.event.internal_id + end + -- end compat patch + return true end -- any other downtime event is not about the actual end of a downtime so we return false - self.sc_logger:debug("[sc_event:is_valid_downtime_event_end]: deletion_time not found in the downtime event. The downtime event is not about the end of a downtime") + self.sc_logger:debug("[sc_event:is_valid_downtime_event_end]: deletion_time not found in the downtime event or equal to -1. 
The downtime event is not about the end of a downtime") return false end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index 7bebd803dc0..cf39efd07a3 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -26,13 +26,7 @@ function sc_params.new(common, logger) self.common = common -- get the version of the bbdo protocol (only the first digit, nothing else matters) - if broker.bbdo_version ~= nil then - _, _, self.bbdo_version = string.find(broker.bbdo_version(), "(%d+).%d+.%d+") - else - self.bbdo_version = 2 - end - - self.bbdo_version = tonumber(self.bbdo_version) + self.bbdo_version = self.common:get_bbdo_version() -- initiate params self.params = { @@ -174,6 +168,24 @@ function sc_params.new(common, logger) id = 24, name = "service_status" }, + acknowledgement = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 1, + name = "acknowledgement" + }, + downtime = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 5, + name = "downtime" + }, + ba_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 1, + name = "ba_status" + } }, [3] = { host_status = { @@ -187,17 +199,30 @@ function sc_params.new(common, logger) category_name = categories.neb.name, id = 29, name = "pb_service_status" + }, + acknowledgement = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 45, + name = "pb_acknowledgement" + }, + downtime = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 36, + name = "pb_downtime" + }, + ba_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 19, + name = "pb_ba_status" } } } self.params.bbdo.elements = { - acknowledgement = { - category_id = categories.neb.id, - category_name = categories.neb.name, - id = 1, - name = "acknowledgement" - }, + acknowledgement = bbdo2_bbdo3_compat_mapping[self.bbdo_version]["acknowledgement"], comment = { category_id = categories.neb.id, category_name = categories.neb.name, @@ -216,12 +241,7 @@ function sc_params.new(common, logger) id = 4, name = "custom_variable_status" }, - downtime = { - category_id = categories.neb.id, - category_name = categories.neb.name, - id = 5, - name = "downtime" - }, + downtime = bbdo2_bbdo3_compat_mapping[self.bbdo_version]["downtime"], event_handler = { category_id = categories.neb.id, category_name = categories.neb.name, @@ -386,6 +406,72 @@ function sc_params.new(common, logger) id = 34, name = "pb_tag" }, + pb_comment = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 35, + name = "pb_comment" + }, + pb_downtime = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 36, + name = "pb_downtime" + }, + pb_custom_variable = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 37, + name = "pb_custom_variable" + }, + pb_custom_variable_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 38, + name = "pb_custom_variable_status" + }, + pb_host_check = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 39, + name = "pb_host_check" + }, + pb_service_check = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 40, + name 
= "pb_service_check" + }, + pb_log_entry = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 41, + name = "pb_log_entry" + }, + pb_instance_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 42, + name = "pb_instance_status" + }, + pb_instance = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 44, + name = "pb_instance" + }, + pb_acknowledgement = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 45, + name = "pb_acknowledgement" + }, + pb_responsive_instance = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 46, + name = "pb_responsive_instance" + }, metric = { category_id = categories.storage.id, category_name = categories.storage.name, @@ -422,12 +508,7 @@ function sc_params.new(common, logger) id = 6, name = "metric_mapping" }, - ba_status = { - category_id = categories.bam.id, - category_name = categories.bam.name, - id = 1, - name = "ba_status" - }, + ba_status = bbdo2_bbdo3_compat_mapping[self.bbdo_version]["ba_status"], kpi_status = { category_id = categories.bam.id, category_name = categories.bam.name, @@ -523,6 +604,84 @@ function sc_params.new(common, logger) category_name = categories.bam.name, id = 17, name = "inherited_downtime" + }, + pb_inherited_downtime = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 18, + name = "pb_inherited_downtime" + }, + pb_ba_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 19, + name = "pb_ba_status" + }, + pb_ba_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 20, + name = "pb_ba_event" + }, + pb_kpi_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 20, + name = "pb_kpi_event" + }, + pb_dimension_bv_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 21, + name = "pb_dimension_bv_event" + }, + pb_dimension_ba_bv_relation_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 22, + name = "pb_dimension_ba_bv_relation_event" + }, + pb_dimension_timeperiod = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 23, + name = "pb_dimension_timeperiod" + }, + pb_dimension_ba_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 24, + name = "pb_dimension_ba_event" + }, + pb_dimension_kpi_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 25, + name = "pb_dimension_kpi_event" + }, + pb_kpi_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 26, + name = "pb_kpi_status" + }, + pb_ba_duration_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 27, + name = "pb_ba_duration_event" + }, + pb_dimension_ba_timeperiod_relation = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 28, + name = "pb_dimension_ba_timeperiod_relation" + }, + pb_dimension_truncate_table_signal = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 29, + name = "pb_dimension_truncate_table_signal" } } diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md index 5cf1fb96110..346dfd96ad3 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -47,6 +47,7 
@@ | lua_regex_escape | escape lua regex special characters in a string | [Documentation](sc_common.md#lua_regex_escape-method) | | dumper | dump any variable for debug purpose | [Documentation](sc_common.md#dumper-method) | | trim | trim spaces (or provided character) at the beginning and the end of a string | [Documentation](sc_common.md#trim-method) | +| get_bbdo_version | returns the first digit of the bbdo protocol version | [Documentation](sc_common.md#get_bbdo_version-method) | ## sc_logger methods diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md index 805ec769ce9..c7fd2159b5e 100644 --- a/stream-connectors/modules/docs/sc_common.md +++ b/stream-connectors/modules/docs/sc_common.md @@ -1,58 +1,58 @@ # Documentation of the sc_common module -- [Documentation of the sc_common module](#documentation-of-the-sc_common-module) +- [Documentation of the sc\_common module](#documentation-of-the-sc_common-module) - [Introduction](#introduction) - [Module initialization](#module-initialization) - [Module constructor](#module-constructor) - [constructor: Example](#constructor-example) - - [ifnil_or_empty method](#ifnil_or_empty-method) - - [ifnil_or_empty: parameters](#ifnil_or_empty-parameters) - - [ifnil_or_empty: returns](#ifnil_or_empty-returns) - - [ifnil_empty: example](#ifnil_empty-example) - - [if_wrong_type method](#if_wrong_type-method) - - [if_wrong_type: parameters](#if_wrong_type-parameters) - - [if_wrong_type: returns](#if_wrong_type-returns) - - [if_wrong_type: example](#if_wrong_type-example) - - [boolean_to_number method](#boolean_to_number-method) - - [boolean_to_number: parameters](#boolean_to_number-parameters) - - [boolean_to_number: returns](#boolean_to_number-returns) - - [boolean_to_number: example](#boolean_to_number-example) - - [number_to_boolean method](#number_to_boolean-method) - - [number_to_boolean: parameters](#number_to_boolean-parameters) - - [number_to_boolean: returns](#number_to_boolean-returns) - - [number_to_boolean: example](#number_to_boolean-example) - - [check_boolean_number_option_syntax method](#check_boolean_number_option_syntax-method) - - [check_boolean_number_option_syntax: parameters](#check_boolean_number_option_syntax-parameters) - - [check_boolean_number_option_syntax: returns](#check_boolean_number_option_syntax-returns) - - [check_boolean_number_option_syntax: example](#check_boolean_number_option_syntax-example) + - [ifnil\_or\_empty method](#ifnil_or_empty-method) + - [ifnil\_or\_empty: parameters](#ifnil_or_empty-parameters) + - [ifnil\_or\_empty: returns](#ifnil_or_empty-returns) + - [ifnil\_empty: example](#ifnil_empty-example) + - [if\_wrong\_type method](#if_wrong_type-method) + - [if\_wrong\_type: parameters](#if_wrong_type-parameters) + - [if\_wrong\_type: returns](#if_wrong_type-returns) + - [if\_wrong\_type: example](#if_wrong_type-example) + - [boolean\_to\_number method](#boolean_to_number-method) + - [boolean\_to\_number: parameters](#boolean_to_number-parameters) + - [boolean\_to\_number: returns](#boolean_to_number-returns) + - [boolean\_to\_number: example](#boolean_to_number-example) + - [number\_to\_boolean method](#number_to_boolean-method) + - [number\_to\_boolean: parameters](#number_to_boolean-parameters) + - [number\_to\_boolean: returns](#number_to_boolean-returns) + - [number\_to\_boolean: example](#number_to_boolean-example) + - [check\_boolean\_number\_option\_syntax method](#check_boolean_number_option_syntax-method) + - 
[check\_boolean\_number\_option\_syntax: parameters](#check_boolean_number_option_syntax-parameters) + - [check\_boolean\_number\_option\_syntax: returns](#check_boolean_number_option_syntax-returns) + - [check\_boolean\_number\_option\_syntax: example](#check_boolean_number_option_syntax-example) - [split method](#split-method) - [split: parameters](#split-parameters) - [split: returns](#split-returns) - [split: example](#split-example) - - [compare_numbers method](#compare_numbers-method) - - [compare_numbers: parameters](#compare_numbers-parameters) - - [compare_numbers: returns](#compare_numbers-returns) - - [compare_numbers: example](#compare_numbers-example) - - [generate_postfield_param_string method](#generate_postfield_param_string-method) - - [generate_postfield_param_string: parameters](#generate_postfield_param_string-parameters) - - [generate_postfield_param_string: returns](#generate_postfield_param_string-returns) - - [generate_postfield_param_string: example](#generate_postfield_param_string-example) - - [load_json_file method](#load_json_file-method) - - [load_json_file: parameters](#load_json_file-parameters) - - [load_json_file: returns](#load_json_file-returns) - - [load_json_file: example](#load_json_file-example) - - [json_escape method](#json_escape-method) - - [json_escape: parameters](#json_escape-parameters) - - [json_escape: returns](#json_escape-returns) - - [json_escape: example](#json_escape-example) - - [xml_escape method](#xml_escape-method) - - [xml_escape: parameters](#xml_escape-parameters) - - [xml_escape: returns](#xml_escape-returns) - - [xml_escape: example](#xml_escape-example) - - [lua_regex_escape method](#lua_regex_escape-method) - - [lua_regex_escape: parameters](#lua_regex_escape-parameters) - - [lua_regex_escape: returns](#lua_regex_escape-returns) - - [lua_regex_escape: example](#lua_regex_escape-example) + - [compare\_numbers method](#compare_numbers-method) + - [compare\_numbers: parameters](#compare_numbers-parameters) + - [compare\_numbers: returns](#compare_numbers-returns) + - [compare\_numbers: example](#compare_numbers-example) + - [generate\_postfield\_param\_string method](#generate_postfield_param_string-method) + - [generate\_postfield\_param\_string: parameters](#generate_postfield_param_string-parameters) + - [generate\_postfield\_param\_string: returns](#generate_postfield_param_string-returns) + - [generate\_postfield\_param\_string: example](#generate_postfield_param_string-example) + - [load\_json\_file method](#load_json_file-method) + - [load\_json\_file: parameters](#load_json_file-parameters) + - [load\_json\_file: returns](#load_json_file-returns) + - [load\_json\_file: example](#load_json_file-example) + - [json\_escape method](#json_escape-method) + - [json\_escape: parameters](#json_escape-parameters) + - [json\_escape: returns](#json_escape-returns) + - [json\_escape: example](#json_escape-example) + - [xml\_escape method](#xml_escape-method) + - [xml\_escape: parameters](#xml_escape-parameters) + - [xml\_escape: returns](#xml_escape-returns) + - [xml\_escape: example](#xml_escape-example) + - [lua\_regex\_escape method](#lua_regex_escape-method) + - [lua\_regex\_escape: parameters](#lua_regex_escape-parameters) + - [lua\_regex\_escape: returns](#lua_regex_escape-returns) + - [lua\_regex\_escape: example](#lua_regex_escape-example) - [dumper method](#dumper-method) - [dumper: parameters](#dumper-parameters) - [dumper: returns](#dumper-returns) @@ -61,6 +61,9 @@ - [trim: parameters](#trim-parameters) - [trim: 
returns](#trim-returns) - [trim: example](#trim-example) + - [get\_bbdo\_version method](#get_bbdo_version-method) + - [get\_bbdo\_version: returns](#get_bbdo_version-returns) + - [get\_bbdo\_version: example](#get_bbdo_version-example) ## Introduction @@ -522,8 +525,8 @@ The **trim** method removes spaces (or the specified character) at the beginning ### trim: returns -| return | type | always | condition | -| ------------------- | ------ | ------ | --------- | +| return | type | always | condition | +| -------------------- | ------ | ------ | --------- | | the trimmed variable | string | yes | | ### trim: example @@ -539,3 +542,20 @@ local string = ";;;;;;I'm no longer a space maaaaan;;;;;;;;;;;;;;" local result = test_common:trim(string, ";") --> result is: "I'm no longer a space maaaaan" ``` + +## get_bbdo_version method + +The **get_bbdo_version** method returns the first digit of the bbdo protocol version. + +### get_bbdo_version: returns + +| return | type | always | condition | +| ---------------- | ------ | ------ | --------- | +| the bbdo version | number | yes | | + +### get_bbdo_version: example + +```lua +local result = test_common:get_bbdo_version() +--> result is: 3 +``` From 5cf708e1892c56dcd38e4c6b8e0b25297326a6e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Chapron?= <34628915+sc979@users.noreply.github.com> Date: Thu, 23 Nov 2023 15:56:20 +0100 Subject: [PATCH 190/219] enh(chore): new code_owners (#165) --- stream-connectors/.github/CODEOWNERS | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 stream-connectors/.github/CODEOWNERS diff --git a/stream-connectors/.github/CODEOWNERS b/stream-connectors/.github/CODEOWNERS new file mode 100644 index 00000000000..9b986324e27 --- /dev/null +++ b/stream-connectors/.github/CODEOWNERS @@ -0,0 +1,8 @@ +* @centreon/owners-lua + +*.md @centreon/owners-doc +*.mdx @centreon/owners-doc + +.github/** @centreon/owners-pipelines +packaging/** @centreon/owners-pipelines +selinux/** @centreon/owners-pipelines From a4287ca67b1d303269d8b6ab65bf931bb888d4f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Chapron?= <34628915+sc979@users.noreply.github.com> Date: Wed, 20 Dec 2023 15:04:19 +0100 Subject: [PATCH 191/219] enh(chore): github actions hardening (#170) * enh(chore): github actions hardening * fix code owner * Revert "fix code owner" This reverts commit 7e5994e89aa8347761ed3936de66ce1b57b26415. 
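Returning to the **get_bbdo_version** method documented above: the version detection boils down to a single Lua pattern capture over the string returned by `broker.bbdo_version()`, with a fallback to 2 when that function is absent (older brokers do not expose it). A minimal sketch follows, with a stubbed `broker` table standing in for the API that Centreon Broker's Lua runtime normally provides; the stub and its return value are assumptions for the sake of a runnable example.

```lua
-- Stub standing in for the runtime-provided broker API (assumption:
-- broker.bbdo_version() returns a dotted version string such as "3.0.1").
local broker = { bbdo_version = function() return "3.0.1" end }

local bbdo_version
if broker.bbdo_version ~= nil then
  -- capture the major digit of an "X.Y.Z" version string
  _, _, bbdo_version = string.find(broker.bbdo_version(), "(%d+).%d+.%d+")
else
  bbdo_version = 2 -- brokers that predate bbdo_version() speak BBDO 2
end

print(tonumber(bbdo_version)) --> 3
```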
--- .../.github/actions/deb-delivery/action.yml | 4 ++-- .../.github/actions/rpm-delivery/action.yml | 4 ++-- stream-connectors/.github/dependabot.yml | 9 +++++++++ .../.github/workflows/actionlint.yml | 4 ++-- .../.github/workflows/docker-packaging.yml | 8 ++++---- .../.github/workflows/get-environment.yml | 2 +- .../stream-connectors-dependencies.yml | 20 +++++++++---------- .../workflows/stream-connectors-lib.yml | 16 +++++++-------- .../.github/workflows/stream-connectors.yml | 20 +++++++++---------- 9 files changed, 48 insertions(+), 39 deletions(-) create mode 100644 stream-connectors/.github/dependabot.yml diff --git a/stream-connectors/.github/actions/deb-delivery/action.yml b/stream-connectors/.github/actions/deb-delivery/action.yml index 602e2a906b4..16cbfc8fa6f 100644 --- a/stream-connectors/.github/actions/deb-delivery/action.yml +++ b/stream-connectors/.github/actions/deb-delivery/action.yml @@ -21,13 +21,13 @@ runs: using: "composite" steps: - name: Use cache DEB files - uses: actions/cache/restore@v3 + uses: actions/cache/restore@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.deb key: ${{ inputs.cache_key }} fail-on-cache-miss: true - - uses: jfrog/setup-jfrog-cli@v3 + - uses: jfrog/setup-jfrog-cli@901bb9632db90821c2d3f076012bdeaf66598555 # v3.4.1 env: JF_URL: https://centreon.jfrog.io JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} diff --git a/stream-connectors/.github/actions/rpm-delivery/action.yml b/stream-connectors/.github/actions/rpm-delivery/action.yml index 4a8343d6549..c87dd0b19cd 100644 --- a/stream-connectors/.github/actions/rpm-delivery/action.yml +++ b/stream-connectors/.github/actions/rpm-delivery/action.yml @@ -21,13 +21,13 @@ runs: using: "composite" steps: - name: Use cache RPM files - uses: actions/cache/restore@v3 + uses: actions/cache/restore@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.rpm key: ${{ inputs.cache_key }} fail-on-cache-miss: true - - uses: jfrog/setup-jfrog-cli@v3 + - uses: jfrog/setup-jfrog-cli@901bb9632db90821c2d3f076012bdeaf66598555 # v3.4.1 env: JF_URL: https://centreon.jfrog.io JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} diff --git a/stream-connectors/.github/dependabot.yml b/stream-connectors/.github/dependabot.yml new file mode 100644 index 00000000000..ff9fb1d3446 --- /dev/null +++ b/stream-connectors/.github/dependabot.yml @@ -0,0 +1,9 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: '/' + schedule: + interval: weekly + open-pull-requests-limit: 10 + labels: + - 'pr: dependencies' diff --git a/stream-connectors/.github/workflows/actionlint.yml b/stream-connectors/.github/workflows/actionlint.yml index c1c365d446a..61c74f5ffb7 100644 --- a/stream-connectors/.github/workflows/actionlint.yml +++ b/stream-connectors/.github/workflows/actionlint.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Download actionlint id: get_actionlint @@ -40,7 +40,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Install Yaml run: | diff --git a/stream-connectors/.github/workflows/docker-packaging.yml b/stream-connectors/.github/workflows/docker-packaging.yml index 8b2952c0bba..f7ff2300aa2 100644 --- a/stream-connectors/.github/workflows/docker-packaging.yml +++ 
b/stream-connectors/.github/workflows/docker-packaging.yml @@ -24,18 +24,18 @@ jobs: distrib: [alma8, alma9, bullseye] steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Login to registry - uses: docker/login-action@v2 + uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 with: registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - uses: docker/setup-buildx-action@v2 + - uses: docker/setup-buildx-action@885d1462b80bc1c1c7f0b00334ad271f09369c55 # v2.10.0 - - uses: docker/build-push-action@v3 + - uses: docker/build-push-action@1104d471370f9806843c095c1db02b5a90c5f8b6 # v3.3.1 with: file: .github/docker/Dockerfile.packaging-${{ matrix.distrib }} context: . diff --git a/stream-connectors/.github/workflows/get-environment.yml b/stream-connectors/.github/workflows/get-environment.yml index 7d4cb1079de..04db07b582f 100644 --- a/stream-connectors/.github/workflows/get-environment.yml +++ b/stream-connectors/.github/workflows/get-environment.yml @@ -12,7 +12,7 @@ jobs: stability: ${{ steps.get_environment.outputs.stability }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - id: get_environment run: | diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml index a310ccfec13..65d52db27f0 100644 --- a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml +++ b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml @@ -43,7 +43,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Install dependencies run: | @@ -84,7 +84,7 @@ jobs: - if: ${{ matrix.lib == 'lua-curl' }} name: Checkout sources of lua-curl - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: repository: Lua-cURL/Lua-cURLv3 path: lua-curl-src @@ -150,7 +150,7 @@ jobs: working-directory: dependencies/${{ matrix.lib }} shell: bash - - uses: actions/cache@v3 + - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.${{ matrix.package_extension }} key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.lib }}-${{ matrix.distrib }} @@ -174,7 +174,7 @@ jobs: password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Install dependencies run: | @@ -231,7 +231,7 @@ jobs: working-directory: dependencies/${{ matrix.lib }} shell: bash - - uses: actions/cache@v3 + - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.${{ matrix.package_extension }} key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.lib }}-${{ matrix.distrib }} @@ -255,9 +255,9 @@ jobs: - run: apt-get install -y zstd shell: bash - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/cache@v3 + - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.rpm key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} @@ -268,7 +268,7 @@ jobs: - run: 
rpmsign --addsign ./*.rpm shell: bash - - uses: actions/cache@v3 + - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.rpm key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} @@ -285,7 +285,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish RPM packages uses: ./.github/actions/rpm-delivery @@ -308,7 +308,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish DEB packages uses: ./.github/actions/deb-delivery diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/stream-connectors/.github/workflows/stream-connectors-lib.yml index db14c314d4f..d9b8c49c684 100644 --- a/stream-connectors/.github/workflows/stream-connectors-lib.yml +++ b/stream-connectors/.github/workflows/stream-connectors-lib.yml @@ -43,7 +43,7 @@ jobs: password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - if: ${{ matrix.package_extension == 'rpm' }} run: | @@ -80,13 +80,13 @@ jobs: shell: bash - if: ${{ matrix.package_extension == 'deb' }} - uses: actions/cache@v3 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.${{ matrix.package_extension }} key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} - if: ${{ matrix.package_extension == 'rpm' }} - uses: actions/cache@v3 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.${{ matrix.package_extension }} key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} @@ -109,9 +109,9 @@ jobs: - run: apt-get install -y zstd shell: bash - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/cache@v3 + - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.rpm key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} @@ -122,7 +122,7 @@ jobs: - run: rpmsign --addsign ./*.rpm shell: bash - - uses: actions/cache@v3 + - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.rpm key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} @@ -138,7 +138,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish RPM packages uses: ./.github/actions/rpm-delivery @@ -160,7 +160,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish DEB packages uses: ./.github/actions/deb-delivery diff --git a/stream-connectors/.github/workflows/stream-connectors.yml b/stream-connectors/.github/workflows/stream-connectors.yml index 3c398c77a06..d9d2fd1c3bb 100644 --- a/stream-connectors/.github/workflows/stream-connectors.yml +++ b/stream-connectors/.github/workflows/stream-connectors.yml @@ -25,9 +25,9 @@ jobs: outputs: connectors: ${{ steps.list-connectors.outputs.connectors }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: dorny/paths-filter@v2 + - uses: 
dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1 id: filter with: base: ${{ github.ref }} @@ -78,7 +78,7 @@ jobs: password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Add specific dependencies id: list-dependencies @@ -167,13 +167,13 @@ jobs: shell: bash - if: ${{ matrix.package_extension == 'deb' }} - uses: actions/cache@v3 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.${{ matrix.package_extension }} key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} - if: ${{ matrix.package_extension == 'rpm' }} - uses: actions/cache@v3 + uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.${{ matrix.package_extension }} key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} @@ -197,9 +197,9 @@ jobs: - run: apt-get install -y zstd shell: bash - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/cache@v3 + - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.rpm key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} @@ -210,7 +210,7 @@ jobs: - run: rpmsign --addsign ./*.rpm shell: bash - - uses: actions/cache@v3 + - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.rpm key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} @@ -227,7 +227,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish RPM packages uses: ./.github/actions/rpm-delivery @@ -250,7 +250,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish DEB packages uses: ./.github/actions/deb-delivery From 97f9545e5fba462620e9132c6ed7f684c8e6e62f Mon Sep 17 00:00:00 2001 From: hamzabessa <148857497+hamzabessa@users.noreply.github.com> Date: Wed, 3 Jan 2024 09:36:23 +0100 Subject: [PATCH 192/219] enh(ci): package centreon-stream-connector-scripts using nfpm (#168) * enh(ci): package centreon-stream-connector-scripts using nfpm * fix: install nfpm in docker containers * fix: remove condition to tests job run in ci * fix: pull and push path in connectors-lib workflow * fix: path * fix: nfpm action bash script * fix: path * fix: delete dependencies * fix: deb dependencies * fix: rpm dependencies * fix: delete job for signing rpm packages * delete: old packaging * fix: artifact upload set to false after tests * fix: missed dependencies * fix: var not used in nfpm packaging file * refactor: dockerfile for deb packaging * delete: paths for pull and push in the workflow * fix: for testing purposes * fix: re do dockerfile as it was before refactoring * Revert "fix: re do dockerfile as it was before refactoring" This reverts commit 58fb73d4b7aef0c84a9ce619584c42c1004b3a01. 
* fix: lua version for deb * fix: luaver for deb packaging * fix: dockerfile installations for deb * fix: lua version extraction * fix: lua packets installation on docker * fix: debian docker run fails * fix: lua installation errors * fix: pull and push paths and delete condition on package job * Update packaging/connectors-lib/centreon-stream-connectors-lib.yaml --------- Co-authored-by: Kevin Duret --- .../.github/actions/package-nfpm/action.yml | 94 ++++++++++++++ .../.github/docker/Dockerfile.packaging-alma8 | 8 +- .../.github/docker/Dockerfile.packaging-alma9 | 8 +- .../docker/Dockerfile.packaging-bullseye | 10 +- .../workflows/stream-connectors-lib.yml | 116 ++++-------------- .../centreon-stream-connectors-lib.yaml | 38 ++++++ .../packaging/connectors-lib/deb/control | 18 --- .../packaging/connectors-lib/deb/copyright | 23 ---- .../packaging/connectors-lib/deb/rules | 5 - .../connectors-lib/deb/source/format | 1 - .../rpm/centreon-stream-connectors-lib.spec | 44 ------- 11 files changed, 181 insertions(+), 184 deletions(-) create mode 100644 stream-connectors/.github/actions/package-nfpm/action.yml create mode 100644 stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml delete mode 100644 stream-connectors/packaging/connectors-lib/deb/control delete mode 100644 stream-connectors/packaging/connectors-lib/deb/copyright delete mode 100644 stream-connectors/packaging/connectors-lib/deb/rules delete mode 100644 stream-connectors/packaging/connectors-lib/deb/source/format delete mode 100644 stream-connectors/packaging/connectors-lib/rpm/centreon-stream-connectors-lib.spec diff --git a/stream-connectors/.github/actions/package-nfpm/action.yml b/stream-connectors/.github/actions/package-nfpm/action.yml new file mode 100644 index 00000000000..6c111273864 --- /dev/null +++ b/stream-connectors/.github/actions/package-nfpm/action.yml @@ -0,0 +1,94 @@ +name: package-nfpm +description: Package module using nfpm +inputs: + nfpm_file_pattern: + description: The pattern of the nfpm configuration file(s) + required: true + package_extension: + description: The package extension (deb or rpm) + required: true + distrib: + description: The package distrib + required: true + major_version: + description: The major version + required: false + minor_version: + description: The minor version + required: false + release: + description: The package release number + required: false + arch: + description: The package architecture + required: false + commit_hash: + description: The commit hash + required: true + cache_key: + description: The package files cache key + required: true + rpm_gpg_key: + description: The rpm gpg key + required: true + rpm_gpg_signing_key_id: + description: The rpm gpg signing key identifier + required: true + rpm_gpg_signing_passphrase: + description: The rpm gpg signing passphrase + required: true + +runs: + using: composite + + steps: + - name: Import gpg key + env: + RPM_GPG_SIGNING_KEY: ${{ inputs.rpm_gpg_key }} + run: echo -n "$RPM_GPG_SIGNING_KEY" > key.gpg + shell: bash + + - name: Build ${{ inputs.package_extension }} files + env: + RPM_GPG_SIGNING_KEY_ID: ${{ inputs.rpm_gpg_signing_key_id }} + RPM_GPG_SIGNING_PASSPHRASE: ${{ inputs.rpm_gpg_signing_passphrase }} + run: | + export ARCH="${{ inputs.arch }}" + + if [ "${{ inputs.package_extension }}" = "rpm" ]; then + export DIST=".${{ inputs.distrib }}" + else + export DIST="" + fi + + luaver=$(lua -e "print(string.sub(_VERSION, 5))" 2>/dev/null || echo 0) + echo "luaver is $luaver" + if [ $luaver = "0" 
]; then + echo "Cannot get lua version" + exit 1 + fi + + export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" + export RPM_SIGNING_KEY_ID="$RPM_GPG_SIGNING_KEY_ID" + export NFPM_RPM_PASSPHRASE="$RPM_GPG_SIGNING_PASSPHRASE" + + sed -i "s/@luaver@/$luaver/g" ./${{ inputs.nfpm_file_pattern }} + sed -i "s/@COMMIT_HASH@/${{ inputs.commit_hash }}/g" ./${{ inputs.nfpm_file_pattern }} + nfpm package --config ./${{ inputs.nfpm_file_pattern }} --packager ${{ inputs.package_extension }} + + shell: bash + + - name: Cache packages + uses: actions/cache/save@v3 + with: + path: ./*.${{ inputs.package_extension }} + key: ${{ inputs.cache_key }} + + # Update if condition to true to get packages as artifacts + - if: ${{ false }} + name: Upload package artifacts + uses: actions/upload-artifact@v3 + with: + name: packages-${{ inputs.distrib }} + path: ./*.${{ inputs.package_extension}} + retention-days: 1 diff --git a/stream-connectors/.github/docker/Dockerfile.packaging-alma8 b/stream-connectors/.github/docker/Dockerfile.packaging-alma8 index 22ea005e28d..08ab6795482 100644 --- a/stream-connectors/.github/docker/Dockerfile.packaging-alma8 +++ b/stream-connectors/.github/docker/Dockerfile.packaging-alma8 @@ -4,7 +4,13 @@ FROM ${REGISTRY_URL}/almalinux:8 RUN <> $GITHUB_ENV - shell: bash + name: package ${{ matrix.distrib }} - - run: rpmsign --addsign ./*.rpm - shell: bash + steps: + - name: Checkout sources + uses: actions/checkout@v4 - - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + - name: Package + uses: ./.github/actions/package-nfpm with: - path: ./*.rpm - key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} + nfpm_file_pattern: "packaging/connectors-lib/*.yaml" + distrib: ${{ matrix.distrib }} + package_extension: ${{ matrix.package_extension }} + arch: all + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, sign-rpm] + needs: [get-environment] runs-on: ubuntu-22.04 strategy: matrix: diff --git a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml new file mode 100644 index 00000000000..628310aa189 --- /dev/null +++ b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml @@ -0,0 +1,38 @@ +name: "centreon-stream-connectors-lib" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "3.6.0" +release: "2${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Those modules provides helpful methods to create stream connectors for Centreon + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "./modules/centreon-stream-connectors-lib" + dst: "/usr/share/lua/@luaver@/centreon-stream-connectors-lib" + +overrides: + rpm: + depends: + - lua-socket >= 3.0 + - centreon-broker-core >= 22.04.0 + - lua-curl + deb: + depends: + - "centreon-broker-core (>= 22.04.0)" + - "lua-socket (>= 3.0~)" + - "lua-curl" + - "lua5.4" + +rpm: + summary: Centreon stream connectors lua modules + signature: + key_file: 
${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/stream-connectors/packaging/connectors-lib/deb/control b/stream-connectors/packaging/connectors-lib/deb/control deleted file mode 100644 index 8a366c6385e..00000000000 --- a/stream-connectors/packaging/connectors-lib/deb/control +++ /dev/null @@ -1,18 +0,0 @@ -Source: centreon-stream-connectors-lib -Section: interpreters -Priority: optional -Maintainer: Centreon -Version: 3.6.0 -Build-Depends: - debhelper-compat (=12), - dh-lua (>= 21) -Standards-Version: 4.5.0 -Homepage: https://wwww.centreon.com - -Package: centreon-stream-connectors-lib -Architecture: all -Depends: - centreon-broker-core (>= 22.04.0), - lua-socket (>= 3.0~), - lua-curl -Description: Centreon stream connectors lib for lua modules diff --git a/stream-connectors/packaging/connectors-lib/deb/copyright b/stream-connectors/packaging/connectors-lib/deb/copyright deleted file mode 100644 index e874fbccb2b..00000000000 --- a/stream-connectors/packaging/connectors-lib/deb/copyright +++ /dev/null @@ -1,23 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: centreon-stream-connectors-lib -Upstream-Contact: Centreon -Source: https://www.centreon.com - -Files: * -Copyright: 2023 Centreon -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - https://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". 
- diff --git a/stream-connectors/packaging/connectors-lib/deb/rules b/stream-connectors/packaging/connectors-lib/deb/rules deleted file mode 100644 index edf5c6cdafc..00000000000 --- a/stream-connectors/packaging/connectors-lib/deb/rules +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/make -f - -%: - dh $@ --buildsystem=lua --with lua - diff --git a/stream-connectors/packaging/connectors-lib/deb/source/format b/stream-connectors/packaging/connectors-lib/deb/source/format deleted file mode 100644 index 163aaf8d82b..00000000000 --- a/stream-connectors/packaging/connectors-lib/deb/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/stream-connectors/packaging/connectors-lib/rpm/centreon-stream-connectors-lib.spec b/stream-connectors/packaging/connectors-lib/rpm/centreon-stream-connectors-lib.spec deleted file mode 100644 index e07fde0e8d2..00000000000 --- a/stream-connectors/packaging/connectors-lib/rpm/centreon-stream-connectors-lib.spec +++ /dev/null @@ -1,44 +0,0 @@ -%{!?luaver: %global luaver %(lua -e "print(string.sub(_VERSION, 5))" || echo 0)} -%global luapkgdir %{_datadir}/lua/%{luaver} - -Name: centreon-stream-connectors-lib -Version: 3.6.0 -Release: 1%{?dist} -Summary: Centreon stream connectors lua modules - -Group: Applications/System -License: Apache-2.0 -URL: https://www.centreon.com -Packager: Centreon -Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ - -Source0: %{name}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) -BuildArch: noarch - -BuildRequires: lua -BuildRequires: lua-devel - -Requires: centreon-broker-core >= 22.04.0 -Requires: lua-socket >= 3.0 -Requires: lua-curl - -%description -Those modules provides helpful methods to create stream connectors for Centreon - -%prep -%setup -q -n %{name} - -%build - -%install -%{__install} -d $RPM_BUILD_ROOT%{luapkgdir}/centreon-stream-connectors-lib -%{__cp} -pr ./* $RPM_BUILD_ROOT%{luapkgdir}/centreon-stream-connectors-lib - -%clean -%{__rm} -rf $RPM_BUILD_ROOT - -%files -%{luapkgdir}/centreon-stream-connectors-lib - -%changelog From 0f905f99d6ce56873498b6c5b0ddd2748a178d44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Chapron?= <34628915+sc979@users.noreply.github.com> Date: Thu, 18 Jan 2024 14:30:46 +0100 Subject: [PATCH 193/219] enh(ci): add consistent labels (#183) --- stream-connectors/.github/dependabot.yml | 3 ++- stream-connectors/.github/workflows/actionlint.yml | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/stream-connectors/.github/dependabot.yml b/stream-connectors/.github/dependabot.yml index ff9fb1d3446..3db635e65a3 100644 --- a/stream-connectors/.github/dependabot.yml +++ b/stream-connectors/.github/dependabot.yml @@ -6,4 +6,5 @@ updates: interval: weekly open-pull-requests-limit: 10 labels: - - 'pr: dependencies' + - 'dependencies' + - 'gha' diff --git a/stream-connectors/.github/workflows/actionlint.yml b/stream-connectors/.github/workflows/actionlint.yml index 61c74f5ffb7..021d34926bf 100644 --- a/stream-connectors/.github/workflows/actionlint.yml +++ b/stream-connectors/.github/workflows/actionlint.yml @@ -61,6 +61,13 @@ jobs: spaces: 2 indent-sequences: true check-multi-line-strings: false + comments: + ignore-shebangs: true + min-spaces-from-content: 1 + comments-indentation: disable + new-lines: + type: unix + new-line-at-end-of-file: enable EOF - name: Lint YAML files From 41bc2f93b1bf8e0dc4c57f5319d16b5e7c5ede4a Mon Sep 17 00:00:00 2001 From: tuntoja 
<58987095+tuntoja@users.noreply.github.com> Date: Thu, 18 Jan 2024 15:37:53 +0100 Subject: [PATCH 194/219] chore(ci): upgrade gh actions (#178) * Bump docker/login-action from 2.2.0 to 3.0.0 (#173) Bumps [docker/login-action](https://github.com/docker/login-action) from 2.2.0 to 3.0.0. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/465a07811f14bebb1938fbed4728c6a1ff8901fc...343f7c4344506bcbf9b4de18042ae17996df046d) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump docker/setup-buildx-action from 2.10.0 to 3.0.0 (#172) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2.10.0 to 3.0.0. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/885d1462b80bc1c1c7f0b00334ad271f09369c55...f95db51fddba0c2d1ec667646a06c2ce06100226) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump docker/build-push-action from 3.3.1 to 5.1.0 (#171) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 3.3.1 to 5.1.0. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/1104d471370f9806843c095c1db02b5a90c5f8b6...4a13e500e55cf31b7a5d59a38ab2040ab0f42f56) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * enh yaml linting conf * add cache's hash * Bump actions/cache from 3.3.2 to 3.3.3 (#180) Bumps [actions/cache](https://github.com/actions/cache) from 3.3.2 to 3.3.3. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/704facf57e6136b1bc63b828d79edcd491f0ee84...e12d46a63a90f2fae62d114769bbf2a179198b5c) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: schapron --- stream-connectors/.github/actions/package-nfpm/action.yml | 2 +- stream-connectors/.github/workflows/docker-packaging.yml | 6 +++--- .../.github/workflows/stream-connectors-dependencies.yml | 8 ++++---- stream-connectors/.github/workflows/stream-connectors.yml | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/stream-connectors/.github/actions/package-nfpm/action.yml b/stream-connectors/.github/actions/package-nfpm/action.yml index 6c111273864..971f3301105 100644 --- a/stream-connectors/.github/actions/package-nfpm/action.yml +++ b/stream-connectors/.github/actions/package-nfpm/action.yml @@ -79,7 +79,7 @@ runs: shell: bash - name: Cache packages - uses: actions/cache/save@v3 + uses: actions/cache/save@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 with: path: ./*.${{ inputs.package_extension }} key: ${{ inputs.cache_key }} diff --git a/stream-connectors/.github/workflows/docker-packaging.yml b/stream-connectors/.github/workflows/docker-packaging.yml index f7ff2300aa2..8b04c9e23aa 100644 --- a/stream-connectors/.github/workflows/docker-packaging.yml +++ b/stream-connectors/.github/workflows/docker-packaging.yml @@ -27,15 +27,15 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Login to registry - uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2.2.0 + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 with: registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - uses: docker/setup-buildx-action@885d1462b80bc1c1c7f0b00334ad271f09369c55 # v2.10.0 + - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - - uses: docker/build-push-action@1104d471370f9806843c095c1db02b5a90c5f8b6 # v3.3.1 + - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: file: .github/docker/Dockerfile.packaging-${{ matrix.distrib }} context: . 
diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml index 65d52db27f0..e9340788069 100644 --- a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml +++ b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml @@ -150,7 +150,7 @@ jobs: working-directory: dependencies/${{ matrix.lib }} shell: bash - - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: ./*.${{ matrix.package_extension }} key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.lib }}-${{ matrix.distrib }} @@ -231,7 +231,7 @@ jobs: working-directory: dependencies/${{ matrix.lib }} shell: bash - - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: ./*.${{ matrix.package_extension }} key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.lib }}-${{ matrix.distrib }} @@ -257,7 +257,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: ./*.rpm key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} @@ -268,7 +268,7 @@ jobs: - run: rpmsign --addsign ./*.rpm shell: bash - - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: ./*.rpm key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} diff --git a/stream-connectors/.github/workflows/stream-connectors.yml b/stream-connectors/.github/workflows/stream-connectors.yml index d9d2fd1c3bb..03888b42226 100644 --- a/stream-connectors/.github/workflows/stream-connectors.yml +++ b/stream-connectors/.github/workflows/stream-connectors.yml @@ -167,13 +167,13 @@ jobs: shell: bash - if: ${{ matrix.package_extension == 'deb' }} - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: ./*.${{ matrix.package_extension }} key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} - if: ${{ matrix.package_extension == 'rpm' }} - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: ./*.${{ matrix.package_extension }} key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} @@ -199,7 +199,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: ./*.rpm key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} @@ -210,7 +210,7 @@ jobs: - run: rpmsign --addsign ./*.rpm shell: bash - - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 with: path: ./*.rpm key: ${{ 
github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} From 5ceb10486c1ad7e5ed3f4a08d9a821012121c5c6 Mon Sep 17 00:00:00 2001 From: tcharles Date: Fri, 19 Jan 2024 15:51:39 +0100 Subject: [PATCH 195/219] Mon 16176 add elasticsearch metricv2 username (#156) * start elastic metric v2 * set user & password as optionnal params * add new params accepted_hosts and services * try to fix conflicts * change name of indexes (replace . by _) * better naming convention * add doc for params and new methods * fix pattern filter * remove debug and fix json * fix missing service_desc in payload * add min_max and thresholds dimension * fix metric host and add anti spam log system * improve error logging * Update modules/centreon-stream-connectors-lib/sc_event.lua Co-authored-by: cg-tw <83637804+cg-tw@users.noreply.github.com> * Update modules/centreon-stream-connectors-lib/sc_event.lua Co-authored-by: cg-tw <83637804+cg-tw@users.noreply.github.com> * Update modules/docs/README.md Co-authored-by: cg-tw <83637804+cg-tw@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: cg-tw <83637804+cg-tw@users.noreply.github.com> --------- Co-authored-by: cg-tw <83637804+cg-tw@users.noreply.github.com> --- .../elasticsearch/elastic-metrics-apiv2.lua | 181 +++++++++++++----- .../splunk/splunk-metrics-apiv2.lua | 2 +- .../sc_common.lua | 13 ++ .../sc_event.lua | 44 +++++ .../sc_params.lua | 65 +++++++ stream-connectors/modules/docs/README.md | 3 + stream-connectors/modules/docs/sc_common.md | 34 ++++ stream-connectors/modules/docs/sc_param.md | 116 ++++++----- 8 files changed, 366 insertions(+), 92 deletions(-) diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua index fd25b126d9e..821ee296aaa 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua @@ -37,12 +37,14 @@ function EventQueue.new(params) local self = {} local mandatory_parameters = { - "elastic_username", - "elastic_password", + -- "elastic_username", + -- "elastic_password", "http_server_url" } self.fail = false + self.last_fail_message_date = 0 + self.fail_message_counter = 0 -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/elastic-metrics.log" @@ -60,8 +62,8 @@ function EventQueue.new(params) end -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs - self.sc_params.params.elastic_username = params.elastic_username - self.sc_params.params.elastic_password = params.elastic_password + self.sc_params.params.elastic_username = params.elastic_username or "" + self.sc_params.params.elastic_password = params.elastic_password or "" self.sc_params.params.http_server_url = params.http_server_url self.sc_params.params.accepted_categories = params.accepted_categories or "neb" self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" @@ -84,6 +86,8 @@ function EventQueue.new(params) self.sc_params.params.add_hostgroups_dimension = params.add_hostgroups_dimension or 1 self.sc_params.params.add_poller_dimension = params.add_poller_dimension or 0 self.sc_params.params.add_servicegroups_dimension = params.add_servicegroups_dimension or 0 + self.sc_params.params.add_min_max_dimension = params.add_min_max_dimension or 0 + 
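A quick aside on the `or` defaults used throughout this constructor: in Lua, `a or b` falls back to `b` only when `a` is `nil` or `false`, so an explicit `0` or `""` coming from the broker configuration is preserved. A minimal, standalone sketch of the idiom (parameter values are hypothetical):

```lua
-- Hypothetical parameter table; only standard Lua semantics are assumed.
local params = { add_min_max_dimension = 0 }  -- user explicitly disabled it

-- `a or b` substitutes the default only for nil or false...
local username = params.elastic_username or ""     -- nil -> default ""
-- ...so an explicit 0 survives, because 0 is truthy in Lua.
local min_max = params.add_min_max_dimension or 1

print(username, min_max)  --> ""   0
```

This is why dropping `elastic_username` and `elastic_password` from the mandatory parameters and defaulting them to empty strings is safe: an unset credential simply becomes `""`.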
self.sc_params.params.add_thresholds_dimension = params.add_thresholds_dimension or 0 -- can't get geo coords from cache nor event -- self.sc_params.params.add_geocoords_dimension = params.add_geocoords_dimension or 0 @@ -145,10 +149,10 @@ function EventQueue:build_index_template(params) } self.index_routing_path = { - "host.name", - "service.description", - "metric.name", - "metric.instance", + "host_name", + "service_description", + "metric_name", + "metric_instance", -- "metric.subinstances" } @@ -162,31 +166,31 @@ function EventQueue:build_index_template(params) }, mappings = { properties = { - ["host.name"] = { + ["host_name"] = { type = "keyword", time_series_dimension = true }, - ["service.description"] = { + ["service_description"] = { type = "keyword", time_series_dimension = true }, - ["metric.name"] = { + ["metric_name"] = { type = "keyword", time_series_dimension = true }, - ["metric.unit"] = { + ["metric_unit"] = { type = "keyword", time_series_dimension = false }, - ["metric.instance"] = { + ["metric_instance"] = { type = "keyword", time_series_dimension = true }, - ["metric.subinstances"] = { + ["metric_subinstances"] = { type = "keyword", time_series_dimension = false }, - ["metric.value"] = { + ["metric_value"] = { type = "double", time_series_metric = gauge }, @@ -201,7 +205,7 @@ function EventQueue:build_index_template(params) -- add hostgroup property in the template if params.add_hostgroups_dimension == 1 then - self.elastic_index_template.template.mappings.properties["host.groups"] = { + self.elastic_index_template.template.mappings.properties["host_groups"] = { type = "keyword", time_series_dimension = false } @@ -211,7 +215,7 @@ function EventQueue:build_index_template(params) -- add servicegroup property in the template if params.add_servicegroups_dimension == 1 then - self.elastic_index_template.template.mappings.properties["service.groups"] = { + self.elastic_index_template.template.mappings.properties["service_groups"] = { type = "keyword", time_series_dimension = false } @@ -229,6 +233,42 @@ function EventQueue:build_index_template(params) -- table.insert(self.index_routing_path, "poller") end + -- add min and max property in the template + if params.add_min_max_dimension == 1 then + self.elastic_index_template.template.mappings.properties["metric_min"] = { + type = "keyword", + time_series_dimension = false + } + + self.elastic_index_template.template.mappings.properties["metric_max"] = { + type = "keyword", + time_series_dimension = false + } + end + + -- add warn and max property in the template + if params.add_thresholds_dimension == 1 then + self.elastic_index_template.template.mappings.properties["metric_warning_low"] = { + type = "keyword", + time_series_dimension = false + } + + self.elastic_index_template.template.mappings.properties["metric_warning_high"] = { + type = "keyword", + time_series_dimension = false + } + + self.elastic_index_template.template.mappings.properties["metric_critical_low"] = { + type = "keyword", + time_series_dimension = false + } + + self.elastic_index_template.template.mappings.properties["metric_critical_high"] = { + type = "keyword", + time_series_dimension = false + } + end + self.elastic_index_template.template.settings["index.routing_path"] = self.index_routing_path -- add geocoords property in the template @@ -264,6 +304,7 @@ function EventQueue:check_index_template(params) } local return_code = self:send_data(payload, metadata) + if return_code then self.sc_logger:notice("[EventQueue:check_index_template]: 
Elasticsearch index template " .. tostring(params.index_name) .. " has been found") index_state.is_created = true @@ -306,6 +347,11 @@ function EventQueue:create_index_template(params) end function EventQueue:validate_index_template(params) + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[EventQueue:validate_index_template]: send_data_test is set to 1, ignoring template validation") + return true + end + local index_template_structure, error = broker.json_decode(self.elastic_result) if error then @@ -314,21 +360,33 @@ function EventQueue:validate_index_template(params) end local required_index_mapping_properties = { - "host.name", - "service.description", - "metric.value", - "metric.unit", - "metric.value", - "metric.instance", - "metric.subinstances" + "host_name", + "service_description", + "metric_value", + "metric_unit", + "metric_value", + "metric_instance", + "metric_subinstances" } if params.add_hostgroups_dimension == 1 then - table.insert(required_index_mapping_properties, "host.groups") + table.insert(required_index_mapping_properties, "host_groups") end if params.add_servicegroups_dimension == 1 then - table.insert(required_index_mapping_properties, "service.groups") + table.insert(required_index_mapping_properties, "service_groups") + end + + if params.add_min_max_dimension == 1 then + table.insert(required_index_mapping_properties, "metric_min") + table.insert(required_index_mapping_properties, "metric_max") + end + + if params.add_thresholds_dimension == 1 then + table.insert(required_index_mapping_properties, "metric_warning_low") + table.insert(required_index_mapping_properties, "metric_warning_high") + table.insert(required_index_mapping_properties, "metric_critical_low") + table.insert(required_index_mapping_properties, "metric_critical_high") end -- can't get geo coords from cache nor event @@ -403,7 +461,6 @@ end function EventQueue:format_event_host() local event = self.sc_event.event self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric ") - self.sc_event.event.formated_event = {} self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) end @@ -413,7 +470,6 @@ end function EventQueue:format_event_service() self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric ") local event = self.sc_event.event - self.sc_event.event.formated_event = {} self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) end @@ -424,7 +480,7 @@ end function EventQueue:format_metric_host(metric) self.sc_logger:debug("[EventQueue:format_metric_host]: call format_metric host") self:add_generic_information(metric) - self:add_generic_optional_information() + self:add_generic_optional_information(metric) self:add() end @@ -435,9 +491,9 @@ end function EventQueue:format_metric_service(metric) self.sc_logger:debug("[EventQueue:format_metric_service]: call format_metric service") - self.sc_event.event.formated_event["service.description"] = tostring(self.sc_event.event.cache.service.description) self:add_generic_information(metric) - self:add_generic_optional_information() + self.sc_event.event.formated_event["service_description"] = tostring(self.sc_event.event.cache.service.description) + self:add_generic_optional_information(metric) self:add_service_optional_information() self:add() end @@ -446,16 +502,16 @@ function EventQueue:add_generic_information(metric) local event = self.sc_event.event self.sc_event.event.formated_event = { ["@timestamp"] = event.last_check, - ["host.name"] = 
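The template validation above boils down to checking that every required property exists in the mapping returned by Elasticsearch. A reduced sketch of that check, with an illustrative stand-in table (the real structure comes from `broker.json_decode` on the GET response, so the shape here is an assumption):

```lua
-- Illustrative only: a tiny stand-in for the decoded index template.
local function has_all_properties(properties, required)
  for _, name in ipairs(required) do
    if properties[name] == nil then
      return false, name  -- report the first missing mapping property
    end
  end
  return true
end

local properties = { host_name = {}, metric_value = {} }
local ok, missing = has_all_properties(
  properties, { "host_name", "metric_value", "metric_min" })
print(ok, missing)  --> false   metric_min
```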
tostring(event.cache.host.name), - ["metric.name"] = tostring(metric.metric_name), - ["metric.value"] = metric.value, - ["metric.instance"] = metric.instance, - ["metric.subinstances"] = metric.subinstances, - ["metric.unit"] = metric.unit + ["host_name"] = tostring(event.cache.host.name), + ["metric_name"] = tostring(metric.metric_name), + ["metric_value"] = metric.value, + ["metric_instance"] = metric.instance, + ["metric_subinstances"] = metric.subinstances, + ["metric_unit"] = metric.unit } end -function EventQueue:add_generic_optional_information() +function EventQueue:add_generic_optional_information(metric) local params = self.sc_event.params local event = self.sc_event.event @@ -467,13 +523,35 @@ function EventQueue:add_generic_optional_information() table.insert(hostgroups, hg_info.group_name) end - self.sc_event.event.formated_event["host.groups"] = hostgroups + self.sc_event.event.formated_event["host_groups"] = hostgroups end -- add poller if params.add_poller_dimension == 1 then self.sc_event.event.formated_event.poller = event.cache.poller end + + -- add min and max + if params.add_min_max_dimension == 1 then + self.sc_event.event.formated_event.metric_min = self:handle_NaN(metric.min) + self.sc_event.event.formated_event.metric_max = self:handle_NaN(metric.max) + end + + -- add thresholds + if params.add_thresholds_dimension == 1 then + self.sc_event.event.formated_event.metric_warning_low = self:handle_NaN(metric.warning_low) + self.sc_event.event.formated_event.metric_warning_high = self:handle_NaN(metric.warning_high) + self.sc_event.event.formated_event.metric_critical_low = self:handle_NaN(metric.critical_low) + self.sc_event.event.formated_event.metric_critical_high = self:handle_NaN(metric.critical_high) + end +end + +function EventQueue:handle_NaN(value) + if value == value then + return value + end + + return nil end function EventQueue:add_service_optional_information() @@ -485,7 +563,7 @@ function EventQueue:add_service_optional_information() table.insert(servicegroups, sg_info.group_name) end - self.sc_event.event.formated_event["service.groups"] = servicegroups + self.sc_event.event.formated_event["service_groups"] = servicegroups end end @@ -502,7 +580,6 @@ function EventQueue:add() self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) .. "max is: " .. tostring(self.sc_params.params.max_buffer_size)) end @@ -528,12 +605,14 @@ function EventQueue:send_data(payload, queue_metadata) local params = self.sc_params.params local url = params.http_server_url .. queue_metadata.endpoint queue_metadata.headers = { - "Authorization: Basic " .. mime.b64(params.elastic_username .. ":" .. params.elastic_password), "Content-type: application/json" } - - if payload then + if (params.elastic_username ~= "" and params.elastic_password ~= "") then + table.insert(queue_metadata.headers, "Authorization: Basic " .. mime.b64(params.elastic_username .. ":" .. params.elastic_password)) + end + + if payload or queue_metadata.method == "GET" then -- write payload in the logfile for test purpose if params.send_data_test == 1 then self.sc_logger:notice("[send_data]: " .. 
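The `handle_NaN` helper above exploits an IEEE 754 property: NaN is the only value that compares unequal to itself. Min/max and threshold values parsed from performance data can be NaN, and returning `nil` keeps them out of the formatted event instead of producing an unusable field. A standalone illustration:

```lua
-- NaN is the only Lua number for which value ~= value holds (IEEE 754).
local function handle_nan(value)
  if value ~= value then
    return nil  -- nil fields are simply omitted from the formatted event
  end
  return value
end

print(handle_nan(0 / 0))  --> nil
print(handle_nan(42.5))   --> 42.5
```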
tostring(payload)) @@ -600,7 +679,7 @@ function EventQueue:send_data(payload, queue_metadata) if error_json then self.sc_logger:error("[EventQueue:send_data]: Couldn't decode json from elasticsearch. Error is: " .. tostring(error_json) - .. ". Received json is: " .. tostring(http_response_body)) + .. ". Received json is: " .. tostring(http_response_body) .. ". Sent data is: " .. tostring(payload)) return false end @@ -610,7 +689,7 @@ function EventQueue:send_data(payload, queue_metadata) end - self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body) .. ". Sent data is: " .. tostring(payload)) return false end @@ -633,7 +712,15 @@ end function write (event) -- skip event if a mandatory parameter is missing if queue.fail then - queue.sc_logger:error("Skipping event because a mandatory parameter is not set or elastic index is not valid") + if queue.fail_message_counter <= 3 and queue.last_fail_message_date + 30 < os.time(os.date("*t")) then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set or elastic index is not valid") + queue.last_fail_message_date = os.time(os.date("*t")) + queue.fail_message_counter = queue.fail_message_counter + 1 + elseif queue.fail_message_counter > 3 and queue.last_fail_message_date + 300 < os.time(os.date("*t")) then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set or elastic index is not valid") + queue.last_fail_message_date = os.time(os.date("*t")) + queue.fail_message_counter = queue.fail_message_counter + 1 + end return false end diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua index 9c1c5186b9c..f22704292c0 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -37,7 +37,7 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/splunk-metrics.log" - local log_level = params.log_level or 3 + local log_level = params.log_level or 1 -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua index be537c679b3..20652248a81 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua @@ -353,4 +353,17 @@ function ScCommon:get_bbdo_version() return tonumber(bbdo_version) end +--- is_valid_pattern: check if a Lua pattern is valid or not +-- @param pattern (string) the pattern that must be validated +-- @return boolean (boolean) true if pattern is valid, false otherwise +function ScCommon:is_valid_pattern(pattern) + local status, result = pcall(string.match, "a random string", pattern) + + if not status then + self.sc_logger:error("[sc_common:validate_pattern]: invalid pattern. Error message is: " .. 
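The reworked `write()` above throttles the "mandatory parameter missing" error: the first few occurrences are logged at most every 30 seconds, after which the interval widens to 300 seconds. A generalized sketch of that anti-spam logic (the state names are illustrative; the connector keeps them on the queue object as `last_fail_message_date` and `fail_message_counter`):

```lua
-- Throttle state, mirroring last_fail_message_date / fail_message_counter.
local limiter = { last = 0, count = 0 }

local function should_log(now)
  -- short interval for the first three messages, long interval afterwards
  local interval = limiter.count <= 3 and 30 or 300
  if limiter.last + interval < now then
    limiter.last = now
    limiter.count = limiter.count + 1
    return true
  end
  return false
end

if should_log(os.time()) then
  print("would log the error now")
end
```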
tostring(result)) + end + + return status +end + return sc_common diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua index c0aae805e30..2441444e04e 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua @@ -280,6 +280,28 @@ function ScEvent:is_valid_host() return false end + -- loop through each Lua pattern to check if host name match the filter + local is_valid_pattern = false + if self.params.accepted_hosts ~= "" then + for index, pattern in ipairs(self.params.accepted_hosts_pattern_list) do + if string.match(self.event.cache.host.name, pattern) then + self.sc_logger:debug("[sc_event:is_valid_host]: host " .. tostring(self.event.cache.host.name) + .. " matched pattern: " .. tostring(pattern)) + is_valid_pattern = true + break + end + end + else + is_valid_pattern = true + end + + if not is_valid_pattern then + self.sc_logger:info("[sc_event:is_valid_host]: Host: " .. tostring(self.event.cache.host.name) + .. " doesn't match accepted_hosts pattern: " .. tostring(self.params.accepted_hosts) + .. " or any of the sub-patterns if accepted_hosts_enable_split_pattern is enabled") + return false + end + return true end @@ -311,6 +333,28 @@ function ScEvent:is_valid_service() self.event.cache.service.description = self.event.service_id end + -- loop through each Lua pattern to check if service description match the filter + local is_valid_pattern = false + if self.params.accepted_services ~= "" then + for index, pattern in ipairs(self.params.accepted_services_pattern_list) do + if string.match(self.event.cache.service.description, pattern) then + self.sc_logger:debug("[sc_event:is_valid_service]: service " .. tostring(self.event.cache.service.description) + .. " from host: " .. tostring(self.event.cache.host.name) .. " matched pattern: " .. tostring(pattern)) + is_valid_pattern = true + break + end + end + else + is_valid_pattern = true + end + + if not is_valid_pattern then + self.sc_logger:info("[sc_event:is_valid_service]: Service: " .. tostring(self.event.cache.service.description) .. " from host: " .. tostring(self.event.cache.host.name) + .. " doesn't match accepted_services pattern: " .. tostring(self.params.accepted_services) + .. 
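`is_valid_pattern` above relies on the fact that `string.match` raises a Lua error when the pattern itself is malformed; `pcall` converts that error into a status flag without aborting the connector. A standalone illustration (the probe string is arbitrary):

```lua
local function is_valid_pattern(pattern)
  -- string.match raises an error on a malformed pattern; pcall captures it.
  local ok, result = pcall(string.match, "a random string", pattern)
  if not ok then
    print("invalid pattern: " .. tostring(result))
  end
  return ok
end

print(is_valid_pattern("host_%d+"))  --> true
-- "%2" back-references a capture that does not exist, so matching errors out:
print(is_valid_pattern("%2"))        --> invalid pattern: ... then false
```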
" or any of the sub-patterns if accepted_services_enable_split_pattern is enabled") + return false + end + return true end diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua index cf39efd07a3..356410aba63 100644 --- a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -54,6 +54,12 @@ function sc_params.new(common, logger) rejected_hostgroups = "", accepted_servicegroups = "", rejected_servicegroups = "", + accepted_hosts = "", + accepted_services = "", + accepted_hosts_enable_split_pattern = 0, + accepted_services_enable_split_pattern = 0, + accepted_hosts_split_character = ",", + accepted_services_split_character = ",", accepted_bvs = "", rejected_bvs = "", accepted_pollers = "", @@ -990,6 +996,7 @@ function ScParams:check_params() self.params.metric_name_regex = self.common:if_wrong_type(self.params.metric_name_regex, "string", "") self.params.metric_replacement_character = self.common:ifnil_or_empty(self.params.metric_replacement_character, "_") self.params.output_size_limit = self.common:if_wrong_type(self.params.output_size_limit, "number", "") + if self.params.accepted_hostgroups ~= '' and self.params.rejected_hostgroups ~= '' then self.logger:error("[sc_params:check_params]: Parameters accepted_hostgroups and rejected_hostgroups cannot be used together. None will be used.") end @@ -1005,6 +1012,9 @@ function ScParams:check_params() if self.params.accepted_authors ~= '' and self.params.rejected_authors ~= '' then self.logger:error("[sc_params:check_params]: Parameters accepted_authors and rejected_authors cannot be used together. None will be used.") end + + -- handle some dedicated parameters that can use lua pattern (such as accepted_hosts and accepted_services) + self:build_and_validate_filters_pattern({"accepted_hosts", "accepted_services"}) end --- get_kafka_params: retrieve the kafka parameters and store them the self.params.kafka table @@ -1142,4 +1152,59 @@ function ScParams:build_accepted_elements_info() end end +--- validate_pattern_param: check if paramater has a valid lua pattern +-- @param param_name (string) the name of the parameter +-- @param param_value (string) the Lua pattern to test +-- @return param_value (string) either the param value if pattern is valid, empty string otherwise +function ScParams:validate_pattern_param(param_name, param_value) + if not self.common:validate_pattern(param_value) then + self.logger:error("[sc_params:validate_pattern_param]: couldn't validate Lua pattern: " .. tostring(param_value) + .. " for parameter: " .. tostring(param_name) .. ". The filter will be reset to an empty value.") + return "" + end + + return param_value +end + +--- build_and_validate_filters_pattern: make sure lua patterns are valid and build a table of pattern according to the +-- @param param_list (table) a list of all parameters that must be checked. +--[[ + exemple: self.params.accepted_hosts value is "foo.*,.*bar.*" + this method will generate the following parameter + self.params.accepted_hosts_pattern_list = { + "foo.*", + ".*bar.*" + } +]]-- +function ScParams:build_and_validate_filters_pattern(param_list) + local temp_pattern_table + + -- we need to build a table containing all patterns for each filter compatible with this feature + for index, param_name in ipairs(param_list) do + self.params[param_name .. 
"_pattern_list"] = {} + + -- we try to split the pattern in multiple sub patterns if option is enabled + -- this option is here to overcome the lack of alternation operator ("|" character in POSIX regex) in Lua regex + if self.params[param_name .. "_enable_split_pattern"] == 1 then + temp_pattern_table = self.common:split(self.params[param_name], self.params[param_name .. "_split_character"]) + + for index, temp_pattern in ipairs(temp_pattern_table) do + -- each sub pattern must be a valid standalone pattern. We are not here to develop regex in Lua + if self.common:is_valid_pattern(temp_pattern) then + table.insert(self.params[param_name .. "_pattern_list"], temp_pattern) + self.logger:notice("[sc_params:build_accepted_filters_pattern]: adding " .. tostring(temp_pattern) + .. " to the list of filtering patterns for parameter: " .. param_name) + else + -- if the sub pattern is not valid, just ignore it + self.logger:error("[sc_params:build_accepted_filters_pattern]: ignoring pattern for param: " + .. param_name .. " because after splitting the string:" .. param_name + .. ", we end up with the following pattern: " .. tostring(temp_pattern) .. " which is not a valid Lua pattern") + end + end + else + table.insert(self.params[param_name .. "_pattern_list"], self.params[param_name]) + end + end +end + return sc_params \ No newline at end of file diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md index 346dfd96ad3..74dfdc050e8 100644 --- a/stream-connectors/modules/docs/README.md +++ b/stream-connectors/modules/docs/README.md @@ -48,6 +48,7 @@ | dumper | dump any variable for debug purpose | [Documentation](sc_common.md#dumper-method) | | trim | trim spaces (or provided character) at the beginning and the end of a string | [Documentation](sc_common.md#trim-method) | | get_bbdo_version | returns the first digit of the bbdo protocol version | [Documentation](sc_common.md#get_bbdo_version-method) | +| is_valid_pattern | check if a Lua pattern is valid | [Documentation](sc_common.md#is_valid_pattern-method) | ## sc_logger methods @@ -85,6 +86,8 @@ | get_kafka_params | retreive Kafka dedicated parameters from the parameter list and put them in the provided kafka_config object | [Documentation](sc_param.md#get_kafka_params-method) | | load_event_format_file | load a file that serves as a template for formatting events | [Documentation](sc_param.md#load_event_format_file-method) | | build_accepted_elements_info | build a table that store information about accepted elements | [Documentation](sc_param.md#build_accepted_elements_info-method) | +| validate_pattern_param | check if a parameter has a valid Lua pattern as a value | [Documentation](sc_param.md#validate_pattern_param-method) | +| build_and_validate_filters_pattern | build a table that stores information about patterns for compatible parameters | [Documentation](sc_param.md#build_and_validate_filters_pattern-method) | ## sc_event methods diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md index c7fd2159b5e..547724a2de0 100644 --- a/stream-connectors/modules/docs/sc_common.md +++ b/stream-connectors/modules/docs/sc_common.md @@ -64,6 +64,10 @@ - [get\_bbdo\_version method](#get_bbdo_version-method) - [get\_bbdo\_version: returns](#get_bbdo_version-returns) - [get\_bbdo\_version: example](#get_bbdo_version-example) + - [is\_valid\_pattern method](#is_valid_pattern-method) + - [is\_valid\_pattern: parameters](#is_valid_pattern-parameters) + - 
[is\_valid\_pattern: returns](#is_valid_pattern-returns) + - [is\_valid\_pattern: example](#is_valid_pattern-example) ## Introduction @@ -559,3 +563,33 @@ The **get_bbdo_version** method returns the first digit of the bbdo protocol ver local result = test_common:get_bbdo_version() --> result is: 3 ``` + +## is_valid_pattern method + +The **is_valid_pattern** method checks if a Lua pattern is valid or not. + +### is_valid_pattern: parameters + +| parameter | type | optional | default value | +| --------------------------------------------------------------------------------- | ------ | -------- | ------------- | +| the pattern that must be checked | string | no | | + +### is_valid_pattern: returns + +| return | type | always | condition | +| ------------------- | ------ | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_pattern: example + +```lua +local good_pattern = "a random pattern .*" + +local result = test_common:is_valid_pattern(good_pattern) +--> result is: true + +local wrong_pattern = "a random pattern %2" + +local result = test_common:is_valid_pattern(wrong_pattern) +--> result is: false +``` diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md index 137e07da610..f5b3877eb90 100644 --- a/stream-connectors/modules/docs/sc_param.md +++ b/stream-connectors/modules/docs/sc_param.md @@ -22,8 +22,13 @@ - [load\_event\_format\_file: parameters](#load_event_format_file-parameters) - [load\_event\_format\_file: returns](#load_event_format_file-returns) - [load\_event\_format\_file: example](#load_event_format_file-example) - - [build\_accepted\_elements\_info method](#build_accepted_elements_info-method) - - [build\_accepted\_elements\_info: example](#build_accepted_elements_info-example) + - [validate\_pattern\_param method](#validate_pattern_param-method) + - [validate\_pattern\_param: parameters](#validate_pattern_param-parameters) + - [validate\_pattern\_param: returns](#validate_pattern_param-returns) + - [validate\_pattern\_param: example](#validate_pattern_param-example) + - [build\_and\_validate\_filters\_pattern method](#build_and_validate_filters_pattern-method) + - [build\_and\_validate\_filters\_pattern: parameters](#build_and_validate_filters_pattern-parameters) + - [build\_and\_validate\_filters\_pattern: example](#build_and_validate_filters_pattern-example) ## Introduction @@ -31,11 +36,11 @@ The sc_param module provides methods to help you handle parameters for your stre ### Default parameters -| Parameter name | type | default value | description | default scope | additionnal information | -|-----------------------------------------| ------ | ----------------------------------------------------------------------------- |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a coma separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) | -| accepted_elements | string | host_status,service_status,ba_status | | each event is linked to a broker element that we can use to filter events | it is a coma separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank space with underscore. "Host status" becomes "host_status") | -| host_status | string | 0,1,2 | coma separated list of accepted host status (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | +| Parameter name | type | default value | description | default scope | additional information | +| --------------------------------------- | ------ | ----------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a comma-separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) | +| accepted_elements | string | host_status,service_status,ba_status | | each event is linked to a broker element that we can use to filter events | it is a comma-separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank spaces with underscores. 
"Host status" becomes "host_status") | +| host_status | string | 0,1,2 | comma-separated list of accepted host statuses (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | | service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | | ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | | hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | @@ -43,15 +48,21 @@ The sc_param module provides methods to help you handle parameters for your stre | in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | | flapping | number | 0 | accept only events that aren't flapping (use 1 to accept flapping events too) | host_status(neb), service_status(neb) | | | accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | -| rejected_hostgroups | string | | coma separated list of hostgroups that are rejected (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | -| accepted_servicegroups | string | | coma separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | -| rejected_servicegroups | string | | coma separated list of servicegroups that are rejected (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | -| accepted_bvs | string | | coma separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | | -| rejected_bvs | string | | coma separated list of BVs that are rejected (for example: my_bv_1,my_bv_2) | ba_status(bam) | | -| accepted_pollers | string | | coma separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | -| rejected_pollers | string | | coma separated list of pollers that are rejected (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | -| accepted_authors | string | | coma separated list of authors that are accepted (for example: author_1,author_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | -| rejected_authors | string | | coma separated list of authors that are rejected (for example: author_1,author_2) | host_status(neb), service_status(neb),acknowledgement(neb) | | +| rejected_hostgroups | string | | comma-separated list of hostgroups that are rejected (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| accepted_servicegroups | string | | comma-separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | +| rejected_servicegroups | string | | comma-separated list of servicegroups that are rejected (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | +| accepted_hosts | string | | accepted hosts (can use Lua patterns) that are accepted (for example: host_A%2) | host_status(neb), service_status(neb), acknowledgement(neb), downtime(neb) | [lua pattern 
documentation](https://www.lua.org/pil/20.2.html) |
+| accepted_services | string | | services (can use Lua patterns) that are accepted (for example: service_A%d+) | service_status(neb), acknowledgement(neb), downtime(neb) | [lua pattern documentation](https://www.lua.org/pil/20.2.html) |
+| accepted_hosts_enable_split_pattern | number | 0 | allows you to use a comma-separated list of hosts in the accepted_hosts parameter (for example: host_A%d+,host_B%d+). (0 = disabled, 1 = enabled) | host_status(neb), service_status(neb), acknowledgement(neb), downtime(neb) | |
+| accepted_services_enable_split_pattern | number | 0 | allows you to use a comma-separated list of services in the accepted_services parameter (for example: service_A%d+,service_B%d+). (0 = disabled, 1 = enabled) | service_status(neb), acknowledgement(neb), downtime(neb) | |
+| accepted_services_split_character | string | , | the separator that is used when using the accepted_services_enable_split_pattern and accepted_services parameters | service_status(neb), acknowledgement(neb), downtime(neb) | |
+| accepted_hosts_split_character | string | , | the separator that is used when using the accepted_hosts_enable_split_pattern and accepted_hosts parameters | host_status(neb), service_status(neb), acknowledgement(neb), downtime(neb) | |
+| accepted_bvs | string | | comma-separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | |
+| rejected_bvs | string | | comma-separated list of BVs that are rejected (for example: my_bv_1,my_bv_2) | ba_status(bam) | |
+| accepted_pollers | string | | comma-separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| rejected_pollers | string | | comma-separated list of pollers that are rejected (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| accepted_authors | string | | comma-separated list of authors that are accepted (for example: author_1,author_2) | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| rejected_authors | string | | comma-separated list of authors that are rejected (for example: author_1,author_2) | host_status(neb), service_status(neb), acknowledgement(neb) | |
 | accepted_metrics | string | `.*` | filter metrics based on their name. Use lua pattern to filter | metrics stream connectors | [lua pattern documentation](https://www.lua.org/pil/20.2.html) |
 | skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | |
 | skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | |
@@ -345,38 +356,55 @@ result = test_param:load_event_format_file(true)
 --> result is false
 ```
 
-## build_accepted_elements_info method
+## validate_pattern_param method
 
-The **build_accepted_elements_info** creates a table with information related to the accepted elements. It will use the [**accepted_elements parameter**](#default-parameters) in order to create this table.
+The **validate_pattern_param** method checks if a parameter has a valid Lua pattern as a value. 
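For reference, a hypothetical configuration fragment tying the filter parameters documented above together; with the split option enabled, the comma-separated value is cut on `accepted_hosts_split_character` and each piece is treated as an independent Lua pattern:

```lua
-- Hypothetical broker parameter values, matching the names used in sc_params.
local params = {
  accepted_hosts = "web%d+,db%d+",
  accepted_hosts_enable_split_pattern = 1,
  accepted_hosts_split_character = ",",
}

-- After ScParams:build_and_validate_filters_pattern({ "accepted_hosts" })
-- runs, the derived list would look like this:
local accepted_hosts_pattern_list = { "web%d+", "db%d+" }
-- hosts "web01" and "db12" would then pass the filter, "mail01" would not
```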
-### build_accepted_elements_info: example +### validate_pattern_param: parameters + +| parameter | type | optional | default value | +| -------------------------- | ------ | -------- | ------------- | +| the name of the parameter | string | no | | +| the value of the parameter | string | no | | + +### validate_pattern_param: returns + +| return | type | always | condition | +| ----------------------------------- | ------ | ------ | ----------------------------------------------------------------------------------------- | +| the parameter value or empty string | string | yes | if the Lua pattern is not validated it will return an empty string instead of the pattern | + +### validate_pattern_param: example ```lua -test_param.params.accepted_elements = "host_status,service_status,ba_status" +-- create a list of mandatory parameters +local param_name = "my_param" +local param_value = "pattern.*" -test_param:build_accepted_elements_info() +local result = test_param:validate_pattern_param(param_name, param_value) +--> result is "pattern.*" because it is a valid Lua pattern ---> a test_param.params.accepted_elements_info table is now created and here is its content ---[[ - test_param.params.accepted_elements_info = { - host_status = { - category_id = 1, - category_name = "neb", - id = 14, - name = "host_status" - }, - service_status = { - category_id = 1, - category_name = "neb", - id = 24, - name = "service_status" - }, - ba_status = { - category_id = 6, - category_name = "bam", - id = 1, - name = "ba_status - } - } -]]-- +param_value = "pattern%2" + +local result = test_param:validate_pattern_param(param_name, param_value) +--> result is "" because it is not a valid Lua pattern +``` + +## build_and_validate_filters_pattern method + +The **build_and_validate_filters_pattern** method validates Lua patterns and builds a table of patterns to compensate the lack of alternation operator (commonly known as | in POSIX regex) + +### build_and_validate_filters_pattern: parameters + +| parameter | type | optional | default value | +| -------------------------- | ------ | -------- | ------------- | +| a list of parameter names that may have Lua patterns as a value | string | no | | + +### build_and_validate_filters_pattern: example + +```lua +-- create a list of parameters name +local param_list = {"accepted_hosts", "accepted_services"} + +test_param:build_and_validate_filters_pattern(param_name, param_value) +--> it creates a test_param.params.accepted_hosts_pattern_list table and a test_param.params.accepted_services_pattern_list ``` From a577bc6cf3bac43b2e35bd4faffb01a838356ea9 Mon Sep 17 00:00:00 2001 From: hamzabessa <148857497+hamzabessa@users.noreply.github.com> Date: Tue, 30 Jan 2024 09:57:02 +0100 Subject: [PATCH 196/219] enh(ci): package lua dependencies using nfpm (#175) Refs: MON-33552 --- .../.github/actions/deb-delivery/action.yml | 2 +- .../.github/actions/package-nfpm/action.yml | 26 +- .../.github/actions/rpm-delivery/action.yml | 2 +- ...le.packaging-stream-connectors-nfpm-alma8} | 9 +- ...le.packaging-stream-connectors-nfpm-alma9} | 9 +- ...packaging-stream-connectors-nfpm-bullseye} | 8 +- .../.github/workflows/docker-packaging.yml | 4 +- .../.github/workflows/lua-cffi.yml | 144 ++++++++ .../.github/workflows/lua-curl.yml | 142 ++++++++ .../.github/workflows/lua-tz.yml | 122 +++++++ .../stream-connectors-dependencies.yml | 320 ------------------ .../workflows/stream-connectors-lib.yml | 6 +- .../lua-cffi/packaging/deb/control | 17 - .../lua-cffi/packaging/deb/copyright | 23 -- 
.../lua-cffi/packaging/deb/install | 1 - .../dependencies/lua-cffi/packaging/deb/rules | 6 - .../lua-cffi/packaging/deb/source/format | 1 - .../lua-cffi/packaging/lua-cffi.yaml | 42 +++ .../lua-cffi/packaging/rpm/lua-cffi.spec | 47 --- .../lua-curl/packaging/lua-curl.yaml | 37 ++ .../lua-curl/packaging/rpm/lua-curl.spec | 50 --- .../dependencies/lua-tz/packaging/deb/control | 15 - .../lua-tz/packaging/deb/copyright | 23 -- .../dependencies/lua-tz/packaging/deb/dirs | 1 - .../dependencies/lua-tz/packaging/deb/install | 1 - .../dependencies/lua-tz/packaging/deb/rules | 8 - .../lua-tz/packaging/deb/source/format | 1 - .../dependencies/lua-tz/packaging/lua-tz.yaml | 38 +++ .../lua-tz/packaging/rpm/lua-tz.spec | 43 --- 29 files changed, 560 insertions(+), 588 deletions(-) rename stream-connectors/.github/docker/{Dockerfile.packaging-alma8 => Dockerfile.packaging-stream-connectors-nfpm-alma8} (57%) rename stream-connectors/.github/docker/{Dockerfile.packaging-alma9 => Dockerfile.packaging-stream-connectors-nfpm-alma9} (58%) rename stream-connectors/.github/docker/{Dockerfile.packaging-bullseye => Dockerfile.packaging-stream-connectors-nfpm-bullseye} (63%) create mode 100644 stream-connectors/.github/workflows/lua-cffi.yml create mode 100644 stream-connectors/.github/workflows/lua-curl.yml create mode 100644 stream-connectors/.github/workflows/lua-tz.yml delete mode 100644 stream-connectors/.github/workflows/stream-connectors-dependencies.yml delete mode 100644 stream-connectors/dependencies/lua-cffi/packaging/deb/control delete mode 100644 stream-connectors/dependencies/lua-cffi/packaging/deb/copyright delete mode 100644 stream-connectors/dependencies/lua-cffi/packaging/deb/install delete mode 100644 stream-connectors/dependencies/lua-cffi/packaging/deb/rules delete mode 100644 stream-connectors/dependencies/lua-cffi/packaging/deb/source/format create mode 100644 stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml delete mode 100644 stream-connectors/dependencies/lua-cffi/packaging/rpm/lua-cffi.spec create mode 100644 stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml delete mode 100644 stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec delete mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/control delete mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/copyright delete mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/dirs delete mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/install delete mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/rules delete mode 100644 stream-connectors/dependencies/lua-tz/packaging/deb/source/format create mode 100644 stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml delete mode 100644 stream-connectors/dependencies/lua-tz/packaging/rpm/lua-tz.spec diff --git a/stream-connectors/.github/actions/deb-delivery/action.yml b/stream-connectors/.github/actions/deb-delivery/action.yml index 16cbfc8fa6f..0fd02f838bb 100644 --- a/stream-connectors/.github/actions/deb-delivery/action.yml +++ b/stream-connectors/.github/actions/deb-delivery/action.yml @@ -21,7 +21,7 @@ runs: using: "composite" steps: - name: Use cache DEB files - uses: actions/cache/restore@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.deb key: ${{ inputs.cache_key }} diff --git a/stream-connectors/.github/actions/package-nfpm/action.yml 
b/stream-connectors/.github/actions/package-nfpm/action.yml index 971f3301105..0e97a8f7019 100644 --- a/stream-connectors/.github/actions/package-nfpm/action.yml +++ b/stream-connectors/.github/actions/package-nfpm/action.yml @@ -10,11 +10,8 @@ inputs: distrib: description: The package distrib required: true - major_version: - description: The major version - required: false - minor_version: - description: The minor version + version: + description: The version required: false release: description: The package release number @@ -53,6 +50,7 @@ runs: RPM_GPG_SIGNING_KEY_ID: ${{ inputs.rpm_gpg_signing_key_id }} RPM_GPG_SIGNING_PASSPHRASE: ${{ inputs.rpm_gpg_signing_passphrase }} run: | + export VERSION="${{ inputs.version }}" export ARCH="${{ inputs.arch }}" if [ "${{ inputs.package_extension }}" = "rpm" ]; then @@ -72,14 +70,20 @@ runs: export RPM_SIGNING_KEY_ID="$RPM_GPG_SIGNING_KEY_ID" export NFPM_RPM_PASSPHRASE="$RPM_GPG_SIGNING_PASSPHRASE" - sed -i "s/@luaver@/$luaver/g" ./${{ inputs.nfpm_file_pattern }} - sed -i "s/@COMMIT_HASH@/${{ inputs.commit_hash }}/g" ./${{ inputs.nfpm_file_pattern }} - nfpm package --config ./${{ inputs.nfpm_file_pattern }} --packager ${{ inputs.package_extension }} - + for FILE in ${{ inputs.nfpm_file_pattern }}; do + DIRNAME=$(dirname $FILE) + BASENAME=$(basename $FILE) + cd $DIRNAME + sed -i "s/@luaver@/$luaver/g" $BASENAME + sed -i "s/@COMMIT_HASH@/${{ inputs.commit_hash }}/g" $BASENAME + nfpm package --config $BASENAME --packager ${{ inputs.package_extension }} + cd - + mv $DIRNAME/*.${{ inputs.package_extension }} ./ + done shell: bash - name: Cache packages - uses: actions/cache/save@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.${{ inputs.package_extension }} key: ${{ inputs.cache_key }} @@ -87,7 +91,7 @@ runs: # Update if condition to true to get packages as artifacts - if: ${{ false }} name: Upload package artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 with: name: packages-${{ inputs.distrib }} path: ./*.${{ inputs.package_extension}} diff --git a/stream-connectors/.github/actions/rpm-delivery/action.yml b/stream-connectors/.github/actions/rpm-delivery/action.yml index c87dd0b19cd..ad12396c92f 100644 --- a/stream-connectors/.github/actions/rpm-delivery/action.yml +++ b/stream-connectors/.github/actions/rpm-delivery/action.yml @@ -21,7 +21,7 @@ runs: using: "composite" steps: - name: Use cache RPM files - uses: actions/cache/restore@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.rpm key: ${{ inputs.cache_key }} diff --git a/stream-connectors/.github/docker/Dockerfile.packaging-alma8 b/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 similarity index 57% rename from stream-connectors/.github/docker/Dockerfile.packaging-alma8 rename to stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 index 08ab6795482..06885ef3bf0 100644 --- a/stream-connectors/.github/docker/Dockerfile.packaging-alma8 +++ b/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 @@ -2,7 +2,10 @@ ARG REGISTRY_URL FROM ${REGISTRY_URL}/almalinux:8 -RUN <> conanfile.txt + [requires] + libcurl/8.0.1 + openssl/1.1.1t + zlib/1.2.13 + + [generators] + CMakeToolchain + + [options] + 
libcurl/*:with_ca_bundle=/etc/ssl/certs/ca-bundle.crt + libcurl/*:with_ca_fallback=False + libcurl/*:with_ca_path=/etc/ssl/certs/ + EOF + + conan install . --build=missing --deployer=full_deploy + + sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.0.1/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/1.1.1t/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread#" Makefile + make + + cd .. + + mkdir -p dependencies/lua-curl/lua-curl/ + cp -p lua-curl-src/lcurl.so dependencies/lua-curl/lua-curl/ + cp -rp lua-curl-src/src/lua/cURL dependencies/lua-curl/lua-curl/ + cp -p lua-curl-src/src/lua/cURL.lua dependencies/lua-curl/lua-curl/ + shell: bash + + - name: Package + uses: ./.github/actions/package-nfpm + with: + nfpm_file_pattern: "dependencies/lua-curl/packaging/*.yaml" + distrib: ${{ matrix.distrib }} + package_extension: ${{ matrix.package_extension }} + arch: amd64 + version: "0.3.13" + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + + deliver-rpm: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish RPM packages + uses: ./.github/actions/rpm-delivery + with: + module_name: lua-curl + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-curl-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} diff --git a/stream-connectors/.github/workflows/lua-tz.yml b/stream-connectors/.github/workflows/lua-tz.yml new file mode 100644 index 00000000000..e210ec1b080 --- /dev/null +++ b/stream-connectors/.github/workflows/lua-tz.yml @@ -0,0 +1,122 @@ +name: lua-tz + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - dependencies/lua-tz/** + push: + branches: + - develop + - master + paths: + - dependencies/lua-tz/** + +jobs: + get-environment: + uses: ./.github/workflows/get-environment.yml + + package: + needs: [get-environment] + + strategy: + fail-fast: false + matrix: + distrib: [el8, el9, bullseye] + include: + - package_extension: rpm + image: packaging-stream-connectors-nfpm-alma8 + distrib: el8 + - package_extension: rpm + image: packaging-stream-connectors-nfpm-alma9 + distrib: el9 + - package_extension: deb + image: packaging-stream-connectors-nfpm-bullseye + distrib: bullseye + + runs-on: ubuntu-22.04 + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Checkout luatz sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + 
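For context on what the lua-curl package ships: the statically linked `lcurl.so` plus the `cURL` Lua wrapper expose the Lua-cURLv3 API. A minimal usage sketch based on the upstream Lua-cURLv3 README (the URL is a placeholder):

```lua
-- Minimal Lua-cURLv3 usage, per the upstream README; URL is a placeholder.
local cURL = require("cURL")

cURL.easy{
  url = "https://example.com/",
  writefunction = io.stdout,  -- stream the response body to stdout
}
  :perform()
:close()
```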
repository: "daurnimator/luatz" + path: "luatz-src" + ref: "v0.4-1" + + - name: Prepare packaging of lua-tz + run: cp -r luatz-src/luatz dependencies/lua-tz/lua-tz + shell: bash + + - name: Package + uses: ./.github/actions/package-nfpm + with: + nfpm_file_pattern: "dependencies/lua-tz/packaging/*.yaml" + distrib: ${{ matrix.distrib }} + package_extension: ${{ matrix.package_extension }} + arch: all + version: "0.4" + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + + deliver-rpm: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish RPM packages + uses: ./.github/actions/rpm-delivery + with: + module_name: lua-tz + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-tz-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} + + deliver-deb: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish DEB packages + uses: ./.github/actions/deb-delivery + with: + module_name: lua-tz + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-tz-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} diff --git a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml b/stream-connectors/.github/workflows/stream-connectors-dependencies.yml deleted file mode 100644 index e9340788069..00000000000 --- a/stream-connectors/.github/workflows/stream-connectors-dependencies.yml +++ /dev/null @@ -1,320 +0,0 @@ -name: stream-connectors-dependencies - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -on: - workflow_dispatch: - pull_request: - paths: - - dependencies/** - push: - branches: - - develop - - master - paths: - - dependencies/** - -jobs: - get-environment: - uses: ./.github/workflows/get-environment.yml - - package-rpm: - runs-on: ubuntu-22.04 - strategy: - matrix: - distrib: [el8, el9] - lib: [lua-cffi, lua-tz, lua-curl] - include: - - distrib: el8 - image: packaging-stream-connectors-alma8 - package_extension: rpm - - distrib: el9 - image: packaging-stream-connectors-alma9 - package_extension: rpm - - name: package ${{ matrix.distrib }} ${{ matrix.lib }} - container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest - credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # 
v4.1.1 - - - name: Install dependencies - run: | - mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} - - yum install -y yum-utils epel-release - yum config-manager --set-enabled crb || true # alma 9 - yum config-manager --set-enabled powertools || true # alma 8 - - yum install -y git make gcc luarocks meson gcc-c++ cmake libffi libffi-devel lua-devel libcurl-devel - shell: bash - - - if: ${{ matrix.lib == 'lua-cffi' }} - name: Prepare packaging of lua-cffi - run: | - luarocks install cffi-lua - luarocks show cffi-lua | grep "cffi-lua\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt - cat version.txt - - mkdir ${{ matrix.lib }} - find /usr/ -type f -name "cffi.so" -exec cp {} ${{ matrix.lib }}/ \; - tar czf ~/rpmbuild/SOURCES/${{ matrix.lib }}.tar.gz ${{ matrix.lib }} - working-directory: dependencies/${{ matrix.lib }} - shell: bash - - - if: ${{ matrix.lib == 'lua-tz' }} - name: Prepare packaging of lua-tz - run: | - luarocks install luatz - luarocks show luatz | grep "luatz\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt - cat version.txt - - mkdir ${{ matrix.lib }} - cp -rp /usr/share/lua/`lua -e "print(string.sub(_VERSION, 5))"`/luatz/* ${{ matrix.lib }}/ - tar czf ~/rpmbuild/SOURCES/${{ matrix.lib }}.tar.gz ${{ matrix.lib }} - working-directory: dependencies/${{ matrix.lib }} - shell: bash - - - if: ${{ matrix.lib == 'lua-curl' }} - name: Checkout sources of lua-curl - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: Lua-cURL/Lua-cURLv3 - path: lua-curl-src - ref: v0.3.13 - - - if: ${{ matrix.lib == 'lua-curl' }} - name: Compile lua-curl and prepare packaging - run: | - dnf install -y openssl-devel openssl libcurl-devel python3-pip cpanminus - - cpanm \ - IPC::Cmd \ - Digest::SHA \ - Thread::Queue \ - IO::Socket::SSL \ - File::Copy \ - File::Compare - - cd lua-curl-src - - pip3 install conan - conan profile detect - - cat <<'EOF' >> conanfile.txt - [requires] - libcurl/8.0.1 - openssl/1.1.1t - zlib/1.2.13 - - [generators] - CMakeToolchain - - [options] - libcurl/*:with_ca_bundle=/etc/ssl/certs/ca-bundle.crt - libcurl/*:with_ca_fallback=False - libcurl/*:with_ca_path=/etc/ssl/certs/ - EOF - - conan install . --build=missing --deployer=full_deploy - - sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.0.1/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/1.1.1t/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread#" Makefile - make - - cd .. 
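The grep/cut/perl pipeline in the prepare steps above pulls the bare version number out of `luarocks show` output before it is written to version.txt. The same extraction, reduced to a Lua one-liner for clarity; the sample line below is invented, not real luarocks output.

-- Hypothetical "luarocks show" line; the pattern keeps only the dotted version.
local line = "luatz 0.4-1 (installed)"
print(line:match("(%d+%.%d+%.?%d*)")) --> 0.4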
- - mkdir -p dependencies/${{ matrix.lib }}/${{ matrix.lib }}/ - cp -p lua-curl-src/lcurl.so dependencies/${{ matrix.lib }}/${{ matrix.lib }}/ - cp -rp lua-curl-src/src/lua/cURL dependencies/${{ matrix.lib }}/${{ matrix.lib }}/ - cp -p lua-curl-src/src/lua/cURL.lua dependencies/${{ matrix.lib }}/${{ matrix.lib }}/ - - cd dependencies/${{ matrix.lib }} - - echo "0.3.13" >> version.txt - - tar czf ~/rpmbuild/SOURCES/${{ matrix.lib }}.tar.gz ${{ matrix.lib }} - shell: bash - - - name: Package ${{ matrix.lib }} - run: | - rpmbuild -ba packaging/rpm/${{ matrix.lib }}.spec -D "VERSION `cat version.txt`" - - mv ~/rpmbuild/RPMS/**/*.rpm ../../ - working-directory: dependencies/${{ matrix.lib }} - shell: bash - - - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - path: ./*.${{ matrix.package_extension }} - key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.lib }}-${{ matrix.distrib }} - - package-deb: - runs-on: ubuntu-22.04 - strategy: - matrix: - distrib: [bullseye] - lib: [lua-cffi, lua-tz] - include: - - distrib: bullseye - image: packaging-stream-connectors-bullseye - package_extension: deb - - name: package ${{ matrix.distrib }} ${{ matrix.lib }} - container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest - credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Install dependencies - run: | - apt update - apt install -y git make gcc luarocks meson cmake libffi7 libffi-dev lua5.3 liblua5.3-dev - shell: bash - - - if: ${{ matrix.lib == 'lua-cffi' }} - name: Prepare packaging of lua-cffi - run: | - luarocks install cffi-lua - luarocks show cffi-lua | grep "cffi-lua\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt - - PACKAGE_NAME="${{ matrix.lib }}" - PACKAGE_VERSION=`cat version.txt` - - mkdir -p $PACKAGE_NAME-$PACKAGE_VERSION/debian - - cp -rp packaging/deb/* $PACKAGE_NAME-$PACKAGE_VERSION/debian/ - - find /usr/ -type f -name "cffi.so" -exec cp {} $PACKAGE_NAME-$PACKAGE_VERSION/ \; - working-directory: dependencies/${{ matrix.lib }} - shell: bash - - - if: ${{ matrix.lib == 'lua-tz' }} - name: Prepare packaging of lua-tz - run: | - luarocks install luatz - luarocks show luatz | grep "luatz\s[0-9]" | cut -d' ' -f2 | perl -nle 'm/(\d+\.\d+(\.\d+)?)/; print $1' >> version.txt - - PACKAGE_NAME="${{ matrix.lib }}" - PACKAGE_VERSION=`cat version.txt` - - mkdir -p $PACKAGE_NAME-$PACKAGE_VERSION/debian - cp -rp packaging/deb/* $PACKAGE_NAME-$PACKAGE_VERSION/debian/ - - cp -rp /usr/local/share/lua/5.3/luatz $PACKAGE_NAME-$PACKAGE_VERSION/luatz - working-directory: dependencies/${{ matrix.lib }} - shell: bash - - - name: Package ${{ matrix.lib }} - run: | - PACKAGE_NAME="${{ matrix.lib }}" - PACKAGE_VERSION=`cat version.txt` - - tar czf $PACKAGE_NAME-$PACKAGE_VERSION.tar.gz $PACKAGE_NAME-$PACKAGE_VERSION - - cd $PACKAGE_NAME-$PACKAGE_VERSION - - debmake -f "centreon" -e "contact@centreon.com" -y -r ${{ matrix.distrib }} - debuild-pbuilder --no-lintian - - mv ../*.deb ../../../ - working-directory: dependencies/${{ matrix.lib }} - shell: bash - - - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - path: ./*.${{ matrix.package_extension }} - key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.lib }}-${{ matrix.distrib }} - - sign-rpm: - needs: 
[package-rpm] - runs-on: ubuntu-22.04 - strategy: - matrix: - distrib: [el8, el9] - lib: [lua-cffi, lua-tz, lua-curl] - name: sign rpm ${{ matrix.distrib }} ${{ matrix.lib }} - container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/rpm-signing:ubuntu - options: -t - credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - steps: - - run: apt-get install -y zstd - shell: bash - - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - path: ./*.rpm - key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} - - - run: echo "HOME=/root" >> $GITHUB_ENV - shell: bash - - - run: rpmsign --addsign ./*.rpm - shell: bash - - - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - path: ./*.rpm - key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} - - deliver-rpm: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, sign-rpm] - runs-on: ubuntu-22.04 - strategy: - matrix: - distrib: [el8, el9] - lib: [lua-cffi, lua-tz, lua-curl] - name: deliver ${{ matrix.distrib }} ${{ matrix.lib }} - - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Publish RPM packages - uses: ./.github/actions/rpm-delivery - with: - module_name: stream-connectors-dependencies - distrib: ${{ matrix.distrib }} - artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.lib }}-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} - - deliver-deb: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package-deb] - runs-on: ubuntu-22.04 - strategy: - matrix: - distrib: [bullseye] - lib: [lua-cffi, lua-tz] - name: deliver ${{ matrix.distrib }} ${{ matrix.lib }} - - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Publish DEB packages - uses: ./.github/actions/deb-delivery - with: - module_name: stream-connectors-dependencies - distrib: ${{ matrix.distrib }} - artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.lib }}-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/stream-connectors/.github/workflows/stream-connectors-lib.yml index 9d142f133c1..ae4a8647ffd 100644 --- a/stream-connectors/.github/workflows/stream-connectors-lib.yml +++ b/stream-connectors/.github/workflows/stream-connectors-lib.yml @@ -29,13 +29,13 @@ jobs: distrib: [el8, el9, bullseye] include: - package_extension: rpm - image: packaging-stream-connectors-alma8 + image: packaging-stream-connectors-nfpm-alma8 distrib: el8 - package_extension: rpm - image: packaging-stream-connectors-alma9 + image: packaging-stream-connectors-nfpm-alma9 distrib: el9 - package_extension: deb - image: packaging-stream-connectors-bullseye + image: packaging-stream-connectors-nfpm-bullseye distrib: bullseye runs-on: ubuntu-22.04 diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/control 
b/stream-connectors/dependencies/lua-cffi/packaging/deb/control deleted file mode 100644 index 1ce8a9278d9..00000000000 --- a/stream-connectors/dependencies/lua-cffi/packaging/deb/control +++ /dev/null @@ -1,17 +0,0 @@ -Source: lua-cffi -Section: interpreters -Priority: optional -Maintainer: Centreon -Build-Depends: - debhelper-compat (=12), - dh-lua (>= 21) -Standards-Version: 4.5.0 -Homepage: https://wwww.centreon.com - -Package: lua-cffi -Architecture: all -Depends: - libffi7, - libffi-dev, - lua5.3 -Description: lua cffi library diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/copyright b/stream-connectors/dependencies/lua-cffi/packaging/deb/copyright deleted file mode 100644 index 25875f802b4..00000000000 --- a/stream-connectors/dependencies/lua-cffi/packaging/deb/copyright +++ /dev/null @@ -1,23 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: lua-cffi -Upstream-Contact: Centreon -Source: https://www.centreon.com - -Files: * -Copyright: 2023 Centreon -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - https://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". - diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/install b/stream-connectors/dependencies/lua-cffi/packaging/deb/install deleted file mode 100644 index 5eb40401080..00000000000 --- a/stream-connectors/dependencies/lua-cffi/packaging/deb/install +++ /dev/null @@ -1 +0,0 @@ -cffi.so usr/lib64/lua/5.3 diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/rules b/stream-connectors/dependencies/lua-cffi/packaging/deb/rules deleted file mode 100644 index d8309f67d01..00000000000 --- a/stream-connectors/dependencies/lua-cffi/packaging/deb/rules +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/make -f - -export DEB_BUILD_MAINT_OPTIONS = hardening=+all - -%: - dh $@ diff --git a/stream-connectors/dependencies/lua-cffi/packaging/deb/source/format b/stream-connectors/dependencies/lua-cffi/packaging/deb/source/format deleted file mode 100644 index 163aaf8d82b..00000000000 --- a/stream-connectors/dependencies/lua-cffi/packaging/deb/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml b/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml new file mode 100644 index 00000000000..21e20af20e0 --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml @@ -0,0 +1,42 @@ +name: "lua-cffi" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "1${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + lua cffi library + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../lua-cffi/cffi.so" + dst: "/usr/lib64/lua/@luaver@/cffi.so" + packager: rpm + + - src: "../lua-cffi/cffi.so" + dst: 
"/usr/lib/x86_64-linux-gnu/lua/5.3/cffi.so" + packager: deb + +overrides: + rpm: + depends: + - lua + - libffi + - libffi-devel + deb: + depends: + - "lua5.3" + - "libffi7" + - "libffi-dev" + +rpm: + summary: lua cffi + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/stream-connectors/dependencies/lua-cffi/packaging/rpm/lua-cffi.spec b/stream-connectors/dependencies/lua-cffi/packaging/rpm/lua-cffi.spec deleted file mode 100644 index 9f5251d6c2f..00000000000 --- a/stream-connectors/dependencies/lua-cffi/packaging/rpm/lua-cffi.spec +++ /dev/null @@ -1,47 +0,0 @@ -%{!?luaver: %global luaver %(lua -e "print(string.sub(_VERSION, 5))" || echo 0)} -%global luapkgdir %{_datadir}/lua/%{luaver} -%global lualibdir %{_libdir}/lua/%{luaver} -%global debug_package %{nil} - -Name: lua-cffi -Version: %{VERSION} -Release: 1%{?dist} -Summary: lua cffi - -Group: Applications/System -License: Apache-2.0 -URL: https://www.centreon.com -Packager: Centreon -Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ - -Source0: %{name}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: lua -BuildRequires: lua-devel -BuildRequires: libffi -BuildRequires: libffi-devel - -Requires: lua -Requires: libffi -Requires: libffi-devel - -%description -lua cffi library - -%prep -%setup -q -n %{name} - -%build - -%install -%{__install} -d $RPM_BUILD_ROOT%{lualibdir} -%{__cp} -p ./cffi.so $RPM_BUILD_ROOT%{lualibdir}/cffi.so - -%clean -%{__rm} -rf $RPM_BUILD_ROOT - -%files -%{lualibdir}/cffi.so - -%changelog diff --git a/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml b/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml new file mode 100644 index 00000000000..2dd2798496b --- /dev/null +++ b/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml @@ -0,0 +1,37 @@ +name: "lua-curl" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "6${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + lua curl library + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../lua-curl" + dst: "/usr/lib64/lua/@luaver@" + file_info: + mode: 0755 + + - src: "../lua-curl" + dst: "/usr/share/lua/@luaver@" + file_info: + mode: 0755 + +overrides: + rpm: + depends: + - lua + +rpm: + summary: lua curl + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec b/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec deleted file mode 100644 index 4243291cf57..00000000000 --- a/stream-connectors/dependencies/lua-curl/packaging/rpm/lua-curl.spec +++ /dev/null @@ -1,50 +0,0 @@ -%{!?luaver: %global luaver %(lua -e "print(string.sub(_VERSION, 5))" || echo 0)} -%global luapkgdir %{_datadir}/lua/%{luaver} -%global lualibdir %{_libdir}/lua/%{luaver} -%global debug_package %{nil} - -Name: lua-curl -Version: %{VERSION} -Release: 5%{?dist} -Summary: lua curl - -Group: Applications/System -License: Apache-2.0 -URL: https://www.centreon.com -Packager: Centreon -Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ - -Source0: %{name}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: lua -BuildRequires: lua-devel - -Requires: lua - -%description -lua curl library - 
-%prep -%setup -q -n %{name} - -%build - -%install -%{__install} -Dp -m0755 ./lcurl.so $RPM_BUILD_ROOT%{lualibdir}/lcurl.so -%{__install} -Dp -m0644 ./cURL.lua $RPM_BUILD_ROOT%{lualibdir}/cURL.lua -%{__install} -d -m 0755 $RPM_BUILD_ROOT%{luapkgdir}/cURL -%{__install} -Dp -m0644 ./cURL/safe.lua $RPM_BUILD_ROOT%{luapkgdir}/cURL/safe.lua -%{__install} -Dp -m0644 ./cURL/utils.lua $RPM_BUILD_ROOT%{luapkgdir}/cURL/utils.lua -%{__install} -d -m 0755 $RPM_BUILD_ROOT%{luapkgdir}/cURL/impl -%{__install} -Dp -m0644 ./cURL/impl/cURL.lua $RPM_BUILD_ROOT%{luapkgdir}/cURL/impl/cURL.lua - -%clean -%{__rm} -rf $RPM_BUILD_ROOT - -%files -%defattr(-, root, root, 0755) -%{lualibdir}/* -%{luapkgdir}/* - -%changelog diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/control b/stream-connectors/dependencies/lua-tz/packaging/deb/control deleted file mode 100644 index 54c7a1ed33a..00000000000 --- a/stream-connectors/dependencies/lua-tz/packaging/deb/control +++ /dev/null @@ -1,15 +0,0 @@ -Source: lua-tz -Section: interpreters -Priority: optional -Maintainer: Centreon -Build-Depends: - debhelper-compat (=12), - dh-lua (>= 21) -Standards-Version: 4.5.0 -Homepage: https://wwww.centreon.com - -Package: lua-tz -Architecture: all -Depends: - lua5.3 -Description: lua tz library diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/copyright b/stream-connectors/dependencies/lua-tz/packaging/deb/copyright deleted file mode 100644 index f495613e125..00000000000 --- a/stream-connectors/dependencies/lua-tz/packaging/deb/copyright +++ /dev/null @@ -1,23 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: lua-tz -Upstream-Contact: Centreon -Source: https://www.centreon.com - -Files: * -Copyright: 2023 Centreon -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - https://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". 
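The luaver macro in the spec files above and the @luaver@ placeholder in the new nfpm yaml files both come down to asking the interpreter for its own version. What that `lua -e` expression evaluates to, shown in plain Lua:

-- On a Lua 5.3 interpreter, _VERSION is the string "Lua 5.3"; dropping the
-- first four characters ("Lua ") leaves the bare version used in install
-- paths such as /usr/share/lua/5.3.
print(_VERSION)                --> Lua 5.3
print(string.sub(_VERSION, 5)) --> 5.3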
- diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/dirs b/stream-connectors/dependencies/lua-tz/packaging/deb/dirs deleted file mode 100644 index 38d80e9ce7e..00000000000 --- a/stream-connectors/dependencies/lua-tz/packaging/deb/dirs +++ /dev/null @@ -1 +0,0 @@ -/usr/local/share/lua/5.3/luatz diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/install b/stream-connectors/dependencies/lua-tz/packaging/deb/install deleted file mode 100644 index ac189cc0100..00000000000 --- a/stream-connectors/dependencies/lua-tz/packaging/deb/install +++ /dev/null @@ -1 +0,0 @@ -luatz/* /usr/local/share/lua/5.3/luatz diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/rules b/stream-connectors/dependencies/lua-tz/packaging/deb/rules deleted file mode 100644 index 4c83552dc55..00000000000 --- a/stream-connectors/dependencies/lua-tz/packaging/deb/rules +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/make -f - -export DEB_BUILD_MAINT_OPTIONS = hardening=+all - -%: - dh $@ - -override_dh_usrlocal: diff --git a/stream-connectors/dependencies/lua-tz/packaging/deb/source/format b/stream-connectors/dependencies/lua-tz/packaging/deb/source/format deleted file mode 100644 index 163aaf8d82b..00000000000 --- a/stream-connectors/dependencies/lua-tz/packaging/deb/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml b/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml new file mode 100644 index 00000000000..75a2876d54e --- /dev/null +++ b/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml @@ -0,0 +1,38 @@ +name: "lua-tz" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "1${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + lua tz library + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../lua-tz" + dst: "/usr/share/lua/@luaver@/luatz" + packager: rpm + + - src: "../lua-tz" + dst: "/usr/share/lua/5.3/luatz" + packager: deb + +overrides: + rpm: + depends: + - lua + deb: + depends: + - "lua5.3" + +rpm: + summary: lua tz + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/stream-connectors/dependencies/lua-tz/packaging/rpm/lua-tz.spec b/stream-connectors/dependencies/lua-tz/packaging/rpm/lua-tz.spec deleted file mode 100644 index 8477f42447f..00000000000 --- a/stream-connectors/dependencies/lua-tz/packaging/rpm/lua-tz.spec +++ /dev/null @@ -1,43 +0,0 @@ -%{!?luaver: %global luaver %(lua -e "print(string.sub(_VERSION, 5))" || echo 0)} -%global luapkgdir %{_datadir}/lua/%{luaver} -%global lualibdir %{_libdir}/lua/%{luaver} -%global debug_package %{nil} - -Name: lua-tz -Version: %{VERSION} -Release: 1%{?dist} -Summary: lua tz - -Group: Applications/System -License: Apache-2.0 -URL: https://www.centreon.com -Packager: Centreon -Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ - -Source0: %{name}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: lua -BuildRequires: lua-devel - -Requires: lua - -%description -lua tz library - -%prep -%setup -q -n %{name} - -%build - -%install -%{__install} -d $RPM_BUILD_ROOT%{luapkgdir}/luatz -%{__cp} -p ./* $RPM_BUILD_ROOT%{luapkgdir}/luatz - -%clean -%{__rm} -rf $RPM_BUILD_ROOT - -%files -%{luapkgdir}/luatz - -%changelog From 84cbd53df9b62860498c0e92d58ae7799e29c8d8 Mon Sep 17 
00:00:00 2001 From: hamzabessa <148857497+hamzabessa@users.noreply.github.com> Date: Wed, 31 Jan 2024 10:13:46 +0100 Subject: [PATCH 197/219] enh(ci): package connectors using nfpm (#174) Co-authored-by: Kevin Duret --- .../.github/actions/package-nfpm/action.yml | 12 +- .../.github/workflows/lua-cffi.yml | 6 +- .../.github/workflows/lua-curl.yml | 63 +++++-- .../.github/workflows/lua-tz.yml | 6 +- .../workflows/stream-connectors-lib.yml | 9 +- .../.github/workflows/stream-connectors.yml | 158 ++++-------------- .../bsm/bsm_connector-apiv1.lua | 4 +- .../capensis/canopsis2-events-apiv2.lua | 46 ++--- .../capensis/canopsis4-events-apiv2.lua | 52 +++--- .../datadog/datadog-events-apiv2.lua | 35 ++-- .../datadog/datadog-metrics-apiv2.lua | 32 ++-- .../elasticsearch/elastic-events-apiv2.lua | 28 ++-- .../elasticsearch/elastic-metrics-apiv2.lua | 38 ++--- .../elasticsearch/elastic-neb-apiv1.lua | 16 +- .../google/bigquery-events-apiv2.lua | 66 ++++---- .../influxdb/influxdb-metrics-apiv1.lua | 12 +- .../influxdb/influxdb-neb-apiv1.lua | 4 +- .../influxdb/influxdb2-metrics-apiv2.lua | 38 ++--- .../kafka/kafka-events-apiv2.lua | 24 +-- .../logstash/logstash-events-apiv2.lua | 30 ++-- .../omi/omi_connector-apiv1.lua | 20 +-- .../omi/omi_events-apiv2.lua | 2 +- .../opsgenie/opsgenie-apiv1.lua | 74 ++++---- .../opsgenie/opsgenie-events-apiv2.lua | 62 +++---- .../pagerduty/pagerduty-apiv1.lua | 12 +- .../pagerduty/pagerduty-events-apiv2.lua | 4 +- .../lua-cffi/packaging/lua-cffi.yaml | 2 +- .../lua-curl/packaging/lua-curl.yaml | 36 +++- .../dependencies/lua-tz/packaging/lua-tz.yaml | 2 +- .../centreon-stream-connectors-lib.yaml | 19 ++- .../centreon-stream-connectors.yaml | 36 ++++ .../packaging/connectors/deb/control | 14 -- .../packaging/connectors/deb/copyright | 23 --- .../packaging/connectors/deb/install | 0 .../packaging/connectors/deb/rules | 7 - .../packaging/connectors/deb/source/format | 1 - .../packaging/connectors/rpm/connector.spec | 35 ---- 37 files changed, 493 insertions(+), 535 deletions(-) create mode 100644 stream-connectors/packaging/connectors/centreon-stream-connectors.yaml delete mode 100644 stream-connectors/packaging/connectors/deb/control delete mode 100644 stream-connectors/packaging/connectors/deb/copyright delete mode 100644 stream-connectors/packaging/connectors/deb/install delete mode 100644 stream-connectors/packaging/connectors/deb/rules delete mode 100644 stream-connectors/packaging/connectors/deb/source/format delete mode 100644 stream-connectors/packaging/connectors/rpm/connector.spec diff --git a/stream-connectors/.github/actions/package-nfpm/action.yml b/stream-connectors/.github/actions/package-nfpm/action.yml index 0e97a8f7019..ca30601d5ae 100644 --- a/stream-connectors/.github/actions/package-nfpm/action.yml +++ b/stream-connectors/.github/actions/package-nfpm/action.yml @@ -16,6 +16,7 @@ inputs: release: description: The package release number required: false + default: "1" arch: description: The package architecture required: false @@ -34,6 +35,9 @@ inputs: rpm_gpg_signing_passphrase: description: The rpm gpg signing passphrase required: true + stability: + description: "Branch stability (stable, testing, unstable, canary)" + required: true runs: using: composite @@ -51,12 +55,18 @@ runs: RPM_GPG_SIGNING_PASSPHRASE: ${{ inputs.rpm_gpg_signing_passphrase }} run: | export VERSION="${{ inputs.version }}" + export RELEASE="${{ inputs.release }}" export ARCH="${{ inputs.arch }}" if [ "${{ inputs.package_extension }}" = "rpm" ]; then export DIST=".${{ 
inputs.distrib }}" else export DIST="" + if [ "${{ inputs.stability }}" = "unstable" ] || [ "${{ inputs.stability }}" = "canary" ]; then + export RELEASE="$RELEASE~${{ inputs.distrib }}" + elif [ "${{ inputs.stability }}" = "testing" ]; then + export RELEASE="1~${{ inputs.distrib }}" + fi fi luaver=$(lua -e "print(string.sub(_VERSION, 5))" 2>/dev/null || echo 0) @@ -93,6 +103,6 @@ runs: name: Upload package artifacts uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 with: - name: packages-${{ inputs.distrib }} + name: packages-${{ inputs.nfpm_file_pattern }}-${{ inputs.distrib }} path: ./*.${{ inputs.package_extension}} retention-days: 1 diff --git a/stream-connectors/.github/workflows/lua-cffi.yml b/stream-connectors/.github/workflows/lua-cffi.yml index 0435edd729f..e421e29eb46 100644 --- a/stream-connectors/.github/workflows/lua-cffi.yml +++ b/stream-connectors/.github/workflows/lua-cffi.yml @@ -92,12 +92,14 @@ jobs: distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} arch: amd64 - version: "0.2.3" + version: "0.2.4" + release: "1" commit_hash: ${{ github.sha }} - cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-cffi-${{ matrix.distrib }} rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} diff --git a/stream-connectors/.github/workflows/lua-curl.yml b/stream-connectors/.github/workflows/lua-curl.yml index 4c43ead90d6..707d9eb591a 100644 --- a/stream-connectors/.github/workflows/lua-curl.yml +++ b/stream-connectors/.github/workflows/lua-curl.yml @@ -34,6 +34,9 @@ jobs: - package_extension: rpm image: packaging-stream-connectors-nfpm-alma9 distrib: el9 + - package_extension: deb + image: packaging-stream-connectors-nfpm-bullseye + distrib: bullseye runs-on: ubuntu-22.04 @@ -49,10 +52,6 @@ jobs: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Install dependencies - run: dnf install -y make gcc meson gcc-c++ cmake libcurl-devel - shell: bash - - name: Checkout sources of lua-curl uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: @@ -62,9 +61,14 @@ jobs: - name: Compile lua-curl and prepare packaging run: | - dnf install -y openssl-devel openssl libcurl-devel python3-pip cpanminus - - cpanm \ + if [ "${{ matrix.package_extension }}" == "rpm" ]; then + dnf install -y make cmake gcc openssl openssl-devel libcurl-devel python3-pip cpanminus + else + apt-get update + apt-get install -y make cmake gcc openssl libssl-dev libcurl4-openssl-dev python3-pip cpanminus liblua5.3-dev + fi + + cpanm -v \ IPC::Cmd \ Digest::SHA \ Thread::Queue \ @@ -94,7 +98,12 @@ jobs: conan install . 
--build=missing --deployer=full_deploy - sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.0.1/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/1.1.1t/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread#" Makefile + if [ "${{ matrix.package_extension }}" == "rpm" ]; then + sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.0.1/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/1.1.1t/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread#" Makefile + else + sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.0.1/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/1.1.1t/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread -I/usr/include/lua5.3#" Makefile + fi + make cd .. @@ -105,19 +114,31 @@ jobs: cp -p lua-curl-src/src/lua/cURL.lua dependencies/lua-curl/lua-curl/ shell: bash + - name: Update package name + run: | + if [ "${{ matrix.package_extension }}" == "rpm" ]; then + NAME="lua-curl" + else + NAME="lua5.3-curl" + fi + sed -i "s/@NAME@/$NAME/g" ./dependencies/lua-curl/packaging/lua-curl.yaml + shell: bash + - name: Package uses: ./.github/actions/package-nfpm with: - nfpm_file_pattern: "dependencies/lua-curl/packaging/*.yaml" + nfpm_file_pattern: "dependencies/lua-curl/packaging/lua-curl.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} arch: amd64 version: "0.3.13" + release: "7" commit_hash: ${{ github.sha }} - cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-curl-${{ matrix.distrib }} rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} @@ -140,3 +161,25 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-curl-${{ matrix.distrib }} stability: ${{ needs.get-environment.outputs.stability }} + + deliver-deb: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish DEB packages + uses: ./.github/actions/deb-delivery + with: + module_name: lua-curl + distrib: ${{ matrix.distrib }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-curl-${{ matrix.distrib }} + stability: ${{ needs.get-environment.outputs.stability }} diff --git a/stream-connectors/.github/workflows/lua-tz.yml b/stream-connectors/.github/workflows/lua-tz.yml index e210ec1b080..494c9d13363 100644 --- a/stream-connectors/.github/workflows/lua-tz.yml +++ b/stream-connectors/.github/workflows/lua-tz.yml @@ -70,12 +70,14 @@ jobs: distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} arch: all - version: "0.4" + 
version: "0.5" + release: "1" commit_hash: ${{ github.sha }} - cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-tz-${{ matrix.distrib }} rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/stream-connectors/.github/workflows/stream-connectors-lib.yml index ae4a8647ffd..4dfeeb1a4f3 100644 --- a/stream-connectors/.github/workflows/stream-connectors-lib.yml +++ b/stream-connectors/.github/workflows/stream-connectors-lib.yml @@ -8,12 +8,14 @@ on: workflow_dispatch: pull_request: paths: + - packaging/connectors-lib/** - modules/centreon-stream-connectors-lib/** push: branches: - develop - master paths: + - packaging/connectors-lib/** - modules/centreon-stream-connectors-lib/** jobs: @@ -50,13 +52,15 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Package uses: ./.github/actions/package-nfpm with: nfpm_file_pattern: "packaging/connectors-lib/*.yaml" distrib: ${{ matrix.distrib }} + version: "3.6.1" + release: "1" package_extension: ${{ matrix.package_extension }} arch: all commit_hash: ${{ github.sha }} @@ -64,10 +68,11 @@ jobs: rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment] + needs: [get-environment, package] runs-on: ubuntu-22.04 strategy: matrix: diff --git a/stream-connectors/.github/workflows/stream-connectors.yml b/stream-connectors/.github/workflows/stream-connectors.yml index 03888b42226..88aa44186fe 100644 --- a/stream-connectors/.github/workflows/stream-connectors.yml +++ b/stream-connectors/.github/workflows/stream-connectors.yml @@ -27,7 +27,7 @@ jobs: steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1 + - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3.0.0 id: filter with: base: ${{ github.ref }} @@ -53,7 +53,7 @@ jobs: package: if: ${{ needs.detect-changes.outputs.connectors != '[]' }} - needs: [detect-changes] + needs: [get-environment, detect-changes] runs-on: ubuntu-22.04 strategy: matrix: @@ -61,13 +61,13 @@ jobs: connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} include: - distrib: el8 - image: packaging-stream-connectors-alma8 + image: packaging-stream-connectors-nfpm-alma8 package_extension: rpm - distrib: el9 - image: packaging-stream-connectors-alma9 + image: packaging-stream-connectors-nfpm-alma9 package_extension: rpm - distrib: bullseye - image: packaging-stream-connectors-bullseye + image: packaging-stream-connectors-nfpm-bullseye package_extension: deb name: package ${{ matrix.distrib }} ${{ matrix.connector_path }} @@ -78,12 +78,20 @@ jobs: password: ${{ 
secrets.DOCKER_REGISTRY_PASSWD }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Replace package and connector name variables + run: | + package_name="centreon-stream-connector-`basename ${{ matrix.connector_path }}`" + sed -i "s/@PACKAGE_NAME@/$package_name/g" ./packaging/connectors/centreon-stream-connectors.yaml + connector_name="`basename ${{ matrix.connector_path }}`" + sed -i "s/@CONNECTOR_NAME@/$connector_name/g" ./packaging/connectors/centreon-stream-connectors.yaml + shell: bash - name: Add specific dependencies - id: list-dependencies run: | - MIN_LIB_VERSION="3.0.0" DEB_DEPENDENCIES="" RPM_DEPENDENCIES="" if [ "${{ matrix.connector_path }}" = "kafka" ]; then @@ -96,128 +104,34 @@ jobs: DEB_DEPENDENCIES="lua-tz" RPM_DEPENDENCIES="lua-tz" fi - echo "min_lib_version=$MIN_LIB_VERSION" >> $GITHUB_OUTPUT - echo "deb_dependencies=$DEB_DEPENDENCIES" >> $GITHUB_OUTPUT - echo "rpm_dependencies=$RPM_DEPENDENCIES" >> $GITHUB_OUTPUT + sed -i "s/@RPM_DEPENDENCIES@/$RPM_DEPENDENCIES/g;" ./packaging/connectors/centreon-stream-connectors.yaml + sed -i "s/@DEB_DEPENDENCIES@/$DEB_DEPENDENCIES/g;" ./packaging/connectors/centreon-stream-connectors.yaml shell: bash - - if: ${{ matrix.package_extension == 'rpm' }} - run: | - mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} - - PACKAGE_NAME="centreon-stream-connector-`basename ${{ matrix.connector_path }}`" - PACKAGE_VERSION=`date '+%Y%m%d'` - - mkdir $PACKAGE_NAME - cp centreon-certified/${{ matrix.connector_path }}/*.lua $PACKAGE_NAME/ - - tar czf ~/rpmbuild/SOURCES/$PACKAGE_NAME.tar.gz $PACKAGE_NAME - - touch dependencies.txt - if [ ! -z "${{ steps.list-dependencies.outputs.rpm_dependencies }}" ]; then - for dependency in "${{ steps.list-dependencies.outputs.rpm_dependencies }}"; do - echo "Requires: $dependency" >> dependencies.txt - done - fi - sed -i '/Requires:/r dependencies.txt' packaging/connectors/rpm/connector.spec - - touch files.txt - for file in $PACKAGE_NAME/*.lua; do - echo "%{_datadir}/centreon-broker/lua/`basename $file`" >> files.txt - done - sed -i '/%files/r files.txt' packaging/connectors/rpm/connector.spec - - rpmbuild -ba packaging/connectors/rpm/connector.spec -D "PACKAGE_NAME $PACKAGE_NAME" -D "VERSION $PACKAGE_VERSION" -D "MIN_LIB_VERSION ${{ steps.list-dependencies.outputs.min_lib_version }}" - - mv ~/rpmbuild/RPMS/**/*.rpm ./ + - name: Export package version + id: package-version + run: echo "package_version=`date '+%Y%m%d'`" >> $GITHUB_OUTPUT shell: bash - - if: ${{ matrix.package_extension == 'deb' }} - run: | - PACKAGE_NAME="centreon-stream-connector-`basename ${{ matrix.connector_path }}`" - PACKAGE_VERSION=`date '+%Y%m%d'` - - mkdir -p $PACKAGE_NAME-$PACKAGE_VERSION/debian - - cp centreon-certified/${{ matrix.connector_path }}/*.lua $PACKAGE_NAME-$PACKAGE_VERSION/ - - sed -i "s#@PACKAGE_NAME@#$PACKAGE_NAME#g" packaging/connectors/deb/control - sed -i "s#@MIN_LIB_VERSION@#${{ steps.list-dependencies.outputs.min_lib_version }}#g" packaging/connectors/deb/control - - touch dependencies.txt - if [ ! 
-z "${{ steps.list-dependencies.outputs.deb_dependencies }}" ]; then - for dependency in "${{ steps.list-dependencies.outputs.deb_dependencies }}"; do - echo " $dependency," >> dependencies.txt - done - fi - sed -i '/^Depends:/r dependencies.txt' packaging/connectors/deb/control - - for file in $PACKAGE_NAME-$PACKAGE_VERSION/*.lua; do - echo "`basename $file` /usr/share/centreon-broker/lua/`basename $file`" >> packaging/connectors/deb/install - done - - cp -rp packaging/connectors/deb/* $PACKAGE_NAME-$PACKAGE_VERSION/debian/ - - tar czf $PACKAGE_NAME-$PACKAGE_VERSION.tar.gz $PACKAGE_NAME-$PACKAGE_VERSION - - cd $PACKAGE_NAME-$PACKAGE_VERSION - - debmake -f "centreon" -e "contact@centreon.com" -y -r ${{ matrix.distrib }} - debuild-pbuilder --no-lintian - shell: bash - - - if: ${{ matrix.package_extension == 'deb' }} - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - path: ./*.${{ matrix.package_extension }} - key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} - - - if: ${{ matrix.package_extension == 'rpm' }} - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 + - name: Package + uses: ./.github/actions/package-nfpm with: - path: ./*.${{ matrix.package_extension }} - key: unsigned-${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} - - sign-rpm: - needs: [detect-changes, package] - runs-on: ubuntu-22.04 - strategy: - matrix: - distrib: [el8, el9] - connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} - name: sign rpm ${{ matrix.distrib }} ${{ matrix.connector_path }} - container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/rpm-signing:ubuntu - options: -t - credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - steps: - - run: apt-get install -y zstd - shell: bash - - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - path: ./*.rpm - key: unsigned-${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} - - - run: echo "HOME=/root" >> $GITHUB_ENV - shell: bash - - - run: rpmsign --addsign ./*.rpm - shell: bash - - - uses: actions/cache@e12d46a63a90f2fae62d114769bbf2a179198b5c # v3.3.3 - with: - path: ./*.rpm - key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} + nfpm_file_pattern: "packaging/connectors/centreon-stream-connectors.yaml" + distrib: ${{ matrix.distrib }} + version: ${{ steps.package-version.outputs.package_version }} + release: "1" + package_extension: ${{ matrix.package_extension }} + arch: all + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-rpm: if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, detect-changes, sign-rpm] + needs: [get-environment, detect-changes, package] runs-on: ubuntu-22.04 strategy: matrix: diff --git 
a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua index 1fe32338664..ec1851862ee 100644 --- a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua +++ b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua @@ -166,7 +166,7 @@ function EventQueue:add(e) if e.service_id then xml_url = ifnil(broker_cache:get_notes_url(e.host_id, e.service_id), 'no notes url for this service') xml_service_severity = "" ..ifnil(broker_cache:get_severity(e.host_id, e.service_id), '0') .. "" - else + else xml_url = ifnil(broker_cache:get_action_url(e.host_id), 'no action url for this host') xml_notes = "" .. ifnil(broker_cache:get_notes(e.host_id), 'OS not set') .. "" end @@ -332,7 +332,7 @@ function write(e) return true end - -- Ignore SOFT + -- Ignore SOFT if e.state_type and e.state_type ~= 1 then broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Not HARD state type. Dropping.") return true diff --git a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua index 5d24c041a3c..695bcbc3777 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua @@ -45,18 +45,18 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/canopsis2-events.log" local log_level = params.log_level or 1 - + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) - + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true end - + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs self.sc_params.params.canopsis_user = params.canopsis_user self.sc_params.params.canopsis_password = params.canopsis_password @@ -72,7 +72,7 @@ function EventQueue.new(params) self.sc_params.params.timezone = params.timezone or "Europe/Paris" self.sc_params.params.accepted_categories = params.accepted_categories or "neb" self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status,acknowledgement,downtime" - + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() @@ -82,13 +82,13 @@ function EventQueue.new(params) if self.sc_params.params.connector_name_type ~= "poller" and self.sc_params.params.connector_name_type ~= "custom" then self.sc_params.params.connector_name_type = "poller" end - + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) -- only load the custom code file, not executed yet - if self.sc_params.load_custom_code_file - and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) + if self.sc_params.load_custom_code_file + and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) @@ -96,10 +96,10 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) - + local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements - + self.sc_flush:add_queue_metadata(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route}) self.sc_flush:add_queue_metadata(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route}) self.sc_flush:add_queue_metadata(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route}) @@ -224,7 +224,7 @@ end function EventQueue:format_event_service() local event = self.sc_event.event - + self.sc_event.event.formated_event = { event_type = "check", source_type = "resource", @@ -346,8 +346,8 @@ function EventQueue:add() self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end @@ -365,7 +365,7 @@ function EventQueue:build_payload(payload, event) else table.insert(payload, event) end - + return payload end @@ -408,7 +408,7 @@ function EventQueue:send_data(payload, queue_metadata) if (self.sc_params.params.proxy_address ~= '') then if (self.sc_params.params.proxy_port ~= '') then http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) - else + else self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") end end @@ -432,12 +432,12 @@ function EventQueue:send_data(payload, queue_metadata) -- performing the HTTP request http_request:perform() - + -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() - + -- Handling the return code local retval = false @@ -451,10 +451,10 @@ function EventQueue:send_data(payload, queue_metadata) self.sc_logger:info("[EventQueue:send_data]: duplicated downtime event: " .. tostring(data)) retval = true else - self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) end - + return retval end @@ -490,16 +490,16 @@ function write (event) if queue.sc_event:is_valid_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end @@ -507,7 +507,7 @@ end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true diff --git a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua index c29fa9de9ee..6ee7b4bc446 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua @@ -44,18 +44,18 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/canopsis4-events.log" local log_level = params.log_level or 1 - + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) - + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true end - + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs self.sc_params.params.canopsis_authkey = params.canopsis_authkey self.sc_params.params.connector = params.connector or "centreon-stream" @@ -71,7 +71,7 @@ function EventQueue.new(params) self.sc_params.params.accepted_categories = params.accepted_categories or "neb" self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status,acknowledgement" self.sc_params.params.use_severity_as_state = params.use_severity_as_state or 0 - + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() @@ -80,13 +80,13 @@ function EventQueue.new(params) if self.sc_params.params.connector_name_type ~= "poller" and self.sc_params.params.connector_name_type ~= "custom" then self.sc_params.params.connector_name_type = "poller" end - + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) -- only load the custom code file, not executed yet - if self.sc_params.load_custom_code_file - and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) + if self.sc_params.load_custom_code_file + and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) @@ -94,10 +94,10 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) - + local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements - + self.sc_flush:add_queue_metadata(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route}) self.sc_flush:add_queue_metadata(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route}) self.sc_flush:add_queue_metadata(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route}) @@ -196,7 +196,7 @@ function EventQueue:get_state(event, severity) if severity and self.sc_params.params.use_severity_as_state == 1 then return severity end - + return self.centreon_to_canopsis_state[event.category][event.element][event.state] end @@ -231,7 +231,7 @@ end function EventQueue:format_event_service() local event = self.sc_event.event - + self.sc_event.event.formated_event = { event_type = "check", source_type = "resource", @@ -361,8 +361,8 @@ function EventQueue:add() self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end end @@ -379,7 +379,7 @@ function EventQueue:build_payload(payload, event) else table.insert(payload, event) end - + return payload end @@ -394,9 +394,9 @@ function EventQueue:send_data(payload, queue_metadata) "content-type: application/json", "x-canopsis-authkey: " .. tostring(self.sc_params.params.canopsis_authkey) } - + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) - + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:notice("[send_data]: " .. tostring(payload)) @@ -422,7 +422,7 @@ function EventQueue:send_data(payload, queue_metadata) if (self.sc_params.params.proxy_address ~= '') then if (self.sc_params.params.proxy_port ~= '') then http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) - else + else self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") end end @@ -446,15 +446,15 @@ function EventQueue:send_data(payload, queue_metadata) -- performing the HTTP request http_request:perform() - + -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() - + -- Handling the return code local retval = false - + if http_response_code == 200 then self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) @@ -468,7 +468,7 @@ function EventQueue:send_data(payload, queue_metadata) self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. 
tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) end - + return retval end @@ -504,16 +504,16 @@ function write (event) if queue.sc_event:is_valid_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end @@ -521,7 +521,7 @@ end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true diff --git a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua index 6a6bba15f63..eb45ad6bdec 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua @@ -43,20 +43,20 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/datadog-events.log" local log_level = params.log_level or 1 - + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) - + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true end - + --params.max_buffer_size = 1 - + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs self.sc_params.params.api_key = params.api_key self.sc_params.params.datadog_centreon_url = params.datadog_centreon_url or "http://yourcentreonaddress.local" @@ -64,11 +64,11 @@ function EventQueue.new(params) self.sc_params.params.http_server_url = params.http_server_url or "https://api.datadoghq.com" self.sc_params.params.accepted_categories = params.accepted_categories or "neb" self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" - + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() - + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) @@ -164,7 +164,7 @@ end function EventQueue:format_event_service() local event = self.sc_event.event - + self.sc_event.event.formated_event = { title = tostring(self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. event.cache.host.name .. ": " .. event.cache.service.description), text = event.output, @@ -205,7 +205,7 @@ function EventQueue:build_payload(payload, event) else payload = payload .. 
broker.json_encode(event) end - + return payload end @@ -244,7 +244,7 @@ function EventQueue:send_data(payload, queue_metadata) if (self.sc_params.params.proxy_address ~= '') then if (self.sc_params.params.proxy_port ~= '') then http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) - else + else self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") end end @@ -263,12 +263,12 @@ function EventQueue:send_data(payload, queue_metadata) -- performing the HTTP request http_request:perform() - + -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() - + -- Handling the return code local retval = false -- https://docs.datadoghq.com/fr/api/latest/events/ other than 202 is not good @@ -278,7 +278,7 @@ function EventQueue:send_data(payload, queue_metadata) else self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) end - + return retval end @@ -314,16 +314,16 @@ function write (event) if queue.sc_event:is_valid_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end @@ -331,7 +331,7 @@ end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true @@ -358,4 +358,3 @@ function flush() -- there are events in the queue but they were not ready to be send return false end - diff --git a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua index 287e9101caa..e6718848b03 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -44,20 +44,20 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/datadog-metrics.log" local log_level = params.log_level or 3 - + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) - + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true end - + --params.max_buffer_size = 1 - + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs self.sc_params.params.api_key = params.api_key self.sc_params.params.datadog_centreon_url = params.datadog_centreon_url or "http://yourcentreonaddress.local" @@ -71,7 +71,7 @@ function EventQueue.new(params) 
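-- A sketch of the write()/flush() contract visible in these connectors:
-- broker calls flush() regularly, a true return means "queues drained, the
-- retention can be dropped" and false means "retry later". The queue object
-- and its send_all method are invented stand-ins for sc_flush.
local queue = { events = {} }
function queue:get_size() return #self.events end
function queue:send_all() self.events = {} return true end -- pretend HTTP send

local function flush()
  if queue:get_size() == 0 then
    return true -- nothing to flush
  end
  return queue:send_all() -- false here would make broker resubmit later
end

table.insert(queue.events, { output = "OK" })
print(flush()) -- true
print(flush()) -- true (queue now empty)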
self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 self.sc_params.params.metric_name_regex = params.metric_name_regex or "[^a-zA-Z0-9_%.]" self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "_" - + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() @@ -194,7 +194,7 @@ end -------------------------------------------------------------------------------- ---- EventQueue:build_metadata method -- @param metric {table} a single metric data --- @return tags {table} a table with formated metadata +-- @return tags {table} a table with formatted metadata -------------------------------------------------------------------------------- function EventQueue:build_metadata(metric) local tags = {} @@ -251,7 +251,7 @@ function EventQueue:build_payload(payload, event) else table.insert(payload.series, event) end - + return payload end @@ -292,7 +292,7 @@ function EventQueue:send_data(payload, queue_metadata) if (self.sc_params.params.proxy_address ~= '') then if (self.sc_params.params.proxy_port ~= '') then http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) - else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") end end @@ -311,12 +311,12 @@ function EventQueue:send_data(payload, queue_metadata) -- performing the HTTP request http_request:perform() - + -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() - + -- Handling the return code local retval = false -- https://docs.datadoghq.com/fr/api/latest/events/ other than 202 is not good @@ -326,7 +326,7 @@ function EventQueue:send_data(payload, queue_metadata) else self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) end - + return retval end @@ -363,16 +363,16 @@ function write (event) if queue.sc_metrics:is_valid_metric_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. 
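-- A sketch of the Elasticsearch _bulk body assembled by build_payload in the
-- elastic connectors below: every document is preceded by an action line and
-- both are newline-terminated (NDJSON). json_encode again stands in for
-- broker.json_encode.
local function json_encode(t) return string.format('{"host":%q}', t.host) end

local function bulk_append(payload, event)
  local line = '{"index":{}}\n' .. json_encode(event) .. "\n"
  return (payload or "") .. line
end

local body = bulk_append(nil, { host = "srv1" })
body = bulk_append(body, { host = "srv2" })
io.write(body)
-- {"index":{}}
-- {"host":"srv1"}
-- {"index":{}}
-- {"host":"srv2"}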
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end @@ -380,7 +380,7 @@ end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua index f4088871334..c4547613d51 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -72,7 +72,7 @@ function EventQueue.new(params) if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) end - + self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) @@ -90,7 +90,7 @@ function EventQueue.new(params) self.send_data_method = { [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end } - + self.build_payload_method = { [1] = function (payload, event) return self:build_payload(payload, event) end } @@ -164,23 +164,23 @@ function EventQueue:format_accepted_event() -------------------------------------------------------------------------------- -- EventQueue:add, add an event to the sending queue -------------------------------------------------------------------------------- - + function EventQueue:add() -- store event in self.events lists local category = self.sc_event.event.category local element = self.sc_event.event.element - + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) - + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end - + -------------------------------------------------------------------------------- -- EventQueue:build_payload, concatenate data so it is ready to be sent -- @param payload {string} json encoded string @@ -193,7 +193,7 @@ function EventQueue:format_accepted_event() else payload = payload .. self.http_post_metadata .. '\n' .. broker.json_encode(event) .. '\n' end - + return payload end @@ -208,7 +208,7 @@ function EventQueue:format_accepted_event() } self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) - + -- write payload in the logfile for test purpose if self.sc_params.params.send_data_test == 1 then self.sc_logger:info("[send_data]: " .. 
tostring(payload)) @@ -303,16 +303,16 @@ function write (event) if queue.sc_event:is_valid_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end @@ -320,7 +320,7 @@ end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua index 821ee296aaa..5339b832893 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua @@ -147,7 +147,7 @@ function EventQueue:build_index_template(params) description = "Timeseries index template for Centreon metrics", created_by_centreon = true } - + self.index_routing_path = { "host_name", "service_description", @@ -368,7 +368,7 @@ function EventQueue:validate_index_template(params) "metric_instance", "metric_subinstances" } - + if params.add_hostgroups_dimension == 1 then table.insert(required_index_mapping_properties, "host_groups") end @@ -522,7 +522,7 @@ function EventQueue:add_generic_optional_information(metric) for _, hg_info in ipairs(event.cache.hostgroups) do table.insert(hostgroups, hg_info.group_name) end - + self.sc_event.event.formated_event["host_groups"] = hostgroups end @@ -554,15 +554,15 @@ function EventQueue:handle_NaN(value) return nil end -function EventQueue:add_service_optional_information() - -- add servicegroups +function EventQueue:add_service_optional_information() + -- add servicegroups if self.sc_params.params.add_servicegroups_dimension == 1 then local servicegroups = {} for _, sg_info in ipairs(self.sc_event.event.cache.servicegroups) do table.insert(servicegroups, sg_info.group_name) end - + self.sc_event.event.formated_event["service_groups"] = servicegroups end end @@ -596,7 +596,7 @@ function EventQueue:build_payload(payload, event) else payload = payload .. '{"index":{}}\n' .. broker.json_encode(event) .. "\n" end - + return payload end @@ -620,7 +620,7 @@ function EventQueue:send_data(payload, queue_metadata) return true end end - + self.sc_logger:info("[EventQueue:send_data]: Elastic address is: " .. tostring(url)) self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload, basic_auth) @@ -641,7 +641,7 @@ function EventQueue:send_data(payload, queue_metadata) if (params.proxy_address ~= '') then if (params.proxy_port ~= '') then http_request:setopt(curl.OPT_PROXY, params.proxy_address .. ':' .. 
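-- A sketch of the optional-dimension pattern used in
-- add_generic_optional_information below: the cached hostgroup entries are
-- reduced to a flat list of names before being attached to the formatted
-- event. The cache table is faked for the example.
local cache = { hostgroups = { { group_name = "linux" }, { group_name = "web" } } }

local hostgroups = {}
for _, hg_info in ipairs(cache.hostgroups) do
  table.insert(hostgroups, hg_info.group_name)
end

print(table.concat(hostgroups, ",")) -- linux,web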
params.proxy_port) - else + else self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") end end @@ -667,22 +667,22 @@ function EventQueue:send_data(payload, queue_metadata) -- performing the HTTP request http_request:perform() - + -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() - + -- the gsub function is here to fix a bug with the broker method json_decode that crashes when a value is null. Internal issue: MON-20481 self.elastic_result = string.gsub(http_response_body, "null", "false") local decoded_elastic_result, error_json = broker.json_decode(self.elastic_result) - + if error_json then self.sc_logger:error("[EventQueue:send_data]: Couldn't decode json from elasticsearch. Error is: " .. tostring(error_json) .. ". Received json is: " .. tostring(http_response_body) .. ". Sent data is: " .. tostring(payload)) return false end - + if (http_response_code == 200 and not decoded_elastic_result.errors) then self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) return true @@ -734,16 +734,16 @@ function write (event) if queue.sc_metrics:is_valid_metric_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end @@ -751,7 +751,7 @@ end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true @@ -777,4 +777,4 @@ function flush() -- there are events in the queue but they were not ready to be send return false -end \ No newline at end of file +end diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua index d7b91f0f932..dc1c79e0e23 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua @@ -4,7 +4,7 @@ -- Tested with versions -- 7.1.1 -- --- References: +-- References: -- https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html -------------------------------------------------------------------------------- @@ -55,7 +55,7 @@ function EventQueue:flush() local http_result_body = {} local url = self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. "/_bulk" - local http_post_data = "" + local http_post_data = "" for _, raw_event in ipairs(self.events) do if raw_event.status then http_post_data = http_post_data .. '{"index":{"_index":"' .. self.elastic_index_status .. '"}}\n' .. @@ -70,7 +70,7 @@ function EventQueue:flush() for s in http_post_data:gmatch("[^\r\n]+") do broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. 
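-- A sketch of the json_decode workaround above: broker.json_decode crashes
-- on JSON null (internal issue MON-20481), so the raw body is rewritten
-- before decoding. Plain string.gsub, runnable as-is; note it would also
-- rewrite the word "null" inside string values, a known trade-off.
local http_response_body = '{"errors":null,"took":12}'
local elastic_result = string.gsub(http_response_body, "null", "false")
print(elastic_result) -- {"errors":false,"took":12}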
"") end - + http.TIMEOUT = self.http_timeout local req if self.http_server_protocol == "http" then @@ -147,9 +147,9 @@ function EventQueue:add(e) return false end previous_event = current_event - + broker_log:info(3, "EventQueue:add: " .. current_event) - + local type = "host" local hostname = get_hostname(e.host_id) if hostname == e.host_id then @@ -164,7 +164,7 @@ function EventQueue:add(e) if self.skip_anon_events == 1 then return false end end end - + if string.match(self.filter_type, "status") then self.events[#self.events + 1] = { timestamp = e.last_check, @@ -194,7 +194,7 @@ function EventQueue:add(e) broker_log:info(3, "EventQueue:add: No metric: " .. perfdata_err) return false end - + for m,v in pairs(perfdata) do local instance = string.match(m, "(.*)#.*") if not instance then @@ -311,7 +311,7 @@ end local queue -- Fonction init() -function init(conf) +function init(conf) local log_level = 3 local log_path = "/var/log/centreon-broker/stream-connector-elastic-neb.log" for i,v in pairs(conf) do diff --git a/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua index 205afc4e746..4e976e2ce72 100644 --- a/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua +++ b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua @@ -22,19 +22,19 @@ function EventQueue.new(params) [4] = "scope_list" } - + self.fail = false - + -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" local log_level = params.log_level or 2 - + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) - + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true @@ -47,68 +47,68 @@ function EventQueue.new(params) self.sc_params.params.proxy_port = params.proxy_port self.sc_params.params.proxy_username = params.proxy_username self.sc_params.params.proxy_password = params.proxy_password - + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() - + self.sc_params.params.__internal_ts_host_last_flush = os.time() self.sc_params.params.__internal_ts_service_last_flush = os.time() self.sc_params.params.__internal_ts_ack_last_flush = os.time() self.sc_params.params.__internal_ts_dt_last_flush = os.time() self.sc_params.params.__internal_ts_ba_last_flush = os.time() - + self.sc_params.params.host_table = params.host_table or "hosts" self.sc_params.params.service_table = params.service_table or "services" self.sc_params.params.ack_table = params.ack_table or "acknowledgements" self.sc_params.params.downtime_table = params.downtime_table or "downtimes" self.sc_params.params.ba_table = params.ba_table or "bas" self.sc_params.params._sc_gbq_use_default_schemas = 1 - + local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements - + -- initiate EventQueue variables self.events = { [categories.neb.id] = {}, [categories.bam.id] = {} } - + self.events[categories.neb.id] = { [elements.acknowledgement.id] = {}, [elements.downtime.id] = {}, [elements.host_status.id] = {}, [elements.service_status.id] = {} } - + self.events[categories.bam.id] = { [elements.ba_status.id] = {} } 
- + self.flush = { [categories.neb.id] = {}, [categories.bam.id] = {} } - + self.flush[categories.neb.id] = { [elements.acknowledgement.id] = function () return self:flush_ack() end, [elements.downtime.id] = function () return self:flush_dt() end, [elements.host_status.id] = function () return self:flush_host() end, [elements.service_status.id] = function () return self:flush_service() end } - + self.flush[categories.bam.id] = { [elements.ba_status.id] = function () return self:flush_ba() end } - + self.sc_params.params.google_bq_api_url = params.google_bq_api_url or "https://content-bigquery.googleapis.com/bigquery/v2" - + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.sc_oauth = sc_oauth.new(self.sc_params.params, self.sc_common, self.sc_logger) -- , self.sc_common, self.sc_logger) self.sc_bq = sc_bq.new(self.sc_params.params, self.sc_logger) self.sc_bq:get_tables_schema() - - + + -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self @@ -119,16 +119,16 @@ end -- @return true (boolean) -------------------------------------------------------------------------------- function EventQueue:format_event() - + self.sc_event.event.formated_event = {} self.sc_event.event.formated_event.json = {} - + for column, value in pairs(self.sc_bq.schemas[self.sc_event.event.category][self.sc_event.event.element]) do self.sc_event.event.formated_event.json[column] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) end - + self:add() - + return true end @@ -158,7 +158,7 @@ function EventQueue:flush_host () -- reset stored events list self.events[categories.neb.id][elements.host_status.id] = {} - + -- and update the timestamp self.sc_params.params.__internal_ts_host_last_flush = os.time() @@ -181,7 +181,7 @@ function EventQueue:flush_service () -- reset stored events list self.events[categories.neb.id][elements.service_status.id] = {} - + -- and update the timestamp self.sc_params.params.__internal_ts_service_last_flush = os.time() @@ -204,7 +204,7 @@ function EventQueue:flush_ack () -- reset stored events list self.events[categories.neb.id][elements.acknowledgement.id] = {} - + -- and update the timestamp self.sc_params.params.__internal_ts_ack_last_flush = os.time() @@ -227,7 +227,7 @@ function EventQueue:flush_dt () -- reset stored events list self.events[categories.neb.id][elements.downtime.id] = {} - + -- and update the timestamp self.sc_params.params.__internal_ts_dt_last_flush = os.time() @@ -250,7 +250,7 @@ function EventQueue:flush_ba () -- reset stored events list self.events[categories.bam.id][elements.ba_status.id] = {} - + -- and update the timestamp self.sc_params.params.__internal_ts_ba_last_flush = os.time() @@ -261,7 +261,7 @@ function EventQueue:flush_old_queues() local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements local current_time = os.time() - + -- flush old ack events if #self.events[categories.neb.id][elements.acknowledgement.id] > 0 and os.time() - self.sc_params.params.__internal_ts_ack_last_flush > self.sc_params.params.max_buffer_age then self:flush_ack() @@ -334,22 +334,22 @@ function EventQueue:call (data, table_name) -- initiate curl local request = curl.easy() :setopt_url(url) - :setopt_writefunction(function (response) + :setopt_writefunction(function (response) res = res .. response end) - + -- add postfields url params if data then request:setopt_postfields(data) end self.sc_logger:info("[EventQueue:call]: URL: " .. 
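-- A sketch of the dispatch tables built above: one event list and one flush
-- closure per (category id, element id) pair, so events are routed with two
-- table lookups. The ids below (neb = 1, host_status = 14) mirror the BBDO
-- numbering used by the apiv1 connectors further down; the bodies are fakes.
local neb, host_status = 1, 14
local events = { [neb] = { [host_status] = {} } }
local flushers = {
  [neb] = {
    [host_status] = function()
      print("flushing " .. #events[neb][host_status] .. " host_status events")
      events[neb][host_status] = {}
      return true
    end,
  },
}

table.insert(events[neb][host_status], { state = 0 })
flushers[neb][host_status]() -- flushing 1 host_status events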
tostring(url)) - + -- set proxy address configuration if (self.sc_params.params.proxy_address ~= "" and self.sc_params.params.proxy_address) then if (self.sc_params.params.proxy_port ~= "" and self.sc_params.params.proxy_port) then request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) - else + self.sc_logger:error("[EventQueue:call]: proxy_port parameter is not set but proxy_address is used") end end @@ -391,7 +391,7 @@ function write(event) queue.sc_logger:error("Skipping event because a mandatory parameter is not set") return true end - + -- initiate event object queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua index 4ce1fd861e1..879ff852942 100644 --- a/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua +++ b/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua @@ -4,7 +4,7 @@ -- Tested with versions -- 1.4.3 -- --- References: +-- References: -- https://docs.influxdata.com/influxdb/v1.4/write_protocols/line_protocol_tutorial/ -- https://docs.influxdata.com/influxdb/v1.4/guides/writing_data/ -------------------------------------------------------------------------------- @@ -14,7 +14,7 @@ -- You need an influxdb server -- You can install one with docker and these commands: -- docker pull influxdb --- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb +-- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb -- You need to create a database -- curl http://:8086/query --data-urlencode "q=CREATE DATABASE mydb" -- @@ -64,7 +64,7 @@ function event_queue:new(o, conf) end -- Method: event_queue:flush --- Called when the max number of events or when the max age of buffer is reached +-- Called when the max number of events or when the max age of buffer is reached function event_queue:flush() broker_log:info(2, "event_queue:flush: Concatenating all the events as one string") -- we concatenate all the events @@ -82,7 +82,7 @@ function event_queue:flush() sink = ltn12.sink.table(http_result_body), -- request body needs to be formatted as a LTN12 source source = ltn12.source.string(http_post_data), - headers = { + headers = { -- mandatory for POST request with body ["content-length"] = string.len(http_post_data) } @@ -106,7 +106,7 @@ end -- Method event_queue:add function event_queue:add(e) local metric = e.name - -- time is a reserved word in influxDB so I rename it + -- time is a reserved word in influxDB so I rename it if metric == "time" then metric = "_"..metric end @@ -166,7 +166,7 @@ end -- return true if you want to handle this type of event (category, element) -- return false if you want to ignore them function filter(category, element) - if category == 3 and element == 1 then + if category == 3 and element == 1 then return true end return false diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua index 84b2898ee41..e3c07f8202f 100644 --- a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua +++ b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua @@ -4,7 +4,7 @@ -- Tested with versions -- 1.4.3, 1.7.4, 1.7.6 -- --- References: +-- References: -- 
https://docs.influxdata.com/influxdb/v1.4/write_protocols/line_protocol_tutorial/ -- https://docs.influxdata.com/influxdb/v1.4/guides/writing_data/ -------------------------------------------------------------------------------- @@ -24,7 +24,7 @@ -------------------------------------------------------------------------------- -- Access to the data: --- curl -G 'http://:8086/query?pretty=true' --data-urlencode "db=mydb" +-- curl -G 'http://:8086/query?pretty=true' --data-urlencode "db=mydb" -- --data-urlencode "q=SELECT * from Cpu" -------------------------------------------------------------------------------- diff --git a/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua b/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua index 2e2eb6aae97..a0c61e1b756 100644 --- a/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua @@ -47,18 +47,18 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/infuxdb2-metrics.log" local log_level = params.log_level or 1 - + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) - + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true end - + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs self.sc_params.params.bucket_api_key = params.bucket_api_key self.sc_params.params.bucket_id = params.bucket_id @@ -75,8 +75,8 @@ function EventQueue.new(params) self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 -- https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#special-characters self.sc_params.params.metric_name_regex = params.metric_name_regex or "([, =])" - self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "\\%1" - + self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "\\%1" + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() @@ -187,7 +187,7 @@ function EventQueue:format_metric_service(metric) local params = self.sc_params.params self.sc_logger:debug("[EventQueue:format_metric_service]: start format_metric service") self.sc_event.event.formated_event = metric.metric_name .. ",type=service,service.name=" - .. self:escape_special_characters(self.sc_event.event.cache.service.description) + .. self:escape_special_characters(self.sc_event.event.cache.service.description) .. "," .. self:build_generic_tags(metric) .. " value=" .. metric.value .. " " .. 
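-- A sketch of the escaping rule configured above for the InfluxDB v2
-- connector: line protocol needs commas, spaces and equal signs in
-- measurement and tag values to be backslash-escaped, which the
-- capture-and-prefix gsub below performs. Pure Lua, runnable as-is.
local metric_name_regex = "([, =])"
local metric_replacement_character = "\\%1"

local function escape_special_characters(text)
  return (string.gsub(text, metric_name_regex, metric_replacement_character))
end

print(escape_special_characters("disk usage,percent")) -- disk\ usage\,percent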
self.sc_event.event.last_check self:add() self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric service") @@ -196,7 +196,7 @@ end -------------------------------------------------------------------------------- ---- EventQueue:build_tags method -- @param metric {table} a single metric data --- @return tags {table} a table with formated metadata +-- @return tags {table} a table with formated metadata -------------------------------------------------------------------------------- function EventQueue:build_generic_tags(metric) local event = self.sc_event.event @@ -251,7 +251,7 @@ function EventQueue:build_payload(payload, event) else payload = payload .. "\n" .. event end - + return payload end @@ -259,8 +259,8 @@ function EventQueue:send_data(payload, queue_metadata) self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") local params = self.sc_params.params - local url = params.http_server_url .. tostring(params.influxdb2_api_endpoint) - .. "?bucket=" .. tostring(params.bucket_id) .. "&org=" .. tostring(params.org_name) + local url = params.http_server_url .. tostring(params.influxdb2_api_endpoint) + .. "?bucket=" .. tostring(params.bucket_id) .. "&org=" .. tostring(params.org_name) .. "&precision=" .. tostring(params.influxdb2_precision) queue_metadata.headers = { @@ -296,7 +296,7 @@ function EventQueue:send_data(payload, queue_metadata) if (self.sc_params.params.proxy_address ~= '') then if (self.sc_params.params.proxy_port ~= '') then http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) - else + else self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") end end @@ -315,12 +315,12 @@ function EventQueue:send_data(payload, queue_metadata) -- performing the HTTP request http_request:perform() - + -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() - + -- Handling the return code local retval = false -- https://docs.influxdata.com/influxdb/cloud/api/#operation/PostWrite other than 204 is not good @@ -330,7 +330,7 @@ function EventQueue:send_data(payload, queue_metadata) else self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) end - + return retval end @@ -367,16 +367,16 @@ function write (event) if queue.sc_metrics:is_valid_metric_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end @@ -384,7 +384,7 @@ end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 4f80b72979b..45adf5a287c 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -30,7 +30,7 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/kafka-stream-connector.log" local log_level = params.log_level or 1 - + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) @@ -44,9 +44,9 @@ function EventQueue.new(params) self.sc_params.params.topic = params.topic self.sc_params.params.brokers = params.brokers self.sc_params.params.centreon_name = params.centreon_name - + -- overriding default parameters for this stream connector - + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true @@ -54,15 +54,15 @@ function EventQueue.new(params) -- handle kafka params self.sc_params:get_kafka_params(self.sc_kafka_config, params) - + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() - + -- SEGFAULT ON EL8 (only usefull for debugging) -- self.sc_kafka_config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) -- self.sc_kafka_config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) - + -- initiate a kafka producer self.sc_kafka_producer = kafka_producer.new(self.sc_kafka_config) @@ -86,7 +86,7 @@ function EventQueue.new(params) self.sc_params:build_accepted_elements_info() self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) - + local categories = self.sc_params.params.bbdo.categories local elements = self.sc_params.params.bbdo.elements @@ -200,7 +200,7 @@ function EventQueue:build_payload(payload, event) else payload = payload .. ',' .. broker.json_encode(event) end - + return payload end @@ -264,16 +264,16 @@ function write (event) if queue.sc_event:is_valid_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end @@ -281,7 +281,7 @@ end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua index c7fc5ee21a5..e34c1afc19e 100644 --- a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -40,18 +40,18 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/logstash-events.log" local log_level = params.log_level or 1 - + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) - + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true end - + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs self.sc_params.params.http_server_url = params.http_server_url self.sc_params.params.port = params.port @@ -59,11 +59,11 @@ function EventQueue.new(params) self.sc_params.params.password = params.password or "" self.sc_params.params.accepted_categories = params.accepted_categories or "neb" self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" - + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() - + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file() @@ -169,7 +169,7 @@ function EventQueue:add() self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end @@ -185,7 +185,7 @@ function EventQueue:build_payload(payload, event) else payload = payload .. broker.json_encode(event) end - + return payload end @@ -223,7 +223,7 @@ function EventQueue:send_data(payload, queue_metadata) if (self.sc_params.params.proxy_address ~= '') then if (self.sc_params.params.proxy_port ~= '') then http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. 
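-- A sketch of the proxy guard being applied here (the same three-line check
-- appears in every send_data): a proxy is only usable when both address and
-- port are set, otherwise the half-configuration is logged and skipped.
-- Refactored into a pure function for the example; print stands in for
-- sc_logger.
local function proxy_string(params, log)
  if params.proxy_address == '' then return nil end
  if params.proxy_port == '' then
    log("proxy_port parameter is not set but proxy_address is used")
    return nil
  end
  return params.proxy_address .. ':' .. params.proxy_port
end

print(proxy_string({ proxy_address = "10.0.0.1", proxy_port = "3128" }, print))
-- 10.0.0.1:3128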
self.sc_params.params.proxy_port) - else + else self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") end end @@ -252,10 +252,10 @@ function EventQueue:send_data(payload, queue_metadata) http_request:perform() -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() - + -- Handling the return code local retval = false if http_response_code == 200 then @@ -264,7 +264,7 @@ function EventQueue:send_data(payload, queue_metadata) else self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) end - + return retval end @@ -300,23 +300,23 @@ function write (event) if queue.sc_event:is_valid_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true diff --git a/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua b/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua index 8c7d0c9de11..43e4c417f40 100644 --- a/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua +++ b/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua @@ -69,7 +69,7 @@ function init(conf) end broker_log:set_parameters(my_data.loglevel, my_data.logfile) broker_log:info(2, "init values :" .. - " logfile = " .. my_data.logfile .. + " logfile = " .. my_data.logfile .. " loglevel = " .. my_data.loglevel .. " ipaddr = " .. my_data.ipaddr .. " url = " .. my_data.url .. @@ -126,15 +126,15 @@ function write(d) if math.abs(d.last_check - d.last_hard_state_change) < 10 then --we keep only events with a state that changed from the previous check if d.state == d.last_hard_state then broker_log:info(3, "STATE CHANGE") - local reqbody = "\t" .. - "" .. service_desc .. "\t" .. - "" .. d.output .. "\t" .. - "" .. d.state .. "\t" .. - "" .. d.last_update .. "\t" .. - "" .. hostname .. "\t" .. - "" .. hostname .. "\t" .. - "" .. my_data.source_ci .. "\t" .. - "" .. d.service_id .. "\t" .. + local reqbody = "\t" .. + "" .. service_desc .. "\t" .. + "" .. d.output .. "\t" .. + "" .. d.state .. "\t" .. + "" .. d.last_update .. "\t" .. + "" .. hostname .. "\t" .. + "" .. hostname .. "\t" .. + "" .. my_data.source_ci .. "\t" .. + "" .. d.service_id .. "\t" .. 
"" table.insert(my_data.data, reqbody) end diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua index 71bb9afb7be..5f4cbba6d39 100644 --- a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua +++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua @@ -197,7 +197,7 @@ function EventQueue:add() self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event - self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) end diff --git a/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua index 6e922cb0a99..ee630ef29e2 100644 --- a/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua +++ b/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua @@ -96,10 +96,10 @@ end -------------------------------------------------------------------------------- -- get_hostgroups: retrieve hostgroups from host_id -- @param {number} host_id, --- @return {array} hostgroups, +-- @return {array} hostgroups, -------------------------------------------------------------------------------- local function get_hostgroups (host_id) - if host_id == nil then + if host_id == nil then broker_log:warning(1, "get_hostgroup: host id is nil") return false end @@ -109,7 +109,7 @@ local function get_hostgroups (host_id) if not hostgroups then return false end - + return hostgroups end @@ -117,13 +117,13 @@ end -- get_severity: retrieve severity from host or service -- @param {number} host_id, -- @param {number} [optional] service_id --- @return {array} severity, +-- @return {array} severity, -------------------------------------------------------------------------------- local function get_severity (host_id, service_id) local service_id = service_id or nil local severity = nil - if host_id == nil then + if host_id == nil then broker_log:warning(1, "get_severity: host id is nil") return false end @@ -141,10 +141,10 @@ end -- get_ba_name: retrieve ba name from ba id -- @param {number} ba_id, -- @return {string} ba_name, the name of the ba --- @return {string} ba_description, the description of the ba +-- @return {string} ba_description, the description of the ba -------------------------------------------------------------------------------- local function get_ba_name (ba_id) - if ba_id == nil then + if ba_id == nil then broker_log:warning(1, "get_ba_name: ba id is nil") return false end @@ -165,7 +165,7 @@ end -- @return {array} bv_names, the bvs' description -------------------------------------------------------------------------------- local function get_bvs (ba_id) - if ba_id == nil then + if ba_id == nil then broker_log:warning(1, "get_bvs: ba id is nil") return false end @@ -205,13 +205,13 @@ end -------------------------------------------------------------------------------- local function split (text, separator) local hash = {} - + -- return empty string if text is nil if text == nil then broker_log:error(1, 'split: could not split text because it is nil') return 
'' end - + -- set default separator seperator = ifnil_or_empty(separator, ',') @@ -243,7 +243,7 @@ end -------------------------------------------------------------------------------- -- find_hostgroup_in_list: check if hostgroups from hosts are in an accepted list from the stream connector configuration --- @param {table} acceptedHostgroups, the table with the name of accepted hostgroups +-- @param {table} acceptedHostgroups, the table with the name of accepted hostgroups -- @param {table} hostHostgroups, the hostgroups associated to an host -- @return {boolean} -- @return {string} [optional] acceptedHostgroupsName, the hostgroup name that matched @@ -256,7 +256,7 @@ local function find_hostgroup_in_list (acceptedHostgroups, hostHostgroups) end end end - + return false end @@ -321,7 +321,7 @@ function EventQueue:new (conf) in_downtime = 0, max_buffer_size = 1, max_buffer_age = 5, - max_stored_events = 10, -- do not use values above 100 + max_stored_events = 10, -- do not use values above 100 skip_anon_events = 1, skip_nil_id = 1, element_mapping = {}, @@ -490,7 +490,7 @@ function EventQueue:new (conf) retval.get_bv = check_boolean_number_option_syntax(retval.get_bv, 1) local severity_to_priority = {} - + if retval.enable_severity == 1 then retval.priority_matching_list = split(retval.priority_matching, ',') @@ -499,12 +499,12 @@ function EventQueue:new (conf) if string.match(retval.opsgenie_priorities, severity_to_priority[1]) == nil then broker_log:warning(1, "EventQueue.new: severity is enabled but the priority configuration is wrong. configured matching: " .. retval.priority_matching_list .. - ", invalid parsed priority: " .. severity_to_priority[1] .. ", known Opsgenie priorities: " .. opsgenie_priorities .. + ", invalid parsed priority: " .. severity_to_priority[1] .. ", known Opsgenie priorities: " .. opsgenie_priorities .. ". Considere adding your priority to the opsgenie_priorities list if the parsed priority is valid") break end - retval.priority_mapping[severity_to_priority[2]] = severity_to_priority[1] + retval.priority_mapping[severity_to_priority[2]] = severity_to_priority[1] end end @@ -543,7 +543,7 @@ function EventQueue:call (data, url_path, token) if (self.proxy_address ~= '') then if (self.proxy_port ~= '') then request:setopt(curl.OPT_PROXY, self.proxy_address .. ':' .. 
self.proxy_port) - else + else broker_log:error(1, "EventQueue:call: proxy_port parameter is not set but proxy_address is used") end end @@ -618,18 +618,18 @@ end -- is_valid_neb_event: check if the neb event is valid -- @return {table} validNebEvent, a table of boolean indexes validating the event -------------------------------------------------------------------------------- -function EventQueue:is_valid_neb_event () +function EventQueue:is_valid_neb_event () if self.currentEvent.element == 14 or self.currentEvent.element == 24 then -- prepare api info self.currentEvent.endpoint = '/v2/alerts' self.currentEvent.token = self.integration_api_token - self.currentEvent.hostname = get_hostname(self.currentEvent.host_id) + self.currentEvent.hostname = get_hostname(self.currentEvent.host_id) -- can't find hostname in cache if self.currentEvent.hostname == self.currentEvent.host_id and self.skip_anon_events == 1 then return false end - + -- can't find host_id in the event if self.currentEvent.hostname == 0 and self.skip_nil_id == 1 then return false @@ -657,8 +657,8 @@ function EventQueue:is_valid_neb_event () if not self:is_valid_hostgroup() then return false end - - if self.enable_severity == 1 then + + if self.enable_severity == 1 then if not self:set_priority() then return false end @@ -678,14 +678,14 @@ function EventQueue:is_valid_neb_event () self.sendData.alias = self:buildMessage(self.host_alert_alias, nil) elseif self.currentEvent.element == 24 then - + self.currentEvent.serviceDescription = get_service_description(self.currentEvent.host_id, self.currentEvent.service_id) -- can't find service description in cache if self.currentEvent.serviceDescription == self.currentEvent.service_id and self.skip_anon_events == 1 then return false end - + if not check_event_status(self.currentEvent.state, self.service_status) then return false end @@ -699,7 +699,7 @@ function EventQueue:is_valid_neb_event () self.sendData.description = self:buildMessage(self.service_alert_description, self.currentEvent.output) self.sendData.alias = self:buildMessage(self.service_alert_alias, nil) end - + return true end @@ -717,10 +717,10 @@ end -------------------------------------------------------------------------------- function EventQueue:is_valid_bam_event () if self.currentEvent.element == 1 then - broker_log:info(3, 'EventQueue:is_valid_bam_event: starting BA treatment 1') + broker_log:info(3, 'EventQueue:is_valid_bam_event: starting BA treatment 1') -- prepare api info self.currentEvent.endpoint = '/v1/incidents/create' - self.currentEvent.token = self.app_api_token + self.currentEvent.token = self.app_api_token -- check if ba event status is valid broker_log:info(3, 'EventQueue:is_valid_bam_event: starting BA treatment 2') @@ -747,7 +747,7 @@ function EventQueue:is_valid_bam_event () self.sendData.message = self:buildMessage(self.ba_incident_message, nil) return true end - end + end return false end @@ -828,7 +828,7 @@ end -------------------------------------------------------------------------------- -- EventQueue:add, add an event to the queue --- @param {table} eventData, the data related to the event +-- @param {table} eventData, the data related to the event -- @return {boolean} -------------------------------------------------------------------------------- function EventQueue:add () @@ -847,7 +847,7 @@ function EventQueue:flush () retval = self:send_data() self.events = {} - + -- and update the timestamp self.__internal_ts_last_flush = os.time() return retval @@ -863,7 +863,7 @@ function 
EventQueue:send_data () for _, raw_event in ipairs(self.events) do if counter == 0 then - data = broker.json_encode(raw_event) + data = broker.json_encode(raw_event) counter = counter + 1 else data = data .. ',' .. broker.json_encode(raw_event) @@ -915,7 +915,7 @@ end -------------------------------------------------------------------------------- function EventQueue:set_priority () local severity = nil - + -- get host severity if self.currentEvent.service_id == nil then broker_log:info(3, "EventQueue:set_priority: getting severity for host: " .. self.currentEvent.host_id) @@ -929,7 +929,7 @@ function EventQueue:set_priority () -- find the opsgenie priority depending on the found severity local matching_priority = self.priority_mapping[tostring(severity)] - -- drop event if no severity is found and opsgenie priority must be set + -- drop event if no severity is found and opsgenie priority must be set if matching_priority == nil and self.priority_must_be_set == 1 then broker_log:info(3, "EventQueue:set_priority: couldn't find a matching priority for severity: " .. tostring(severity) .. " and priority is mandatory. Dropping event") return false @@ -969,7 +969,7 @@ function write (event) return true end -- END OF FIX - + -- First, are there some old events waiting in the flush queue ? if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") @@ -1020,11 +1020,11 @@ end -- EventQueue:is_event_duplicated, create an id from the neb event and check if id is in an already sent events list -- @return {boolean} -------------------------------------------------------------------------------- -function EventQueue:is_event_duplicated() +function EventQueue:is_event_duplicated() local eventId = '' if self.currentEvent.element == 14 then eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.last_check) - else + else eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.service_id) .. '_' .. 
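-- A sketch of the duplicate-detection scheme used around here: an event id
-- combines host id, service id (when present) and the check timestamp, and
-- is compared against the list of already-sent ids. The inline insert into
-- the sent list is a simplification; the connector stores ids elsewhere and
-- trims the list to max_stored_events.
local function event_id(e)
  if e.service_id then
    return tostring(e.host_id) .. '_' .. tostring(e.service_id) .. '_' .. tostring(e.last_check)
  end
  return tostring(e.host_id) .. '_' .. tostring(e.last_check)
end

local sent = {}
local function is_event_duplicated(e)
  local id = event_id(e)
  for _, v in ipairs(sent) do
    if v == id then return true end
  end
  table.insert(sent, id)
  return false
end

local e = { host_id = 12, service_id = 34, last_check = 1700000000 }
print(is_event_duplicated(e), is_event_duplicated(e)) -- false	true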
tostring(self.currentEvent.last_check) end @@ -1033,7 +1033,7 @@ function EventQueue:is_event_duplicated() return true end end - + return false end diff --git a/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua index b4f98820a41..2bb86bc5fce 100644 --- a/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua +++ b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua @@ -43,20 +43,20 @@ function EventQueue.new(params) -- set up log configuration local logfile = params.logfile or "/var/log/centreon-broker/opsgenie-events.log" local log_level = params.log_level or 1 - + -- initiate mandatory objects self.sc_logger = sc_logger.new(logfile, log_level) self.sc_common = sc_common.new(self.sc_logger) self.sc_broker = sc_broker.new(self.sc_logger) self.sc_params = sc_params.new(self.sc_common, self.sc_logger) - + -- checking mandatory parameters and setting a fail flag if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then self.fail = true end - + --params.max_buffer_size = 1 - + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs self.sc_params.params.app_api_token = params.app_api_token self.sc_params.params.integration_api_token = params.integration_api_token @@ -73,15 +73,15 @@ function EventQueue.new(params) self.sc_params.params.accepted_categories = params.accepted_categories or "neb" self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" self.sc_params.params.timestamp_conversion_format = params.timestamp_conversion_format or "%Y-%m-%d %H:%M:%S" - - + + -- apply users params and check syntax of standard ones self.sc_params:param_override(params) self.sc_params:check_params() - + -- need a queue for each type of event because ba status aren't sent on the same endpoint self.sc_params.params.send_mixed_events = 0 - + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) self.format_template = self.sc_params:load_event_format_file(true) @@ -132,16 +132,16 @@ function EventQueue.new(params) -- handle metadatas for queues self.sc_flush:add_queue_metadata( - categories.neb.id, - elements.host_status.id, + categories.neb.id, + elements.host_status.id, { api_endpoint = self.sc_params.params.alerts_api_endpoint, token = self.sc_params.params.app_api_token } ) self.sc_flush:add_queue_metadata( - categories.neb.id, - elements.service_status.id, + categories.neb.id, + elements.service_status.id, { api_endpoint = self.sc_params.params.alerts_api_endpoint, token = self.sc_params.params.app_api_token @@ -151,23 +151,23 @@ function EventQueue.new(params) -- handle opsgenie priority mapping local severity_to_priority = {} self.priority_mapping = {} - + if self.sc_params.params.enable_severity == 1 then self.priority_matching_list = self.sc_common:split(self.sc_params.params.priority_matching, ',') for _, priority_group in ipairs(self.priority_matching_list) do severity_to_priority = self.sc_common:split(priority_group, '=') - -- + -- if string.match(self.sc_params.params.opsgenie_priorities, severity_to_priority[1]) == nil then - self.sc_logger:error("[EvenQueue.new]: severity is enabled but the priority configuration is wrong. configured matching: " + self.sc_logger:error("[EvenQueue.new]: severity is enabled but the priority configuration is wrong. configured matching: " .. self.sc_params.params.priority_matching_list .. 
", invalid parsed priority: " .. severity_to_priority[1] .. ", known Opsgenie priorities: " .. self.sc_params.params.opsgenie_priorities .. ". Considere adding your priority to the opsgenie_priorities list if the parsed priority is valid") break end - self.priority_mapping[severity_to_priority[2]] = severity_to_priority[1] + self.priority_mapping[severity_to_priority[2]] = severity_to_priority[1] end end @@ -251,7 +251,7 @@ end function EventQueue:format_event_service() local event = self.sc_event.event local state = self.sc_params.params.status_mapping[event.category][event.element][event.state] - + self.sc_event.event.formated_event = { message = string.sub(os.date(self.sc_params.params.timestamp_conversion_format, event.last_update) .. " " .. event.cache.host.name .. " // " .. event.cache.service.description .. " is " .. state, 1, 130), @@ -269,14 +269,14 @@ end function EventQueue:format_event_ba() local event = self.sc_event.event local state = self.sc_params.params.status_mapping[event.category][event.element][event.state] - + self.sc_event.event.formated_event = { message = string.sub(event.cache.ba.ba_name .. " is " .. state .. ", health level reached " .. event.level_nominal, 1, 130) } if self.sc_params.params.enable_incident_tags == 1 then local tags = {} - + for _, bv_info in ipairs(event.cache.bvs) do -- can't have more than 20 tags if #tags < 50 then @@ -286,7 +286,7 @@ function EventQueue:format_event_ba() end local custom_tags = self.sc_common:split(self.sc_params.params.ba_incident_tags, ",") - for _, tag_name in ipairs(custom_tags) do + for _, tag_name in ipairs(custom_tags) do -- can't have more than 20 tags if #tags < 20 then self.sc_logger:info("[EventQueue:format_event_ba]: add custom tag: " .. tostring(tag_name) .. " to list of tags") @@ -329,7 +329,7 @@ function EventQueue:build_payload(payload, event) else payload = payload .. "," .. broker.json_encode(event) end - + return payload end @@ -340,7 +340,7 @@ function EventQueue:send_data(payload, queue_metadata) queue_metadata.headers = { "content-type: application/json", "accept: application/json", - "Authorization: GenieKey " .. queue_metadata.token + "Authorization: GenieKey " .. queue_metadata.token } self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) @@ -369,7 +369,7 @@ function EventQueue:send_data(payload, queue_metadata) if (self.sc_params.params.proxy_address ~= '') then if (self.sc_params.params.proxy_port ~= '') then http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) - else + else self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") end end @@ -388,12 +388,12 @@ function EventQueue:send_data(payload, queue_metadata) -- performing the HTTP request http_request:perform() - + -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() - + -- Handling the return code local retval = false @@ -405,7 +405,7 @@ function EventQueue:send_data(payload, queue_metadata) else self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. 
tostring(http_response_body)) end - + return retval end @@ -441,16 +441,16 @@ function write (event) if queue.sc_event:is_valid_event() then queue:format_accepted_event() end - --- log why the event has been dropped + --- log why the event has been dropped else queue.sc_logger:debug("dropping event because element is not valid. Event element is: " .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) - end + end else queue.sc_logger:debug("dropping event because category is not valid. Event category is: " .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) end - + return flush() end @@ -458,7 +458,7 @@ end -- flush method is called by broker every now and then (more often when broker has nothing else to do) function flush() local queues_size = queue.sc_flush:get_queues_size() - + -- nothing to flush if queues_size == 0 then return true diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua index 350764f8888..96cc854fb86 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua @@ -147,8 +147,8 @@ function EventQueue:add(e) end end end - - local pdy_dedup_key + + local pdy_dedup_key if e.service_id then --to remain consistent in the alert handling even in the event of the loss of the broker cache, we should use the ids to link the events pdy_dedup_key = e.host_id .. "_" .. e.service_id else @@ -169,7 +169,7 @@ function EventQueue:add(e) -- basic management of "class" attribute local pdy_class - if e.service_id then + if e.service_id then pdy_class = "service" else pdy_class = "host" @@ -300,7 +300,7 @@ function EventQueue:flush() for s in http_post_data:gmatch("[^\r\n]+") do broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") end - + broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. 
"\"") local http_response_body = "" @@ -336,7 +336,7 @@ function EventQueue:flush() http_request:perform() -- collecting results - http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) http_request:close() @@ -361,7 +361,7 @@ end local queue -- Fonction init() -function init(conf) +function init(conf) local log_level = 2 local log_path = "/var/log/centreon-broker/stream-connector-pagerduty.log" for i,v in pairs(conf) do diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua index 775e8b90534..883a72c28b5 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -179,11 +179,11 @@ function EventQueue:format_event_host() -- handle severity local host_severity = self.sc_broker:get_severity(event.host_id) - + if host_severity then pdy_custom_details['Hostseverity'] = host_severity end - + pdy_custom_details["Output"] = self.sc_common:ifnil_or_empty(event.output, "no output") self.sc_event.event.formated_event = { diff --git a/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml b/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml index 21e20af20e0..d1ed85928f9 100644 --- a/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml +++ b/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml @@ -3,7 +3,7 @@ arch: "${ARCH}" platform: "linux" version_schema: "none" version: "${VERSION}" -release: "1${DIST}" +release: "${RELEASE}${DIST}" section: "default" priority: "optional" maintainer: "Centreon " diff --git a/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml b/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml index 2dd2798496b..727d40ede53 100644 --- a/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml +++ b/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml @@ -1,9 +1,9 @@ -name: "lua-curl" +name: "@NAME@" arch: "${ARCH}" platform: "linux" version_schema: "none" version: "${VERSION}" -release: "6${DIST}" +release: "${RELEASE}${DIST}" section: "default" priority: "optional" maintainer: "Centreon " @@ -15,20 +15,38 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "../lua-curl" - dst: "/usr/lib64/lua/@luaver@" + - src: "../lua-curl/lcurl.so" + dst: "/usr/lib64/lua/@luaver@/lcurl.so" file_info: - mode: 0755 - - - src: "../lua-curl" - dst: "/usr/share/lua/@luaver@" + mode: 0644 + packager: rpm + - src: "../lua-curl/lcurl.so" + dst: "/usr/lib/x86_64-linux-gnu/lua/5.3/lcurl.so" file_info: - mode: 0755 + mode: 0644 + packager: deb + + - src: "../lua-curl/cURL.lua" + dst: "/usr/share/lua/@luaver@/cURL.lua" + packager: rpm + - src: "../lua-curl/cURL.lua" + dst: "/usr/share/lua/5.3/cURL.lua" + packager: deb + + - src: "../lua-curl/cURL" + dst: "/usr/share/lua/@luaver@/cURL" + packager: rpm + - src: "../lua-curl/cURL" + dst: "/usr/share/lua/5.3/cURL" + packager: deb overrides: rpm: depends: - lua + deb: + depends: + - lua5.3 rpm: summary: lua curl diff --git a/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml b/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml index 75a2876d54e..b289235795e 100644 --- a/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml +++ b/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml @@ -3,7 +3,7 @@ arch: "${ARCH}" 
platform: "linux" version_schema: "none" version: "${VERSION}" -release: "1${DIST}" +release: "${RELEASE}${DIST}" section: "default" priority: "optional" maintainer: "Centreon " diff --git a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml index 628310aa189..7f34c7973cf 100644 --- a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml +++ b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml @@ -2,8 +2,8 @@ name: "centreon-stream-connectors-lib" arch: "${ARCH}" platform: "linux" version_schema: "none" -version: "3.6.0" -release: "2${DIST}" +version: "${VERSION}" +release: "${RELEASE}${DIST}" section: "default" priority: "optional" maintainer: "Centreon " @@ -15,8 +15,16 @@ homepage: "https://www.centreon.com" license: "Apache-2.0" contents: - - src: "./modules/centreon-stream-connectors-lib" + - src: "../../modules/centreon-stream-connectors-lib" dst: "/usr/share/lua/@luaver@/centreon-stream-connectors-lib" + packager: rpm + + - src: "../../modules/centreon-stream-connectors-lib" + dst: "/usr/share/lua/5.3/centreon-stream-connectors-lib" + packager: deb + - src: "../../modules/centreon-stream-connectors-lib" + dst: "/usr/share/lua/5.4/centreon-stream-connectors-lib" + packager: deb overrides: rpm: @@ -24,12 +32,13 @@ overrides: - lua-socket >= 3.0 - centreon-broker-core >= 22.04.0 - lua-curl + - lua deb: depends: - "centreon-broker-core (>= 22.04.0)" - "lua-socket (>= 3.0~)" - - "lua-curl" - - "lua5.4" + - "lua5.3-curl" + - "lua5.3" rpm: summary: Centreon stream connectors lua modules diff --git a/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml b/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml new file mode 100644 index 00000000000..ebcee0f67bd --- /dev/null +++ b/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml @@ -0,0 +1,36 @@ +name: "@PACKAGE_NAME@" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Those modules provides helpful methods to create stream connectors for Centreon + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../../centreon-certified/@CONNECTOR_NAME@" + dst: "/usr/share/centreon-broker/lua" + +overrides: + rpm: + depends: [ + centreon-stream-connectors-lib >= 3.0.0, + @RPM_DEPENDENCIES@ + ] + deb: + depends: [ + "centreon-stream-connectors-lib (>= 3.0.0~)", + @DEB_DEPENDENCIES@ + ] +rpm: + summary: Centreon stream connectors lua modules + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/stream-connectors/packaging/connectors/deb/control b/stream-connectors/packaging/connectors/deb/control deleted file mode 100644 index a407c3e92eb..00000000000 --- a/stream-connectors/packaging/connectors/deb/control +++ /dev/null @@ -1,14 +0,0 @@ -Source: @PACKAGE_NAME@ -Section: interpreters -Priority: optional -Maintainer: Centreon -Build-Depends: - debhelper-compat (=12) -Standards-Version: 4.5.0 -Homepage: https://wwww.centreon.com - -Package: @PACKAGE_NAME@ -Architecture: all -Depends: - centreon-stream-connectors-lib (>=@MIN_LIB_VERSION@) -Description: Centreon stream connectors lua modules diff --git a/stream-connectors/packaging/connectors/deb/copyright 
b/stream-connectors/packaging/connectors/deb/copyright deleted file mode 100644 index e874fbccb2b..00000000000 --- a/stream-connectors/packaging/connectors/deb/copyright +++ /dev/null @@ -1,23 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: centreon-stream-connectors-lib -Upstream-Contact: Centreon -Source: https://www.centreon.com - -Files: * -Copyright: 2023 Centreon -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - https://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the complete text of the Apache version 2.0 license - can be found in "/usr/share/common-licenses/Apache-2.0". - diff --git a/stream-connectors/packaging/connectors/deb/install b/stream-connectors/packaging/connectors/deb/install deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/stream-connectors/packaging/connectors/deb/rules b/stream-connectors/packaging/connectors/deb/rules deleted file mode 100644 index d1cbe832789..00000000000 --- a/stream-connectors/packaging/connectors/deb/rules +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/make -f - -export DEB_BUILD_MAINT_OPTIONS = hardening=+all - -%: - dh $@ - diff --git a/stream-connectors/packaging/connectors/deb/source/format b/stream-connectors/packaging/connectors/deb/source/format deleted file mode 100644 index 163aaf8d82b..00000000000 --- a/stream-connectors/packaging/connectors/deb/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/stream-connectors/packaging/connectors/rpm/connector.spec b/stream-connectors/packaging/connectors/rpm/connector.spec deleted file mode 100644 index e27629187f9..00000000000 --- a/stream-connectors/packaging/connectors/rpm/connector.spec +++ /dev/null @@ -1,35 +0,0 @@ -Name: %{PACKAGE_NAME} -Version: %{VERSION} -Release: 1%{?dist} -Summary: Centreon stream connectors lua modules - -Group: Applications/System -License: Apache-2.0 -URL: https://www.centreon.com -Packager: Centreon -Vendor: Centreon Entreprise Server (CES) Repository, http://yum.centreon.com/standard/ - -Source0: %{name}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) -BuildArch: noarch - -Requires: centreon-stream-connectors-lib >= %{MIN_LIB_VERSION} - -%description -Those modules provides helpful methods to create stream connectors for Centreon - -%prep -%setup -q -n %{name} - -%build - -%install -%{__install} -d $RPM_BUILD_ROOT%{_datadir}/centreon-broker/lua -%{__cp} -pr ./*.lua $RPM_BUILD_ROOT%{_datadir}/centreon-broker/lua - -%clean -%{__rm} -rf $RPM_BUILD_ROOT - -%files - -%changelog From 29b5138e75739094eda8da22207f271d96b8f4b1 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Wed, 31 Jan 2024 16:53:15 +0100 Subject: [PATCH 198/219] feat(packaging): package for debian wookworm and ubuntu jammy (#188) --- ...le.packaging-stream-connectors-nfpm-bookworm | 17 +++++++++++++++++ ...rfile.packaging-stream-connectors-nfpm-jammy | 17 +++++++++++++++++ .../.github/workflows/docker-packaging.yml | 2 +- .../.github/workflows/lua-cffi.yml | 17 ++++++++++++++--- 
.../.github/workflows/lua-curl.yml | 14 ++++++++++++-- stream-connectors/.github/workflows/lua-tz.yml | 10 ++++++++-- .../.github/workflows/stream-connectors-lib.yml | 10 ++++++++-- .../.github/workflows/stream-connectors.yml | 11 ++++++++--- .../bsm/bsm_connector-apiv1.lua | 3 +-- .../capensis/canopsis2-events-apiv2.lua | 1 - .../datadog/datadog-metrics-apiv2.lua | 1 - .../elasticsearch/elastic-neb-apiv1.lua | 1 - .../google/bigquery-events-apiv2.lua | 2 -- .../influxdb/influxdb-neb-apiv1.lua | 1 - .../kafka/kafka-events-apiv2.lua | 1 - .../logstash/logstash-events-apiv2.lua | 1 - .../centreon-certified/ndo/ndo-module-apiv1.lua | 1 - .../omi/omi_connector-apiv1.lua | 1 - .../opsgenie/opsgenie-events-apiv2.lua | 1 - .../pagerduty/pagerduty-apiv1.lua | 2 -- .../prometheus/prometheus-gateway-apiv1.lua | 1 - .../servicenow/servicenow-apiv1.lua | 3 +-- .../signl4/signl4-events-apiv2.lua | 1 - .../splunk/splunk-events-apiv2.lua | 1 - .../warp10/export-warp10-apiv1.lua | 2 +- 25 files changed, 88 insertions(+), 34 deletions(-) create mode 100644 stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bookworm create mode 100644 stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-jammy diff --git a/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bookworm b/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bookworm new file mode 100644 index 00000000000..b251daa2cb3 --- /dev/null +++ b/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bookworm @@ -0,0 +1,17 @@ +ARG REGISTRY_URL + +FROM ${REGISTRY_URL}/debian:bookworm + +RUN bash -e <> conanfile.txt @@ -168,7 +178,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [bullseye] + distrib: [bullseye, bookworm] name: deliver ${{ matrix.distrib }} steps: diff --git a/stream-connectors/.github/workflows/lua-tz.yml b/stream-connectors/.github/workflows/lua-tz.yml index 494c9d13363..15d252501b5 100644 --- a/stream-connectors/.github/workflows/lua-tz.yml +++ b/stream-connectors/.github/workflows/lua-tz.yml @@ -26,7 +26,7 @@ jobs: strategy: fail-fast: false matrix: - distrib: [el8, el9, bullseye] + distrib: [el8, el9, bullseye, bookworm, jammy] include: - package_extension: rpm image: packaging-stream-connectors-nfpm-alma8 @@ -37,6 +37,12 @@ jobs: - package_extension: deb image: packaging-stream-connectors-nfpm-bullseye distrib: bullseye + - package_extension: deb + image: packaging-stream-connectors-nfpm-bookworm + distrib: bookworm + - package_extension: deb + image: packaging-stream-connectors-nfpm-jammy + distrib: jammy runs-on: ubuntu-22.04 @@ -107,7 +113,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [bullseye] + distrib: [bullseye, bookworm] name: deliver ${{ matrix.distrib }} steps: diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/stream-connectors/.github/workflows/stream-connectors-lib.yml index 4dfeeb1a4f3..92fde4a94a4 100644 --- a/stream-connectors/.github/workflows/stream-connectors-lib.yml +++ b/stream-connectors/.github/workflows/stream-connectors-lib.yml @@ -28,7 +28,7 @@ jobs: strategy: fail-fast: false matrix: - distrib: [el8, el9, bullseye] + distrib: [el8, el9, bullseye, bookworm, jammy] include: - package_extension: rpm image: packaging-stream-connectors-nfpm-alma8 @@ -39,6 +39,12 @@ jobs: - package_extension: deb image: packaging-stream-connectors-nfpm-bullseye distrib: bullseye + - package_extension: deb + image: packaging-stream-connectors-nfpm-bookworm + 
distrib: bookworm + - package_extension: deb + image: packaging-stream-connectors-nfpm-jammy + distrib: jammy runs-on: ubuntu-22.04 @@ -98,7 +104,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [bullseye] + distrib: [bullseye, bookworm] name: deliver ${{ matrix.distrib }} steps: diff --git a/stream-connectors/.github/workflows/stream-connectors.yml b/stream-connectors/.github/workflows/stream-connectors.yml index 88aa44186fe..431af1b0240 100644 --- a/stream-connectors/.github/workflows/stream-connectors.yml +++ b/stream-connectors/.github/workflows/stream-connectors.yml @@ -47,7 +47,6 @@ jobs: folders+=($BASE_NAME) done unique_folders=($(printf "%s\n" "${folders[@]}" | sort -u | tr '\n' ' ')) - echo $unique_folders echo "connectors=$(jq --compact-output --null-input '$ARGS.positional' --args -- ${unique_folders[@]})" >> $GITHUB_OUTPUT shell: bash @@ -57,7 +56,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [el8, el9, bullseye] + distrib: [el8, el9, bullseye, bookworm, jammy] connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} include: - distrib: el8 @@ -69,6 +68,12 @@ jobs: - distrib: bullseye image: packaging-stream-connectors-nfpm-bullseye package_extension: deb + - distrib: bookworm + image: packaging-stream-connectors-nfpm-bookworm + package_extension: deb + - distrib: jammy + image: packaging-stream-connectors-nfpm-jammy + package_extension: deb name: package ${{ matrix.distrib }} ${{ matrix.connector_path }} container: @@ -158,7 +163,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [bullseye] + distrib: [bullseye, bookworm] connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} name: deliver ${{ matrix.distrib }} ${{ matrix.connector_path }} diff --git a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua index ec1851862ee..d1f418f708d 100644 --- a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua +++ b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua @@ -1,5 +1,5 @@ -- --- Copyright 2018 Centreon +-- Copyright 2024 Centreon -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. 
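-- For context on the detect-changes job used by the matrices above: it reduces the
-- list of modified files under centreon-certified/ to their parent directories,
-- de-duplicates them, and serializes the result with jq. With two (hypothetical)
-- touched connectors, the step
--   jq --compact-output --null-input '$ARGS.positional' --args -- kafka splunk
-- prints ["kafka","splunk"], which the package and deliver jobs consume through
-- fromJson() as matrix.connector_path.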
@@ -370,4 +370,3 @@ function write(e) return true end - diff --git a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua index 695bcbc3777..911cabc7ce7 100644 --- a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua +++ b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua @@ -534,4 +534,3 @@ function flush() -- there are events in the queue but they were not ready to be send return false end - diff --git a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua index e6718848b03..b308d787620 100644 --- a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua +++ b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -407,4 +407,3 @@ function flush() -- there are events in the queue but they were not ready to be send return false end - diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua index dc1c79e0e23..238dfce1295 100644 --- a/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua @@ -343,4 +343,3 @@ end function filter(category, element) return category == 1 and (element == 14 or element == 24) end - diff --git a/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua index 4e976e2ce72..5ff02c7ec5b 100644 --- a/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua +++ b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua @@ -108,7 +108,6 @@ function EventQueue.new(params) self.sc_bq = sc_bq.new(self.sc_params.params, self.sc_logger) self.sc_bq:get_tables_schema() - -- return EventQueue object setmetatable(self, { __index = EventQueue }) return self @@ -430,4 +429,3 @@ function write(event) return true end - diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua index e3c07f8202f..74ee93bb33d 100644 --- a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua +++ b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua @@ -282,4 +282,3 @@ end function filter(category, element) return category == 1 and (element == 14 or element == 24) end - diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua index 45adf5a287c..05b0513b4ec 100644 --- a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua +++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua @@ -308,4 +308,3 @@ function flush() -- there are events in the queue but they were not ready to be send return false end - diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua index e34c1afc19e..9e5c20d4235 100644 --- a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -343,4 +343,3 @@ function flush() -- there are events in the queue but they were not ready to be send return false end - diff --git 
a/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua b/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua index b0176269aa7..91f22e49bc4 100644 --- a/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua +++ b/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua @@ -330,4 +330,3 @@ ndo.data = { } return ndo - diff --git a/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua b/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua index 43e4c417f40..38b77169d53 100644 --- a/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua +++ b/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua @@ -149,4 +149,3 @@ function write(d) end return true end - diff --git a/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua index 2bb86bc5fce..5f149e9a5b9 100644 --- a/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua +++ b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua @@ -485,4 +485,3 @@ function flush() -- there are events in the queue but they were not ready to be send return false end - diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua index 96cc854fb86..d2777539fe7 100644 --- a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua @@ -448,5 +448,3 @@ function write(e) return true end - - diff --git a/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua index 6f87cd14618..c0a66e4f965 100644 --- a/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua +++ b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua @@ -947,4 +947,3 @@ function write (event) return true end - diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua b/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua index eba8755917d..ceec49ab048 100644 --- a/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua +++ b/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua @@ -893,7 +893,6 @@ function EventQueue:is_event_duplicated() return true end end - + return false end - diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua index 216fc81aa7e..80e48e7e3d8 100644 --- a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua +++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua @@ -343,4 +343,3 @@ function flush() -- there are events in the queue but they were not ready to be send return false end - diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua index f7b3043af56..1fb7596a534 100644 --- a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua +++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua @@ -340,4 +340,3 @@ function flush() -- there are events in the queue but they were not ready to be send return false end - diff --git a/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua 
b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua index ac2d9cea029..0fce09f6c07 100644 --- a/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua +++ b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua @@ -1,5 +1,5 @@ -- --- Copyright 2018 Centreon +-- Copyright 2024 Centreon -- -- Licensed under the Apache License, Version 2.0 (the "License"); -- you may not use this file except in compliance with the License. From c93a0a6fa6250469074f4a4f943ac7caec19a076 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Thu, 1 Feb 2024 10:13:09 +0100 Subject: [PATCH 199/219] fix(packaging): set default release number to 1 (#189) --- stream-connectors/.github/actions/package-nfpm/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/.github/actions/package-nfpm/action.yml b/stream-connectors/.github/actions/package-nfpm/action.yml index ca30601d5ae..98981073642 100644 --- a/stream-connectors/.github/actions/package-nfpm/action.yml +++ b/stream-connectors/.github/actions/package-nfpm/action.yml @@ -64,7 +64,7 @@ runs: export DIST="" if [ "${{ inputs.stability }}" = "unstable" ] || [ "${{ inputs.stability }}" = "canary" ]; then export RELEASE="$RELEASE~${{ inputs.distrib }}" - elif [ "${{ inputs.stability }}" = "testing" ]; then + else export RELEASE="1~${{ inputs.distrib }}" fi fi From d92153c01853f4f3920edfc2165c3aeb963e907a Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Mon, 5 Feb 2024 13:56:54 +0100 Subject: [PATCH 200/219] fix(ci): do not deliver stable ubuntu packages (#190) --- .../.github/actions/deb-delivery/action.yml | 17 +++++++++++++---- .../.github/workflows/lua-cffi.yml | 2 +- .../.github/workflows/lua-curl.yml | 2 +- stream-connectors/.github/workflows/lua-tz.yml | 2 +- .../.github/workflows/stream-connectors-lib.yml | 2 +- .../.github/workflows/stream-connectors.yml | 2 +- 6 files changed, 18 insertions(+), 9 deletions(-) diff --git a/stream-connectors/.github/actions/deb-delivery/action.yml b/stream-connectors/.github/actions/deb-delivery/action.yml index 0fd02f838bb..87e6f8e644e 100644 --- a/stream-connectors/.github/actions/deb-delivery/action.yml +++ b/stream-connectors/.github/actions/deb-delivery/action.yml @@ -20,27 +20,36 @@ inputs: runs: using: "composite" steps: - - name: Use cache DEB files + - if: ${{ ! (inputs.distrib == 'jammy' && inputs.stability == 'stable') }} + name: Use cache DEB files uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.deb key: ${{ inputs.cache_key }} fail-on-cache-miss: true - - uses: jfrog/setup-jfrog-cli@901bb9632db90821c2d3f076012bdeaf66598555 # v3.4.1 + - if: ${{ ! (inputs.distrib == 'jammy' && inputs.stability == 'stable') }} + uses: jfrog/setup-jfrog-cli@901bb9632db90821c2d3f076012bdeaf66598555 # v3.4.1 env: JF_URL: https://centreon.jfrog.io JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} - - name: Publish DEBs + - if: ${{ ! (inputs.distrib == 'jammy' && inputs.stability == 'stable') }} + name: Publish DEBs run: | FILES="*.deb" + if [[ "${{ inputs.distrib }}" == "jammy" ]]; then + REPO_PREFIX="ubuntu" + else + REPO_PREFIX="apt" + fi + for FILE in $FILES; do echo "[DEBUG] - File: $FILE" ARCH=$(echo $FILE | cut -d '_' -f3 | cut -d '.' 
-f1) - jf rt upload "$FILE" "apt-plugins-${{ inputs.stability }}/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" + jf rt upload "$FILE" "${REPO_PREFIX}-plugins-${{ inputs.stability }}/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" done shell: bash diff --git a/stream-connectors/.github/workflows/lua-cffi.yml b/stream-connectors/.github/workflows/lua-cffi.yml index 14ee20ebc38..5142b766273 100644 --- a/stream-connectors/.github/workflows/lua-cffi.yml +++ b/stream-connectors/.github/workflows/lua-cffi.yml @@ -140,7 +140,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [bullseye, bookworm] + distrib: [bullseye, bookworm, jammy] name: deliver ${{ matrix.distrib }} steps: diff --git a/stream-connectors/.github/workflows/lua-curl.yml b/stream-connectors/.github/workflows/lua-curl.yml index ce77634cf7b..fee59f71ae5 100644 --- a/stream-connectors/.github/workflows/lua-curl.yml +++ b/stream-connectors/.github/workflows/lua-curl.yml @@ -178,7 +178,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [bullseye, bookworm] + distrib: [bullseye, bookworm, jammy] name: deliver ${{ matrix.distrib }} steps: diff --git a/stream-connectors/.github/workflows/lua-tz.yml b/stream-connectors/.github/workflows/lua-tz.yml index 15d252501b5..3491a54bc3d 100644 --- a/stream-connectors/.github/workflows/lua-tz.yml +++ b/stream-connectors/.github/workflows/lua-tz.yml @@ -113,7 +113,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [bullseye, bookworm] + distrib: [bullseye, bookworm, jammy] name: deliver ${{ matrix.distrib }} steps: diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/stream-connectors/.github/workflows/stream-connectors-lib.yml index 92fde4a94a4..383f41f6429 100644 --- a/stream-connectors/.github/workflows/stream-connectors-lib.yml +++ b/stream-connectors/.github/workflows/stream-connectors-lib.yml @@ -104,7 +104,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [bullseye, bookworm] + distrib: [bullseye, bookworm, jammy] name: deliver ${{ matrix.distrib }} steps: diff --git a/stream-connectors/.github/workflows/stream-connectors.yml b/stream-connectors/.github/workflows/stream-connectors.yml index 431af1b0240..e126c291670 100644 --- a/stream-connectors/.github/workflows/stream-connectors.yml +++ b/stream-connectors/.github/workflows/stream-connectors.yml @@ -163,7 +163,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - distrib: [bullseye, bookworm] + distrib: [bullseye, bookworm, jammy] connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} name: deliver ${{ matrix.distrib }} ${{ matrix.connector_path }} From 91590f48e5f881b8ce0cfe737512fc0e55db1afc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Chapron?= <34628915+sc979@users.noreply.github.com> Date: Thu, 8 Feb 2024 10:54:23 +0100 Subject: [PATCH 201/219] enh(ci): modify dependabot configuration (#191) --- stream-connectors/.github/dependabot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stream-connectors/.github/dependabot.yml b/stream-connectors/.github/dependabot.yml index 3db635e65a3..6eeff4da184 100644 --- a/stream-connectors/.github/dependabot.yml +++ b/stream-connectors/.github/dependabot.yml @@ -3,7 +3,7 @@ updates: - package-ecosystem: github-actions directory: '/' schedule: - interval: weekly + interval: monthly open-pull-requests-limit: 10 labels: - 'dependencies' From ddec5119a47a9bca36566ceabb60d6f6cb1431c8 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Mon, 12 
Feb 2024 15:35:04 +0100 Subject: [PATCH 202/219] chore(ci): add stream connectors workflows --- .github/CODEOWNERS | 12 +- .github/actions/package/action.yml | 4 + ...ile.packaging-stream-connectors-nfpm-alma8 | 0 ...ile.packaging-stream-connectors-nfpm-alma9 | 0 ....packaging-stream-connectors-nfpm-bookworm | 0 ....packaging-stream-connectors-nfpm-bullseye | 0 ...ile.packaging-stream-connectors-nfpm-jammy | 0 .github/workflows/docker-builder.yml | 4 +- .../docker-packaging-stream-connectors.yml | 10 +- .../workflows/lua-cffi.yml | 36 +++--- .../workflows/lua-curl.yml | 42 ++++--- .../.github => .github}/workflows/lua-tz.yml | 30 ++--- .../workflows/stream-connectors-lib.yml | 36 +++--- .../workflows/stream-connectors.yml | 42 ++++--- stream-connectors/.github/CODEOWNERS | 8 -- .../.github/actions/deb-delivery/action.yml | 55 --------- .../.github/actions/package-nfpm/action.yml | 108 ------------------ .../.github/actions/rpm-delivery/action.yml | 72 ------------ stream-connectors/.github/dependabot.yml | 10 -- .../.github/workflows/actionlint.yml | 75 ------------ .../.github/workflows/get-environment.yml | 40 ------- 21 files changed, 123 insertions(+), 461 deletions(-) rename {stream-connectors/.github => .github}/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 (100%) rename {stream-connectors/.github => .github}/docker/Dockerfile.packaging-stream-connectors-nfpm-alma9 (100%) rename {stream-connectors/.github => .github}/docker/Dockerfile.packaging-stream-connectors-nfpm-bookworm (100%) rename {stream-connectors/.github => .github}/docker/Dockerfile.packaging-stream-connectors-nfpm-bullseye (100%) rename {stream-connectors/.github => .github}/docker/Dockerfile.packaging-stream-connectors-nfpm-jammy (100%) rename stream-connectors/.github/workflows/docker-packaging.yml => .github/workflows/docker-packaging-stream-connectors.yml (80%) rename {stream-connectors/.github => .github}/workflows/lua-cffi.yml (81%) rename {stream-connectors/.github => .github}/workflows/lua-curl.yml (81%) rename {stream-connectors/.github => .github}/workflows/lua-tz.yml (81%) rename {stream-connectors/.github => .github}/workflows/stream-connectors-lib.yml (77%) rename {stream-connectors/.github => .github}/workflows/stream-connectors.yml (79%) delete mode 100644 stream-connectors/.github/CODEOWNERS delete mode 100644 stream-connectors/.github/actions/deb-delivery/action.yml delete mode 100644 stream-connectors/.github/actions/package-nfpm/action.yml delete mode 100644 stream-connectors/.github/actions/rpm-delivery/action.yml delete mode 100644 stream-connectors/.github/dependabot.yml delete mode 100644 stream-connectors/.github/workflows/actionlint.yml delete mode 100644 stream-connectors/.github/workflows/get-environment.yml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 08a9bd35381..408ee0dfdef 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1,9 @@ -* @centreon/owners-cpp +* @centreon/owners-cpp -.github/** @centreon/owners-pipelines -packaging/** @centreon/owners-pipelines -selinux/** @centreon/owners-pipelines +.github/** @centreon/owners-pipelines +packaging/** @centreon/owners-pipelines +selinux/** @centreon/owners-pipelines -tests/** @centreon/owners-robot-e2e +stream-connectors/** @centreon/owners-lua + +tests/** @centreon/owners-robot-e2e diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index b51c1ae496e..90aa456dbb7 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -16,6 +16,7 @@ 
inputs: release: description: The package release number required: false + default: "1" arch: description: The package architecture required: false @@ -68,6 +69,8 @@ runs: fi fi + luaver=$(lua -e "print(string.sub(_VERSION, 5))" 2>/dev/null || echo 0) + export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" export RPM_SIGNING_KEY_ID="$RPM_GPG_SIGNING_KEY_ID" export NFPM_RPM_PASSPHRASE="$RPM_GPG_SIGNING_PASSPHRASE" @@ -79,6 +82,7 @@ runs: source $DIRNAME/env/.env.${{ inputs.distrib }} fi cd $DIRNAME + sed -i "s/@luaver@/$luaver/g" $BASENAME sed -i "s/@COMMIT_HASH@/${{ inputs.commit_hash }}/g" $BASENAME nfpm package --config $BASENAME --packager ${{ inputs.package_extension }} cd - diff --git a/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 b/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 similarity index 100% rename from stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 rename to .github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 diff --git a/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma9 b/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma9 similarity index 100% rename from stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma9 rename to .github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma9 diff --git a/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bookworm b/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bookworm similarity index 100% rename from stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bookworm rename to .github/docker/Dockerfile.packaging-stream-connectors-nfpm-bookworm diff --git a/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bullseye b/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bullseye similarity index 100% rename from stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-bullseye rename to .github/docker/Dockerfile.packaging-stream-connectors-nfpm-bullseye diff --git a/stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-jammy b/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-jammy similarity index 100% rename from stream-connectors/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-jammy rename to .github/docker/Dockerfile.packaging-stream-connectors-nfpm-jammy diff --git a/.github/workflows/docker-builder.yml b/.github/workflows/docker-builder.yml index 6824ed50c02..3e12cd1fe7a 100644 --- a/.github/workflows/docker-builder.yml +++ b/.github/workflows/docker-builder.yml @@ -11,10 +11,10 @@ on: - develop - dev-[2-9][0-9].[0-9][0-9].x paths: - - '.github/docker/**' + - '.github/docker/Dockerfile.centreon-collect-*' pull_request: paths: - - '.github/docker/**' + - '.github/docker/Dockerfile.centreon-collect-*' jobs: get-version: diff --git a/stream-connectors/.github/workflows/docker-packaging.yml b/.github/workflows/docker-packaging-stream-connectors.yml similarity index 80% rename from stream-connectors/.github/workflows/docker-packaging.yml rename to .github/workflows/docker-packaging-stream-connectors.yml index ad1b69f9c60..7fec28f5494 100644 --- a/stream-connectors/.github/workflows/docker-packaging.yml +++ b/.github/workflows/docker-packaging-stream-connectors.yml @@ -10,13 +10,17 @@ on: branches: - develop paths: - - ".github/docker/Dockerfile.packaging-*" + - 
".github/docker/Dockerfile.packaging-stream-connectors-*" pull_request: paths: - - ".github/docker/Dockerfile.packaging-*" + - ".github/docker/Dockerfile.packaging-stream-connectors-*" jobs: + get-version: + uses: ./.github/workflows/get-version.yml + dockerize: + needs: [get-version] runs-on: ubuntu-22.04 strategy: @@ -42,4 +46,4 @@ jobs: build-args: "REGISTRY_URL=${{ vars.DOCKER_PROXY_REGISTRY_URL }}" pull: true push: true - tags: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/packaging-stream-connectors-nfpm-${{ matrix.distrib }}:latest + tags: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/packaging-stream-connectors-nfpm-${{ matrix.distrib }}:${{ needs.get-version.outputs.version }} diff --git a/stream-connectors/.github/workflows/lua-cffi.yml b/.github/workflows/lua-cffi.yml similarity index 81% rename from stream-connectors/.github/workflows/lua-cffi.yml rename to .github/workflows/lua-cffi.yml index 5142b766273..bd114ebaf20 100644 --- a/stream-connectors/.github/workflows/lua-cffi.yml +++ b/.github/workflows/lua-cffi.yml @@ -8,20 +8,22 @@ on: workflow_dispatch: pull_request: paths: - - dependencies/lua-cffi/** + - stream-connectors/dependencies/lua-cffi/** push: branches: - develop + - dev-[2-9][0-9].[0-9][0-9].x - master + - "[2-9][0-9].[0-9][0-9].x" paths: - - dependencies/lua-cffi/** + - stream-connectors/dependencies/lua-cffi/** jobs: - get-environment: - uses: ./.github/workflows/get-environment.yml + get-version: + uses: ./.github/workflows/get-version.yml package: - needs: [get-environment] + needs: [get-version] strategy: fail-fast: false @@ -47,7 +49,7 @@ jobs: runs-on: ubuntu-22.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} credentials: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} @@ -92,14 +94,14 @@ jobs: fi ninja all cd ../.. 
- mkdir -p dependencies/lua-cffi/lua-cffi - mv cffi-lua-src/build/cffi.so dependencies/lua-cffi/lua-cffi/ + mkdir -p stream-connectors/dependencies/lua-cffi/lua-cffi + mv cffi-lua-src/build/cffi.so stream-connectors/dependencies/lua-cffi/lua-cffi/ shell: bash - name: Package uses: ./.github/actions/package-nfpm with: - nfpm_file_pattern: "dependencies/lua-cffi/packaging/*.yaml" + nfpm_file_pattern: "stream-connectors/dependencies/lua-cffi/packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} arch: amd64 @@ -110,11 +112,11 @@ jobs: rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-rpm: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -130,13 +132,14 @@ jobs: with: module_name: lua-cffi distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-cffi-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-deb: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -152,6 +155,7 @@ jobs: with: module_name: lua-cffi distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-cffi-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} diff --git a/stream-connectors/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml similarity index 81% rename from stream-connectors/.github/workflows/lua-curl.yml rename to .github/workflows/lua-curl.yml index fee59f71ae5..41d75469ab0 100644 --- a/stream-connectors/.github/workflows/lua-curl.yml +++ b/.github/workflows/lua-curl.yml @@ -8,20 +8,22 @@ on: workflow_dispatch: pull_request: paths: - - dependencies/lua-curl/** + - stream-connectors/dependencies/lua-curl/** push: branches: - develop + - dev-[2-9][0-9].[0-9][0-9].x - master + - "[2-9][0-9].[0-9][0-9].x" paths: - - dependencies/lua-curl/** + - stream-connectors/dependencies/lua-curl/** jobs: - get-environment: - uses: ./.github/workflows/get-environment.yml + get-version: + uses: ./.github/workflows/get-version.yml package: - needs: [get-environment] + needs: [get-version] strategy: fail-fast: false @@ -47,7 +49,7 @@ jobs: runs-on: ubuntu-22.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} credentials: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ 
secrets.DOCKER_REGISTRY_PASSWD }} @@ -118,10 +120,10 @@ jobs: cd .. - mkdir -p dependencies/lua-curl/lua-curl/ - cp -p lua-curl-src/lcurl.so dependencies/lua-curl/lua-curl/ - cp -rp lua-curl-src/src/lua/cURL dependencies/lua-curl/lua-curl/ - cp -p lua-curl-src/src/lua/cURL.lua dependencies/lua-curl/lua-curl/ + mkdir -p stream-connectors/dependencies/lua-curl/lua-curl/ + cp -p lua-curl-src/lcurl.so stream-connectors/dependencies/lua-curl/lua-curl/ + cp -rp lua-curl-src/src/lua/cURL stream-connectors/dependencies/lua-curl/lua-curl/ + cp -p lua-curl-src/src/lua/cURL.lua stream-connectors/dependencies/lua-curl/lua-curl/ shell: bash - name: Update package name @@ -131,13 +133,13 @@ jobs: else NAME="lua5.3-curl" fi - sed -i "s/@NAME@/$NAME/g" ./dependencies/lua-curl/packaging/lua-curl.yaml + sed -i "s/@NAME@/$NAME/g" ./stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml shell: bash - name: Package uses: ./.github/actions/package-nfpm with: - nfpm_file_pattern: "dependencies/lua-curl/packaging/lua-curl.yaml" + nfpm_file_pattern: "stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} arch: amd64 @@ -148,11 +150,11 @@ jobs: rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-rpm: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -168,13 +170,14 @@ jobs: with: module_name: lua-curl distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-curl-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-deb: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -190,6 +193,7 @@ jobs: with: module_name: lua-curl distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-curl-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} diff --git a/stream-connectors/.github/workflows/lua-tz.yml b/.github/workflows/lua-tz.yml similarity index 81% rename from stream-connectors/.github/workflows/lua-tz.yml rename to .github/workflows/lua-tz.yml index 3491a54bc3d..ca1207ce375 100644 --- a/stream-connectors/.github/workflows/lua-tz.yml +++ b/.github/workflows/lua-tz.yml @@ -12,16 +12,18 @@ on: push: branches: - develop + - dev-[2-9][0-9].[0-9][0-9].x - master + - "[2-9][0-9].[0-9][0-9].x" paths: - dependencies/lua-tz/** jobs: - get-environment: - uses: 
./.github/workflows/get-environment.yml + get-version: + uses: ./.github/workflows/get-version.yml package: - needs: [get-environment] + needs: [get-version] strategy: fail-fast: false @@ -47,7 +49,7 @@ jobs: runs-on: ubuntu-22.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} credentials: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} @@ -66,13 +68,13 @@ jobs: ref: "v0.4-1" - name: Prepare packaging of lua-tz - run: cp -r luatz-src/luatz dependencies/lua-tz/lua-tz + run: cp -r luatz-src/luatz stream-connectors/dependencies/lua-tz/lua-tz shell: bash - name: Package uses: ./.github/actions/package-nfpm with: - nfpm_file_pattern: "dependencies/lua-tz/packaging/*.yaml" + nfpm_file_pattern: "stream-connectors/dependencies/lua-tz/packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} arch: all @@ -83,11 +85,11 @@ jobs: rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-rpm: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -103,13 +105,14 @@ jobs: with: module_name: lua-tz distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-tz-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-deb: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -125,6 +128,7 @@ jobs: with: module_name: lua-tz distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-tz-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} diff --git a/stream-connectors/.github/workflows/stream-connectors-lib.yml b/.github/workflows/stream-connectors-lib.yml similarity index 77% rename from stream-connectors/.github/workflows/stream-connectors-lib.yml rename to .github/workflows/stream-connectors-lib.yml index 383f41f6429..79e3291e124 100644 --- a/stream-connectors/.github/workflows/stream-connectors-lib.yml +++ b/.github/workflows/stream-connectors-lib.yml @@ -8,22 +8,24 @@ on: workflow_dispatch: pull_request: paths: - - packaging/connectors-lib/** - - modules/centreon-stream-connectors-lib/** + - stream-connectors/packaging/connectors-lib/** + - stream-connectors/modules/centreon-stream-connectors-lib/** push: branches: - develop + - 
dev-[2-9][0-9].[0-9][0-9].x - master + - "[2-9][0-9].[0-9][0-9].x" paths: - - packaging/connectors-lib/** - - modules/centreon-stream-connectors-lib/** + - stream-connectors/packaging/connectors-lib/** + - stream-connectors/modules/centreon-stream-connectors-lib/** jobs: - get-environment: - uses: ./.github/workflows/get-environment.yml + get-version: + uses: ./.github/workflows/get-version.yml package: - needs: [get-environment] + needs: [get-version] strategy: fail-fast: false @@ -49,7 +51,7 @@ jobs: runs-on: ubuntu-22.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} credentials: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} @@ -63,7 +65,7 @@ jobs: - name: Package uses: ./.github/actions/package-nfpm with: - nfpm_file_pattern: "packaging/connectors-lib/*.yaml" + nfpm_file_pattern: "stream-connectors/packaging/connectors-lib/*.yaml" distrib: ${{ matrix.distrib }} version: "3.6.1" release: "1" @@ -74,11 +76,11 @@ jobs: rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-rpm: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -94,13 +96,14 @@ jobs: with: module_name: stream-connectors-lib distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-deb: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -116,6 +119,7 @@ jobs: with: module_name: stream-connectors-lib distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} diff --git a/stream-connectors/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml similarity index 79% rename from stream-connectors/.github/workflows/stream-connectors.yml rename to .github/workflows/stream-connectors.yml index e126c291670..ba8ac8c20ca 100644 --- a/stream-connectors/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -8,17 +8,19 @@ on: workflow_dispatch: pull_request: paths: - - centreon-certified/** + - stream-connectors/centreon-certified/** push: branches: - develop + - dev-[2-9][0-9].[0-9][0-9].x - master + - "[2-9][0-9].[0-9][0-9].x" paths: - - 
centreon-certified/** + - stream-connectors/centreon-certified/** jobs: - get-environment: - uses: ./.github/workflows/get-environment.yml + get-version: + uses: ./.github/workflows/get-version.yml detect-changes: runs-on: ubuntu-22.04 @@ -34,7 +36,7 @@ jobs: list-files: shell filters: | connectors: - - centreon-certified/** + - stream-connectors/centreon-certified/** - name: transform to directories id: list-connectors @@ -52,7 +54,7 @@ jobs: package: if: ${{ needs.detect-changes.outputs.connectors != '[]' }} - needs: [get-environment, detect-changes] + needs: [get-version, detect-changes] runs-on: ubuntu-22.04 strategy: matrix: @@ -77,7 +79,7 @@ jobs: name: package ${{ matrix.distrib }} ${{ matrix.connector_path }} container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:latest + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} credentials: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} @@ -90,9 +92,9 @@ jobs: - name: Replace package and connector name variables run: | package_name="centreon-stream-connector-`basename ${{ matrix.connector_path }}`" - sed -i "s/@PACKAGE_NAME@/$package_name/g" ./packaging/connectors/centreon-stream-connectors.yaml + sed -i "s/@PACKAGE_NAME@/$package_name/g" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml connector_name="`basename ${{ matrix.connector_path }}`" - sed -i "s/@CONNECTOR_NAME@/$connector_name/g" ./packaging/connectors/centreon-stream-connectors.yaml + sed -i "s/@CONNECTOR_NAME@/$connector_name/g" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml shell: bash - name: Add specific dependencies @@ -109,8 +111,8 @@ jobs: DEB_DEPENDENCIES="lua-tz" RPM_DEPENDENCIES="lua-tz" fi - sed -i "s/@RPM_DEPENDENCIES@/$RPM_DEPENDENCIES/g;" ./packaging/connectors/centreon-stream-connectors.yaml - sed -i "s/@DEB_DEPENDENCIES@/$DEB_DEPENDENCIES/g;" ./packaging/connectors/centreon-stream-connectors.yaml + sed -i "s/@RPM_DEPENDENCIES@/$RPM_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml + sed -i "s/@DEB_DEPENDENCIES@/$DEB_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml shell: bash - name: Export package version @@ -121,7 +123,7 @@ jobs: - name: Package uses: ./.github/actions/package-nfpm with: - nfpm_file_pattern: "packaging/connectors/centreon-stream-connectors.yaml" + nfpm_file_pattern: "stream-connectors/packaging/connectors/centreon-stream-connectors.yaml" distrib: ${{ matrix.distrib }} version: ${{ steps.package-version.outputs.package_version }} release: "1" @@ -132,11 +134,11 @@ jobs: rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-rpm: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, detect-changes, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, detect-changes, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -153,13 +155,14 @@ jobs: with: module_name: stream-connectors distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: 
${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} deliver-deb: - if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-environment.outputs.stability) }} - needs: [get-environment, detect-changes, package] + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, detect-changes, package] runs-on: ubuntu-22.04 strategy: matrix: @@ -176,6 +179,7 @@ jobs: with: module_name: stream-connectors distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.connector_path }}-${{ matrix.distrib }} - stability: ${{ needs.get-environment.outputs.stability }} + stability: ${{ needs.get-version.outputs.stability }} diff --git a/stream-connectors/.github/CODEOWNERS b/stream-connectors/.github/CODEOWNERS deleted file mode 100644 index 9b986324e27..00000000000 --- a/stream-connectors/.github/CODEOWNERS +++ /dev/null @@ -1,8 +0,0 @@ -* @centreon/owners-lua - -*.md @centreon/owners-doc -*.mdx @centreon/owners-doc - -.github/** @centreon/owners-pipelines -packaging/** @centreon/owners-pipelines -selinux/** @centreon/owners-pipelines diff --git a/stream-connectors/.github/actions/deb-delivery/action.yml b/stream-connectors/.github/actions/deb-delivery/action.yml deleted file mode 100644 index 87e6f8e644e..00000000000 --- a/stream-connectors/.github/actions/deb-delivery/action.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: "deb-delivery" -description: "Deliver deb packages" -inputs: - module_name: - description: "The package module name" - required: true - distrib: - description: "The distribution used for packaging" - required: true - cache_key: - description: "The cached package key" - required: true - stability: - description: "The package stability (stable, testing, unstable)" - required: true - artifactory_token: - description: "Artifactory token" - required: true - -runs: - using: "composite" - steps: - - if: ${{ ! (inputs.distrib == 'jammy' && inputs.stability == 'stable') }} - name: Use cache DEB files - uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 - with: - path: ./*.deb - key: ${{ inputs.cache_key }} - fail-on-cache-miss: true - - - if: ${{ ! (inputs.distrib == 'jammy' && inputs.stability == 'stable') }} - uses: jfrog/setup-jfrog-cli@901bb9632db90821c2d3f076012bdeaf66598555 # v3.4.1 - env: - JF_URL: https://centreon.jfrog.io - JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} - - - if: ${{ ! (inputs.distrib == 'jammy' && inputs.stability == 'stable') }} - name: Publish DEBs - run: | - FILES="*.deb" - - if [[ "${{ inputs.distrib }}" == "jammy" ]]; then - REPO_PREFIX="ubuntu" - else - REPO_PREFIX="apt" - fi - - for FILE in $FILES; do - echo "[DEBUG] - File: $FILE" - - ARCH=$(echo $FILE | cut -d '_' -f3 | cut -d '.' 
-f1) - - jf rt upload "$FILE" "${REPO_PREFIX}-plugins-${{ inputs.stability }}/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" - done - shell: bash diff --git a/stream-connectors/.github/actions/package-nfpm/action.yml b/stream-connectors/.github/actions/package-nfpm/action.yml deleted file mode 100644 index 98981073642..00000000000 --- a/stream-connectors/.github/actions/package-nfpm/action.yml +++ /dev/null @@ -1,108 +0,0 @@ -name: package-nfpm -description: Package module using nfpm -inputs: - nfpm_file_pattern: - description: The pattern of the nfpm configuration file(s) - required: true - package_extension: - description: The package extension (deb or rpm) - required: true - distrib: - description: The package distrib - required: true - version: - description: The version - required: false - release: - description: The package release number - required: false - default: "1" - arch: - description: The package architecture - required: false - commit_hash: - description: The commit hash - required: true - cache_key: - description: The package files cache key - required: true - rpm_gpg_key: - description: The rpm gpg key - required: true - rpm_gpg_signing_key_id: - description: The rpm gpg signing key identifier - required: true - rpm_gpg_signing_passphrase: - description: The rpm gpg signing passphrase - required: true - stability: - description: "Branch stability (stable, testing, unstable, canary)" - required: true - -runs: - using: composite - - steps: - - name: Import gpg key - env: - RPM_GPG_SIGNING_KEY: ${{ inputs.rpm_gpg_key }} - run: echo -n "$RPM_GPG_SIGNING_KEY" > key.gpg - shell: bash - - - name: Build ${{ inputs.package_extension }} files - env: - RPM_GPG_SIGNING_KEY_ID: ${{ inputs.rpm_gpg_signing_key_id }} - RPM_GPG_SIGNING_PASSPHRASE: ${{ inputs.rpm_gpg_signing_passphrase }} - run: | - export VERSION="${{ inputs.version }}" - export RELEASE="${{ inputs.release }}" - export ARCH="${{ inputs.arch }}" - - if [ "${{ inputs.package_extension }}" = "rpm" ]; then - export DIST=".${{ inputs.distrib }}" - else - export DIST="" - if [ "${{ inputs.stability }}" = "unstable" ] || [ "${{ inputs.stability }}" = "canary" ]; then - export RELEASE="$RELEASE~${{ inputs.distrib }}" - else - export RELEASE="1~${{ inputs.distrib }}" - fi - fi - - luaver=$(lua -e "print(string.sub(_VERSION, 5))" 2>/dev/null || echo 0) - echo "luaver is $luaver" - if [ $luaver = "0" ]; then - echo "Cannot get lua version" - exit 1 - fi - - export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" - export RPM_SIGNING_KEY_ID="$RPM_GPG_SIGNING_KEY_ID" - export NFPM_RPM_PASSPHRASE="$RPM_GPG_SIGNING_PASSPHRASE" - - for FILE in ${{ inputs.nfpm_file_pattern }}; do - DIRNAME=$(dirname $FILE) - BASENAME=$(basename $FILE) - cd $DIRNAME - sed -i "s/@luaver@/$luaver/g" $BASENAME - sed -i "s/@COMMIT_HASH@/${{ inputs.commit_hash }}/g" $BASENAME - nfpm package --config $BASENAME --packager ${{ inputs.package_extension }} - cd - - mv $DIRNAME/*.${{ inputs.package_extension }} ./ - done - shell: bash - - - name: Cache packages - uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 - with: - path: ./*.${{ inputs.package_extension }} - key: ${{ inputs.cache_key }} - - # Update if condition to true to get packages as artifacts - - if: ${{ false }} - name: Upload package artifacts - uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 - with: - name: packages-${{ inputs.nfpm_file_pattern }}-${{ inputs.distrib }} - path: ./*.${{ inputs.package_extension}} - retention-days: 
1 diff --git a/stream-connectors/.github/actions/rpm-delivery/action.yml b/stream-connectors/.github/actions/rpm-delivery/action.yml deleted file mode 100644 index ad12396c92f..00000000000 --- a/stream-connectors/.github/actions/rpm-delivery/action.yml +++ /dev/null @@ -1,72 +0,0 @@ -name: "rpm-delivery" -description: "Deliver rpm packages" -inputs: - module_name: - description: "The package module name" - required: true - distrib: - description: "The distribution used for packaging" - required: true - cache_key: - description: "The cached package key" - required: true - stability: - description: "The package stability (stable, testing, unstable)" - required: true - artifactory_token: - description: "Artifactory token" - required: true - -runs: - using: "composite" - steps: - - name: Use cache RPM files - uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 - with: - path: ./*.rpm - key: ${{ inputs.cache_key }} - fail-on-cache-miss: true - - - uses: jfrog/setup-jfrog-cli@901bb9632db90821c2d3f076012bdeaf66598555 # v3.4.1 - env: - JF_URL: https://centreon.jfrog.io - JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} - - - name: Publish RPMs - run: | - FILES="*.rpm" - - echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" - - if [ -z "${{ inputs.module_name }}" ]; then - echo "module name is required" - exit 1 - fi - - if [ -z "${{ inputs.distrib }}" ]; then - echo "distrib is required" - exit 1 - fi - - mkdir noarch x86_64 - - for FILE in $FILES; do - echo "[DEBUG] - File: $FILE" - - ARCH=$(echo $FILE | grep -oP '(x86_64|noarch)') - - echo "[DEBUG] - Arch: $ARCH" - - mv "$FILE" "$ARCH" - done - - for ARCH in "noarch" "x86_64"; do - if [ "$(ls -A $ARCH)" ]; then - if [ "${{ inputs.stability }}" == "stable" ]; then - jf rt upload "$ARCH/*.rpm" "rpm-plugins/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/RPMS/${{ inputs.module_name }}/" --flat - else - jf rt upload "$ARCH/*.rpm" "rpm-plugins/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --flat - fi - fi - done - shell: bash diff --git a/stream-connectors/.github/dependabot.yml b/stream-connectors/.github/dependabot.yml deleted file mode 100644 index 6eeff4da184..00000000000 --- a/stream-connectors/.github/dependabot.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 -updates: - - package-ecosystem: github-actions - directory: '/' - schedule: - interval: monthly - open-pull-requests-limit: 10 - labels: - - 'dependencies' - - 'gha' diff --git a/stream-connectors/.github/workflows/actionlint.yml b/stream-connectors/.github/workflows/actionlint.yml deleted file mode 100644 index 021d34926bf..00000000000 --- a/stream-connectors/.github/workflows/actionlint.yml +++ /dev/null @@ -1,75 +0,0 @@ -name: actionlint - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -on: - pull_request: - branches: - - develop - - master - - hotfix-* - - release-* - paths: - - ".github/**" - -jobs: - actionlint: - runs-on: ubuntu-22.04 - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Download actionlint - id: get_actionlint - run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) - shell: bash - - - name: Check workflow files - run: | - ${{ steps.get_actionlint.outputs.executable }} \ - -ignore 'label "common" is unknown' \ - -ignore 'label "veracode" is unknown' \ - -ignore '"github.head_ref" is potentially untrusted' \ - 
-shellcheck= \ - -pyflakes= \ - -color - shell: bash - yaml-lint: - runs-on: ubuntu-22.04 - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - name: Install Yaml - run: | - pip install yamllint==1.32.0 - - - name: Add Yaml Lint Rules - run: | - cat <<EOF >>./yamllint_rules.yml - extends: default - - rules: - document-start: disable - line-length: disable - truthy: - check-keys: false - level: error - indentation: - spaces: 2 - indent-sequences: true - check-multi-line-strings: false - comments: - ignore-shebangs: true - min-spaces-from-content: 1 - comments-indentation: disable - new-lines: - type: unix - new-line-at-end-of-file: enable - EOF - - - name: Lint YAML files - run: | - yamllint -c ./yamllint_rules.yml ./.github/actions/ ./.github/workflows/ diff --git a/stream-connectors/.github/workflows/get-environment.yml b/stream-connectors/.github/workflows/get-environment.yml deleted file mode 100644 index 04db07b582f..00000000000 --- a/stream-connectors/.github/workflows/get-environment.yml +++ /dev/null @@ -1,40 +0,0 @@ -on: - workflow_call: - outputs: - stability: - description: "branch stability (stable, testing, unstable, canary)" - value: ${{ jobs.get-environment.outputs.stability }} - -jobs: - get-environment: - runs-on: ubuntu-22.04 - outputs: - stability: ${{ steps.get_environment.outputs.stability }} - - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - - id: get_environment - run: | - if [[ -z "$GITHUB_HEAD_REF" ]]; then - BRANCHNAME="$GITHUB_REF_NAME" - else - BRANCHNAME="$GITHUB_HEAD_REF" - fi - - case "$BRANCHNAME" in - develop | dev-[2-9][0-9].[0-9][0-9].x) - STABILITY="unstable" - ;; - release* | hotfix*) - STABILITY="testing" - ;; - master | [2-9][0-9].[0-9][0-9].x) - STABILITY="stable" - ;; - *) - STABILITY="canary" - ;; - esac - echo "stability=$STABILITY" >> $GITHUB_OUTPUT - shell: bash From f80f9f2b8e9b91d37d8345ef0d4ecd8dbf47108f Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Mon, 12 Feb 2024 15:57:52 +0100 Subject: [PATCH 203/219] fix(ci): set major and minor versions in workflows --- .github/actions/package/action.yml | 17 +++++++++++++++++ .github/workflows/centreon-collect.yml | 3 ++- .github/workflows/lua-cffi.yml | 3 ++- .github/workflows/lua-curl.yml | 3 ++- .github/workflows/lua-tz.yml | 3 ++- .github/workflows/package-collect.yml | 10 +++++++--- .github/workflows/robot-nightly.yml | 5 +++-- .github/workflows/stream-connectors-lib.yml | 5 +++-- .github/workflows/stream-connectors.yml | 3 ++- .../centreon-stream-connectors-lib.yaml | 7 +++++-- .../connectors/centreon-stream-connectors.yaml | 7 +++++-- 11 files changed, 50 insertions(+), 16 deletions(-) diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index 90aa456dbb7..b8a8bd788e5 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -10,6 +10,9 @@ inputs: distrib: description: The package distrib required: true + major_version: + description: The major version + required: false version: description: The package version required: false @@ -54,6 +57,7 @@ runs: RPM_GPG_SIGNING_KEY_ID: ${{ inputs.rpm_gpg_signing_key_id }} RPM_GPG_SIGNING_PASSPHRASE: ${{ inputs.rpm_gpg_signing_passphrase }} run: | + export MAJOR_VERSION="${{ inputs.major_version }}" export VERSION="${{ inputs.version }}" export RELEASE="${{ inputs.release }}" export ARCH="${{ inputs.arch }}" @@ -69,6 +73,19 @@ runs: fi fi + MAJOR_LEFT=$( echo $MAJOR_VERSION | cut -d "." 
-f1 ) + MAJOR_RIGHT=$( echo $MAJOR_VERSION | cut -d "-" -f1 | cut -d "." -f2 ) + BUMP_MAJOR_RIGHT=$(( MAJOR_RIGHT_PART + 1 )) + if [ "$MAJOR_RIGHT" = "04" ]; then + BUMP_MAJOR_LEFT="$MAJOR_LEFT" + BUMP_MAJOR_RIGHT="10" + else + BUMP_MAJOR_LEFT=$(( $MAJOR_LEFT + 1 )) + BUMP_MAJOR_RIGHT="04" + fi + + export NEXT_MAJOR_VERSION="$BUMP_MAJOR_LEFT.$BUMP_MAJOR_RIGHT" + luaver=$(lua -e "print(string.sub(_VERSION, 5))" 2>/dev/null || echo 0) export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml index 7c9ae6aaeae..a336124d299 100644 --- a/.github/workflows/centreon-collect.yml +++ b/.github/workflows/centreon-collect.yml @@ -81,7 +81,8 @@ jobs: if: ${{ ! contains(fromJson('["stable"]'), needs.get-version.outputs.stability) }} uses: ./.github/workflows/package-collect.yml with: - version: ${{ needs.get-version.outputs.version }}.${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.version }} + minor_version: ${{ needs.get-version.outputs.patch }} img_version: ${{ needs.get-version.outputs.img_version }} release: ${{ needs.get-version.outputs.release }} commit_hash: ${{ github.sha }} diff --git a/.github/workflows/lua-cffi.yml b/.github/workflows/lua-cffi.yml index bd114ebaf20..41648dd60ec 100644 --- a/.github/workflows/lua-cffi.yml +++ b/.github/workflows/lua-cffi.yml @@ -99,12 +99,13 @@ jobs: shell: bash - name: Package - uses: ./.github/actions/package-nfpm + uses: ./.github/actions/package with: nfpm_file_pattern: "stream-connectors/dependencies/lua-cffi/packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} arch: amd64 + major_version: ${{ needs.get-version.outputs.version }} version: "0.2.4" release: "1" commit_hash: ${{ github.sha }} diff --git a/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml index 41d75469ab0..3e8d82bdb74 100644 --- a/.github/workflows/lua-curl.yml +++ b/.github/workflows/lua-curl.yml @@ -137,12 +137,13 @@ jobs: shell: bash - name: Package - uses: ./.github/actions/package-nfpm + uses: ./.github/actions/package with: nfpm_file_pattern: "stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} arch: amd64 + major_version: ${{ needs.get-version.outputs.version }} version: "0.3.13" release: "7" commit_hash: ${{ github.sha }} diff --git a/.github/workflows/lua-tz.yml b/.github/workflows/lua-tz.yml index ca1207ce375..13ddaf48d2c 100644 --- a/.github/workflows/lua-tz.yml +++ b/.github/workflows/lua-tz.yml @@ -72,12 +72,13 @@ jobs: shell: bash - name: Package - uses: ./.github/actions/package-nfpm + uses: ./.github/actions/package with: nfpm_file_pattern: "stream-connectors/dependencies/lua-tz/packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} arch: all + major_version: ${{ needs.get-version.outputs.version }} version: "0.5" release: "1" commit_hash: ${{ github.sha }} diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml index 621f2635e78..3bed775ecb1 100644 --- a/.github/workflows/package-collect.yml +++ b/.github/workflows/package-collect.yml @@ -3,7 +3,10 @@ name: Centreon collect packaging on: workflow_call: inputs: - version: + major_version: + required: true + type: string + minor_version: required: true type: string img_version: @@ -102,7 +105,7 @@ jobs: cd selinux for MODULE in "centreon-engine" "centreon-broker"; do cd $MODULE 
- sed -i "s/@VERSION@/${{ inputs.version }}/g" $MODULE.te + sed -i "s/@VERSION@/${{ inputs.major_version }}.${{ inputs.minor_version }}/g" $MODULE.te make -f /usr/share/selinux/devel/Makefile cd - done @@ -172,7 +175,8 @@ jobs: nfpm_file_pattern: "packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} - version: ${{ inputs.version }} + major_version: ${{ inputs.major_version }} + version: ${{ inputs.major_version }}.${{ inputs.minor_version }} release: ${{ inputs.release }} arch: ${{ matrix.arch }} commit_hash: ${{ inputs.commit_hash }} diff --git a/.github/workflows/robot-nightly.yml b/.github/workflows/robot-nightly.yml index 13db94f259a..e3458489a67 100644 --- a/.github/workflows/robot-nightly.yml +++ b/.github/workflows/robot-nightly.yml @@ -49,11 +49,12 @@ jobs: needs: [get-version] uses: ./.github/workflows/package-collect.yml with: - stability: ${{ needs.get-version.outputs.stability }} - version: ${{ needs.get-version.outputs.version }}.${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.version }} + minor_version: ${{ needs.get-version.outputs.patch }} img_version: ${{ needs.get-version.outputs.img_version }} release: ${{ needs.get-version.outputs.release }} commit_hash: ${{ github.sha }} + stability: ${{ needs.get-version.outputs.stability }} secrets: inherit robot-test: diff --git a/.github/workflows/stream-connectors-lib.yml b/.github/workflows/stream-connectors-lib.yml index 79e3291e124..31c6ee91d13 100644 --- a/.github/workflows/stream-connectors-lib.yml +++ b/.github/workflows/stream-connectors-lib.yml @@ -63,11 +63,12 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Package - uses: ./.github/actions/package-nfpm + uses: ./.github/actions/package with: nfpm_file_pattern: "stream-connectors/packaging/connectors-lib/*.yaml" distrib: ${{ matrix.distrib }} - version: "3.6.1" + major_version: ${{ needs.get-version.outputs.version }} + version: ${{ needs.get-version.outputs.version }}.0 release: "1" package_extension: ${{ matrix.package_extension }} arch: all diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index ba8ac8c20ca..75ce1b03f16 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -121,10 +121,11 @@ jobs: shell: bash - name: Package - uses: ./.github/actions/package-nfpm + uses: ./.github/actions/package with: nfpm_file_pattern: "stream-connectors/packaging/connectors/centreon-stream-connectors.yaml" distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-version.outputs.version }} version: ${{ steps.package-version.outputs.package_version }} release: "1" package_extension: ${{ matrix.package_extension }} diff --git a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml index 7f34c7973cf..13f27f1d770 100644 --- a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml +++ b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml @@ -4,6 +4,7 @@ platform: "linux" version_schema: "none" version: "${VERSION}" release: "${RELEASE}${DIST}" +epoch: 1 section: "default" priority: "optional" maintainer: "Centreon " @@ -29,13 +30,15 @@ contents: overrides: rpm: depends: + - centreon-broker-core >= ${MAJOR_VERSION} + - centreon-broker-core < ${NEXT_MAJOR_VERSION} - lua-socket >= 3.0 - - centreon-broker-core >= 
22.04.0 - lua-curl - lua deb: depends: - - "centreon-broker-core (>= 22.04.0)" + - "centreon-broker-core (>= ${MAJOR_VERSION}~)" + - "centreon-broker-core (<< ${NEXT_MAJOR_VERSION}~)" - "lua-socket (>= 3.0~)" - "lua5.3-curl" - "lua5.3" diff --git a/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml b/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml index ebcee0f67bd..ebdc00cf35e 100644 --- a/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml +++ b/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml @@ -4,6 +4,7 @@ platform: "linux" version_schema: "none" version: "${VERSION}" release: "${RELEASE}${DIST}" +epoch: 1 section: "default" priority: "optional" maintainer: "Centreon <contact@centreon.com>" @@ -21,12 +22,14 @@ contents: overrides: rpm: depends: [ - centreon-stream-connectors-lib >= 3.0.0, + centreon-stream-connectors-lib >= ${MAJOR_VERSION}, + centreon-stream-connectors-lib < ${NEXT_MAJOR_VERSION}, @RPM_DEPENDENCIES@ ] deb: depends: [ - "centreon-stream-connectors-lib (>= 3.0.0~)", + "centreon-stream-connectors-lib (>= ${MAJOR_VERSION}~)", + "centreon-stream-connectors-lib (<< ${NEXT_MAJOR_VERSION}~)", @DEB_DEPENDENCIES@ ] rpm: From a0c87237df9fc2d6c67e7042440f48b2c0e520a1 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Mon, 12 Feb 2024 16:14:30 +0100 Subject: [PATCH 204/219] fix(ci): set major and minor versions in workflows --- .../packaging/connectors/centreon-stream-connectors.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml b/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml index ebdc00cf35e..f9e05dfe988 100644 --- a/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml +++ b/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml @@ -22,8 +22,8 @@ contents: overrides: rpm: depends: [ - centreon-stream-connectors-lib >= ${MAJOR_VERSION}, - centreon-stream-connectors-lib < ${NEXT_MAJOR_VERSION}, + "centreon-stream-connectors-lib >= ${MAJOR_VERSION}", + "centreon-stream-connectors-lib < ${NEXT_MAJOR_VERSION}", @RPM_DEPENDENCIES@ ] deb: From 183334868380a742b20dc002966f7aec389d3e7c Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 08:57:38 +0100 Subject: [PATCH 205/219] fix(packaging): update lua-curl and connectors versioning --- .github/workflows/lua-curl.yml | 4 ++-- .github/workflows/stream-connectors.yml | 2 +- .../connectors-lib/centreon-stream-connectors-lib.yaml | 6 ++++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml index 3e8d82bdb74..f4321fc26dc 100644 --- a/.github/workflows/lua-curl.yml +++ b/.github/workflows/lua-curl.yml @@ -144,8 +144,8 @@ jobs: package_extension: ${{ matrix.package_extension }} arch: amd64 major_version: ${{ needs.get-version.outputs.version }} - version: "0.3.13" - release: "7" + version: ${{ needs.get-version.outputs.version }}.0 + release: "1" commit_hash: ${{ github.sha }} cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-curl-${{ matrix.distrib }} rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index 75ce1b03f16..82a1809ed0b 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -126,7 +126,7 @@ jobs: nfpm_file_pattern: 
"stream-connectors/packaging/connectors/centreon-stream-connectors.yaml" distrib: ${{ matrix.distrib }} major_version: ${{ needs.get-version.outputs.version }} - version: ${{ steps.package-version.outputs.package_version }} + version: ${{ needs.get-version.outputs.version }}.${{ steps.package-version.outputs.package_version }} release: "1" package_extension: ${{ matrix.package_extension }} arch: all diff --git a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml index 13f27f1d770..95c93a43b1e 100644 --- a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml +++ b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml @@ -32,15 +32,17 @@ overrides: depends: - centreon-broker-core >= ${MAJOR_VERSION} - centreon-broker-core < ${NEXT_MAJOR_VERSION} + - lua-curl >= ${MAJOR_VERSION} + - lua-curl < ${NEXT_MAJOR_VERSION} - lua-socket >= 3.0 - - lua-curl - lua deb: depends: - "centreon-broker-core (>= ${MAJOR_VERSION}~)" - "centreon-broker-core (<< ${NEXT_MAJOR_VERSION}~)" + - "lua5.3-curl (>= ${MAJOR_VERSION}~)" + - "lua5.3-curl (<< ${NEXT_MAJOR_VERSION}~)" - "lua-socket (>= 3.0~)" - - "lua5.3-curl" - "lua5.3" rpm: From 7e809a9ae7d6930922c863e44a754d09c9ef52fa Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 09:56:15 +0100 Subject: [PATCH 206/219] test(ci): release bookworm unstable for testing purpose --- .github/actions/delivery/action.yml | 4 ++-- .github/workflows/get-version.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml index 66ed19e6db8..151c4d090d1 100644 --- a/.github/actions/delivery/action.yml +++ b/.github/actions/delivery/action.yml @@ -46,7 +46,7 @@ runs: JF_URL: https://centreon.jfrog.io JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} - - if: ${{ env.extfile == 'rpm' }} + - if: ${{ env.extfile == 'rpm' && inputs.distrib == 'bookworm' }} name: Publish RPMs run: | FILES="*.${{ env.extfile }}" @@ -89,7 +89,7 @@ runs: done shell: bash - - if: ${{ env.extfile == 'deb' }} + - if: ${{ env.extfile == 'deb' && inputs.distrib == 'bookworm' }} name: Publish DEBs run: | FILES="*.${{ env.extfile }}" diff --git a/.github/workflows/get-version.yml b/.github/workflows/get-version.yml index 117ade3c9fb..9617579b4aa 100644 --- a/.github/workflows/get-version.yml +++ b/.github/workflows/get-version.yml @@ -106,7 +106,7 @@ jobs: ENV="production" ;; *) - STABILITY="canary" + STABILITY="unstable" ;; esac echo "stability=$STABILITY" >> $GITHUB_OUTPUT From 4172a8cf75be1f58301ce8d9195fb10feaa77016 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 10:02:41 +0100 Subject: [PATCH 207/219] fix(ci): fix delivery action name for stream connectors --- .github/workflows/lua-cffi.yml | 4 ++-- .github/workflows/lua-curl.yml | 4 ++-- .github/workflows/lua-tz.yml | 4 ++-- .github/workflows/stream-connectors-lib.yml | 4 ++-- .github/workflows/stream-connectors.yml | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/lua-cffi.yml b/.github/workflows/lua-cffi.yml index 41648dd60ec..797013e99ed 100644 --- a/.github/workflows/lua-cffi.yml +++ b/.github/workflows/lua-cffi.yml @@ -129,7 +129,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish RPM packages - uses: ./.github/actions/rpm-delivery + uses: ./.github/actions/delivery with: module_name: 
lua-cffi distrib: ${{ matrix.distrib }} @@ -152,7 +152,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish DEB packages - uses: ./.github/actions/deb-delivery + uses: ./.github/actions/delivery with: module_name: lua-cffi distrib: ${{ matrix.distrib }} diff --git a/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml index f4321fc26dc..f8ba2d537b7 100644 --- a/.github/workflows/lua-curl.yml +++ b/.github/workflows/lua-curl.yml @@ -167,7 +167,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish RPM packages - uses: ./.github/actions/rpm-delivery + uses: ./.github/actions/delivery with: module_name: lua-curl distrib: ${{ matrix.distrib }} @@ -190,7 +190,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish DEB packages - uses: ./.github/actions/deb-delivery + uses: ./.github/actions/delivery with: module_name: lua-curl distrib: ${{ matrix.distrib }} diff --git a/.github/workflows/lua-tz.yml b/.github/workflows/lua-tz.yml index 13ddaf48d2c..2ba39bf02f5 100644 --- a/.github/workflows/lua-tz.yml +++ b/.github/workflows/lua-tz.yml @@ -102,7 +102,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish RPM packages - uses: ./.github/actions/rpm-delivery + uses: ./.github/actions/delivery with: module_name: lua-tz distrib: ${{ matrix.distrib }} @@ -125,7 +125,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish DEB packages - uses: ./.github/actions/deb-delivery + uses: ./.github/actions/delivery with: module_name: lua-tz distrib: ${{ matrix.distrib }} diff --git a/.github/workflows/stream-connectors-lib.yml b/.github/workflows/stream-connectors-lib.yml index 31c6ee91d13..a94b688b734 100644 --- a/.github/workflows/stream-connectors-lib.yml +++ b/.github/workflows/stream-connectors-lib.yml @@ -93,7 +93,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish RPM packages - uses: ./.github/actions/rpm-delivery + uses: ./.github/actions/delivery with: module_name: stream-connectors-lib distrib: ${{ matrix.distrib }} @@ -116,7 +116,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish DEB packages - uses: ./.github/actions/deb-delivery + uses: ./.github/actions/delivery with: module_name: stream-connectors-lib distrib: ${{ matrix.distrib }} diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index 82a1809ed0b..e5d82385839 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -152,7 +152,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish RPM packages - uses: ./.github/actions/rpm-delivery + uses: ./.github/actions/delivery with: module_name: stream-connectors distrib: ${{ matrix.distrib }} @@ -176,7 +176,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Publish DEB packages - uses: ./.github/actions/deb-delivery + uses: ./.github/actions/delivery with: module_name: stream-connectors distrib: ${{ matrix.distrib }} From f4495acb5f2725c04e6e1220940cd169293f22a9 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 10:14:40 +0100 Subject: [PATCH 208/219] fix(packaging): update libffi dependency --- .../dependencies/lua-cffi/packaging/env/.env.bookworm | 1 + 
.../dependencies/lua-cffi/packaging/env/.env.bullseye | 1 + .../dependencies/lua-cffi/packaging/env/.env.jammy | 1 + stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 stream-connectors/dependencies/lua-cffi/packaging/env/.env.bookworm create mode 100644 stream-connectors/dependencies/lua-cffi/packaging/env/.env.bullseye create mode 100644 stream-connectors/dependencies/lua-cffi/packaging/env/.env.jammy diff --git a/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bookworm b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bookworm new file mode 100644 index 00000000000..ef2f930eaba --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bookworm @@ -0,0 +1 @@ +LIBFFI_DEPENDENCY=libffi8 diff --git a/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bullseye b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bullseye new file mode 100644 index 00000000000..ba6237e2c25 --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bullseye @@ -0,0 +1 @@ +LIBFFI_DEPENDENCY=libffi7 diff --git a/stream-connectors/dependencies/lua-cffi/packaging/env/.env.jammy b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.jammy new file mode 100644 index 00000000000..ef2f930eaba --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.jammy @@ -0,0 +1 @@ +LIBFFI_DEPENDENCY=libffi8 diff --git a/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml b/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml index d1ed85928f9..bb29f45fd0e 100644 --- a/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml +++ b/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml @@ -32,7 +32,7 @@ overrides: deb: depends: - "lua5.3" - - "libffi7" + - "${LIBFFI_DEPENDENCY}" - "libffi-dev" rpm: From 301f771df27a429c01dcee60a842de51674b2501 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 10:30:57 +0100 Subject: [PATCH 209/219] fix(packaging): do not add epoch for stream connectors lib --- .../packaging/connectors-lib/centreon-stream-connectors-lib.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml index 95c93a43b1e..888b00be78b 100644 --- a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml +++ b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml @@ -4,7 +4,6 @@ platform: "linux" version_schema: "none" version: "${VERSION}" release: "${RELEASE}${DIST}" -epoch: 1 section: "default" priority: "optional" maintainer: "Centreon " From 10af2bd9b84eb8ae2bf2cfc7560eb4dce314b639 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 10:38:46 +0100 Subject: [PATCH 210/219] fix(packaging): export all variables in env file --- .github/actions/package/action.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index b8a8bd788e5..85c875bf0eb 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -96,7 +96,9 @@ runs: DIRNAME=$(dirname $FILE) BASENAME=$(basename $FILE) if [ -f $DIRNAME/env/.env.${{ inputs.distrib }} ]; then + set -o allexport source $DIRNAME/env/.env.${{ inputs.distrib }} + set +o allexport fi cd $DIRNAME sed -i "s/@luaver@/$luaver/g" $BASENAME 
From 3af6e53bc8cda00d0cf65b3d249a6a14815e154a Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 10:55:44 +0100 Subject: [PATCH 211/219] enh(packaging): increase version number of lua-tz and lua-cffi --- .github/workflows/lua-cffi.yml | 2 +- .github/workflows/lua-tz.yml | 2 +- .github/workflows/stream-connectors.yml | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/lua-cffi.yml b/.github/workflows/lua-cffi.yml index 797013e99ed..da3c4f77e0c 100644 --- a/.github/workflows/lua-cffi.yml +++ b/.github/workflows/lua-cffi.yml @@ -106,7 +106,7 @@ jobs: package_extension: ${{ matrix.package_extension }} arch: amd64 major_version: ${{ needs.get-version.outputs.version }} - version: "0.2.4" + version: ${{ needs.get-version.outputs.version }}.0 release: "1" commit_hash: ${{ github.sha }} cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-cffi-${{ matrix.distrib }} diff --git a/.github/workflows/lua-tz.yml b/.github/workflows/lua-tz.yml index 2ba39bf02f5..d8c4ca920c1 100644 --- a/.github/workflows/lua-tz.yml +++ b/.github/workflows/lua-tz.yml @@ -79,7 +79,7 @@ jobs: package_extension: ${{ matrix.package_extension }} arch: all major_version: ${{ needs.get-version.outputs.version }} - version: "0.5" + version: ${{ needs.get-version.outputs.version }}.0 release: "1" commit_hash: ${{ github.sha }} cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-tz-${{ matrix.distrib }} diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index e5d82385839..e9b3c86ab02 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -102,14 +102,14 @@ jobs: DEB_DEPENDENCIES="" RPM_DEPENDENCIES="" if [ "${{ matrix.connector_path }}" = "kafka" ]; then - DEB_DEPENDENCIES="librdkafka1,lua-cffi" - RPM_DEPENDENCIES="librdkafka,lua-cffi" + DEB_DEPENDENCIES='librdkafka1,"lua-cffi (>= \${MAJOR_VERSION}~)","lua-cffi (<< \${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='librdkafka,lua-cffi >= \${MAJOR_VERSION},lua-cffi \${NEXT_MAJOR_VERSION}' elif [ "${{ matrix.connector_path }}" = "pagerduty" ]; then - DEB_DEPENDENCIES="lua-tz" - RPM_DEPENDENCIES="lua-tz" + DEB_DEPENDENCIES='lua-tz (>= \${MAJOR_VERSION}~)","lua-tz (<< \${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='lua-tz >= \${MAJOR_VERSION},lua-tz \${NEXT_MAJOR_VERSION}' elif [ "${{ matrix.connector_path }}" = "splunk" ]; then - DEB_DEPENDENCIES="lua-tz" - RPM_DEPENDENCIES="lua-tz" + DEB_DEPENDENCIES='lua-tz (>= \${MAJOR_VERSION}~)","lua-tz (<< \${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='lua-tz >= \${MAJOR_VERSION},lua-tz \${NEXT_MAJOR_VERSION}' fi sed -i "s/@RPM_DEPENDENCIES@/$RPM_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml sed -i "s/@DEB_DEPENDENCIES@/$DEB_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml From c61be404d5caa7a9861d37610485eeb838b5fa5c Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 11:01:32 +0100 Subject: [PATCH 212/219] fix(packaging): do not escape $ in stream connectors packaging --- .github/workflows/stream-connectors.yml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index e9b3c86ab02..16e15d92b70 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -102,17 +102,19 @@ jobs: 
DEB_DEPENDENCIES="" RPM_DEPENDENCIES="" if [ "${{ matrix.connector_path }}" = "kafka" ]; then - DEB_DEPENDENCIES='librdkafka1,"lua-cffi (>= \${MAJOR_VERSION}~)","lua-cffi (<< \${NEXT_MAJOR_VERSION}~)"' - RPM_DEPENDENCIES='librdkafka,lua-cffi >= \${MAJOR_VERSION},lua-cffi \${NEXT_MAJOR_VERSION}' + DEB_DEPENDENCIES='librdkafka1,"lua-cffi (>= ${MAJOR_VERSION}~)","lua-cffi (<< ${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='librdkafka,lua-cffi >= ${MAJOR_VERSION},lua-cffi ${NEXT_MAJOR_VERSION}' elif [ "${{ matrix.connector_path }}" = "pagerduty" ]; then - DEB_DEPENDENCIES='lua-tz (>= \${MAJOR_VERSION}~)","lua-tz (<< \${NEXT_MAJOR_VERSION}~)"' - RPM_DEPENDENCIES='lua-tz >= \${MAJOR_VERSION},lua-tz \${NEXT_MAJOR_VERSION}' + DEB_DEPENDENCIES='lua-tz (>= ${MAJOR_VERSION}~)","lua-tz (<< ${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='lua-tz >= ${MAJOR_VERSION},lua-tz ${NEXT_MAJOR_VERSION}' elif [ "${{ matrix.connector_path }}" = "splunk" ]; then - DEB_DEPENDENCIES='lua-tz (>= \${MAJOR_VERSION}~)","lua-tz (<< \${NEXT_MAJOR_VERSION}~)"' - RPM_DEPENDENCIES='lua-tz >= \${MAJOR_VERSION},lua-tz \${NEXT_MAJOR_VERSION}' + DEB_DEPENDENCIES='lua-tz (>= ${MAJOR_VERSION}~)","lua-tz (<< ${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='lua-tz >= ${MAJOR_VERSION},lua-tz ${NEXT_MAJOR_VERSION}' fi sed -i "s/@RPM_DEPENDENCIES@/$RPM_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml sed -i "s/@DEB_DEPENDENCIES@/$DEB_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml + + cat ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml shell: bash - name: Export package version From fe69dbe995b2e13b9b98c52223da9ed914661ddb Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 11:05:38 +0100 Subject: [PATCH 213/219] fix(packaging): missing double quote in stream connectors packaging --- .github/workflows/stream-connectors.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index 16e15d92b70..d8fe903f04d 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -103,13 +103,13 @@ jobs: RPM_DEPENDENCIES="" if [ "${{ matrix.connector_path }}" = "kafka" ]; then DEB_DEPENDENCIES='librdkafka1,"lua-cffi (>= ${MAJOR_VERSION}~)","lua-cffi (<< ${NEXT_MAJOR_VERSION}~)"' - RPM_DEPENDENCIES='librdkafka,lua-cffi >= ${MAJOR_VERSION},lua-cffi ${NEXT_MAJOR_VERSION}' + RPM_DEPENDENCIES='librdkafka,"lua-cffi >= ${MAJOR_VERSION}","lua-cffi ${NEXT_MAJOR_VERSION}"' elif [ "${{ matrix.connector_path }}" = "pagerduty" ]; then - DEB_DEPENDENCIES='lua-tz (>= ${MAJOR_VERSION}~)","lua-tz (<< ${NEXT_MAJOR_VERSION}~)"' - RPM_DEPENDENCIES='lua-tz >= ${MAJOR_VERSION},lua-tz ${NEXT_MAJOR_VERSION}' + DEB_DEPENDENCIES='"lua-tz (>= ${MAJOR_VERSION}~)","lua-tz (<< ${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='"lua-tz >= ${MAJOR_VERSION}","lua-tz ${NEXT_MAJOR_VERSION}"' elif [ "${{ matrix.connector_path }}" = "splunk" ]; then - DEB_DEPENDENCIES='lua-tz (>= ${MAJOR_VERSION}~)","lua-tz (<< ${NEXT_MAJOR_VERSION}~)"' - RPM_DEPENDENCIES='lua-tz >= ${MAJOR_VERSION},lua-tz ${NEXT_MAJOR_VERSION}' + DEB_DEPENDENCIES='"lua-tz (>= ${MAJOR_VERSION}~)","lua-tz (<< ${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='"lua-tz >= ${MAJOR_VERSION}","lua-tz ${NEXT_MAJOR_VERSION}"' fi sed -i "s/@RPM_DEPENDENCIES@/$RPM_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml sed -i 
"s/@DEB_DEPENDENCIES@/$DEB_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml From d3830342dd716ce9f3e66b6350b1068abf92f4c1 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 11:10:59 +0100 Subject: [PATCH 214/219] fix(ci): trigger lua-tz build --- .github/workflows/lua-tz.yml | 4 ++-- .github/workflows/stream-connectors.yml | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/lua-tz.yml b/.github/workflows/lua-tz.yml index d8c4ca920c1..7a2e21d8f49 100644 --- a/.github/workflows/lua-tz.yml +++ b/.github/workflows/lua-tz.yml @@ -8,7 +8,7 @@ on: workflow_dispatch: pull_request: paths: - - dependencies/lua-tz/** + - stream-connectors/dependencies/lua-tz/** push: branches: - develop @@ -16,7 +16,7 @@ on: - master - "[2-9][0-9].[0-9][0-9].x" paths: - - dependencies/lua-tz/** + - stream-connectors/dependencies/lua-tz/** jobs: get-version: diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index d8fe903f04d..f2575780311 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -113,8 +113,6 @@ jobs: fi sed -i "s/@RPM_DEPENDENCIES@/$RPM_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml sed -i "s/@DEB_DEPENDENCIES@/$DEB_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml - - cat ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml shell: bash - name: Export package version From 5a9f05b879452ccc62b273295ba08dbe133030ba Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 11:26:17 +0100 Subject: [PATCH 215/219] test(ci): deliver el9 unstable --- .github/actions/delivery/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml index 151c4d090d1..c23e295c0e0 100644 --- a/.github/actions/delivery/action.yml +++ b/.github/actions/delivery/action.yml @@ -46,7 +46,7 @@ runs: JF_URL: https://centreon.jfrog.io JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} - - if: ${{ env.extfile == 'rpm' && inputs.distrib == 'bookworm' }} + - if: ${{ env.extfile == 'rpm' && inputs.distrib == 'el9' }} name: Publish RPMs run: | FILES="*.${{ env.extfile }}" @@ -89,7 +89,7 @@ runs: done shell: bash - - if: ${{ env.extfile == 'deb' && inputs.distrib == 'bookworm' }} + - if: ${{ env.extfile == 'deb' && inputs.distrib == 'el9' }} name: Publish DEBs run: | FILES="*.${{ env.extfile }}" From 0f0f4408a512dbbcdbd9b91949f96a47477d09ef Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 11:40:39 +0100 Subject: [PATCH 216/219] fix(ci): unique yum dir for each stream connector --- .github/workflows/stream-connectors.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml index f2575780311..bccec035214 100644 --- a/.github/workflows/stream-connectors.yml +++ b/.github/workflows/stream-connectors.yml @@ -154,7 +154,7 @@ jobs: - name: Publish RPM packages uses: ./.github/actions/delivery with: - module_name: stream-connectors + module_name: stream-connector-${{ matrix.connector_path }} distrib: ${{ matrix.distrib }} version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} @@ -178,7 +178,7 @@ jobs: - name: Publish DEB packages uses: ./.github/actions/delivery with: - module_name: stream-connectors + 
module_name: stream-connector-${{ matrix.connector_path }} distrib: ${{ matrix.distrib }} version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} From 28e99eff94015045d5a9de56fdc6e9924be42eb3 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Tue, 13 Feb 2024 12:08:50 +0100 Subject: [PATCH 217/219] fix(ci): restore delivery changes --- .github/actions/delivery/action.yml | 4 ++-- .github/workflows/get-version.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml index c23e295c0e0..66ed19e6db8 100644 --- a/.github/actions/delivery/action.yml +++ b/.github/actions/delivery/action.yml @@ -46,7 +46,7 @@ runs: JF_URL: https://centreon.jfrog.io JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} - - if: ${{ env.extfile == 'rpm' && inputs.distrib == 'el9' }} + - if: ${{ env.extfile == 'rpm' }} name: Publish RPMs run: | FILES="*.${{ env.extfile }}" @@ -89,7 +89,7 @@ runs: done shell: bash - - if: ${{ env.extfile == 'deb' && inputs.distrib == 'el9' }} + - if: ${{ env.extfile == 'deb' }} name: Publish DEBs run: | FILES="*.${{ env.extfile }}" diff --git a/.github/workflows/get-version.yml b/.github/workflows/get-version.yml index 9617579b4aa..117ade3c9fb 100644 --- a/.github/workflows/get-version.yml +++ b/.github/workflows/get-version.yml @@ -106,7 +106,7 @@ jobs: ENV="production" ;; *) - STABILITY="unstable" + STABILITY="canary" ;; esac echo "stability=$STABILITY" >> $GITHUB_OUTPUT From b80a484def5afdf38e1cdfece3e8733ad8abc774 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Thu, 15 Feb 2024 15:29:01 +0100 Subject: [PATCH 218/219] Update .github/actions/package/action.yml Co-authored-by: May <110405507+paul-oureib@users.noreply.github.com> --- .github/actions/package/action.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index 85c875bf0eb..30dcec46773 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -75,7 +75,6 @@ runs: MAJOR_LEFT=$( echo $MAJOR_VERSION | cut -d "." -f1 ) MAJOR_RIGHT=$( echo $MAJOR_VERSION | cut -d "-" -f1 | cut -d "." -f2 ) - BUMP_MAJOR_RIGHT=$(( MAJOR_RIGHT_PART + 1 )) if [ "$MAJOR_RIGHT" = "04" ]; then BUMP_MAJOR_LEFT="$MAJOR_LEFT" BUMP_MAJOR_RIGHT="10" From c5b0c22c673793940064499d08507ac80f337058 Mon Sep 17 00:00:00 2001 From: Kevin Duret Date: Thu, 15 Feb 2024 16:52:32 +0100 Subject: [PATCH 219/219] fix --- .github/workflows/lua-tz.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lua-tz.yml b/.github/workflows/lua-tz.yml index 7a2e21d8f49..45120b19bc3 100644 --- a/.github/workflows/lua-tz.yml +++ b/.github/workflows/lua-tz.yml @@ -65,7 +65,7 @@ jobs: with: repository: "daurnimator/luatz" path: "luatz-src" - ref: "v0.4-1" + ref: "e49b496e112ae1f0efdec24fc1c6a6f978f68014" # v0.4-1 - name: Prepare packaging of lua-tz run: cp -r luatz-src/luatz stream-connectors/dependencies/lua-tz/lua-tz
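
A note on the arch extraction used by the deb publishing steps above: Debian package files follow the <name>_<version>_<architecture>.deb naming convention, so the third underscore-separated field, stripped of its .deb suffix, is the architecture. A minimal sketch, using a made-up file name (real names come out of nfpm):

#!/bin/bash
# Sketch of the deb arch extraction from the delivery steps above.
# The sample file name is illustrative only, not taken from a real build.
FILE="centreon-stream-connector-splunk_24.04.0-1~bookworm_all.deb"
# Field 3 on '_' is "all.deb"; field 1 on '.' is then the bare architecture.
ARCH=$(echo "$FILE" | cut -d '_' -f3 | cut -d '.' -f1)
echo "$ARCH"   # prints: all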
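
The luaver probe that both the old package-nfpm action and the new package action rely on works because Lua's _VERSION global is a string of the form "Lua 5.3": dropping the first four characters leaves the bare interpreter version that the configs substitute for @luaver@. A sketch of the probe in isolation, assuming a lua binary on the PATH:

#!/bin/bash
# _VERSION in Lua is e.g. "Lua 5.3"; string.sub(_VERSION, 5) keeps "5.3".
# Falls back to 0 (and aborts) when no lua interpreter is installed.
luaver=$(lua -e "print(string.sub(_VERSION, 5))" 2>/dev/null || echo 0)
if [ "$luaver" = "0" ]; then
  echo "Cannot get lua version" >&2
  exit 1
fi
echo "luaver is $luaver"   # e.g. luaver is 5.3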
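
PATCH 203 derives NEXT_MAJOR_VERSION from MAJOR_VERSION so the packaging configurations can pin dependencies to a half-open range (>= ${MAJOR_VERSION}, < ${NEXT_MAJOR_VERSION}). Centreon majors follow a YY.MM cadence that alternates between .04 and .10 releases, so 24.04 bumps to 24.10 and 24.10 bumps to 25.04. A standalone sketch of that logic, with the stray BUMP_MAJOR_RIGHT assignment already dropped (PATCH 218 removes it) and wrapped in a helper function of our own naming:

#!/bin/bash
# Sketch of the NEXT_MAJOR_VERSION derivation from PATCH 203.
# next_major_version is an illustrative helper name, not part of the action.
next_major_version() {
  local MAJOR_VERSION="$1"
  local MAJOR_LEFT MAJOR_RIGHT
  MAJOR_LEFT=$( echo "$MAJOR_VERSION" | cut -d "." -f1 )
  # The first cut drops any "-suffix" (e.g. a beta tag) before taking ".MM".
  MAJOR_RIGHT=$( echo "$MAJOR_VERSION" | cut -d "-" -f1 | cut -d "." -f2 )
  if [ "$MAJOR_RIGHT" = "04" ]; then
    echo "$MAJOR_LEFT.10"            # spring release -> autumn, same year
  else
    echo "$(( MAJOR_LEFT + 1 )).04"  # autumn release -> spring, next year
  fi
}

next_major_version "24.04"   # prints 24.10
next_major_version "24.10"   # prints 25.04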
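
PATCH 208 introduces per-distribution env files (for example LIBFFI_DEPENDENCY=libffi8 in .env.bookworm) that the package action sources before calling nfpm, and PATCH 210 wraps that source in allexport because nfpm expands ${LIBFFI_DEPENDENCY} from its environment: a plain source only creates unexported shell variables that a child process never sees. A self-contained sketch of the difference:

#!/bin/bash
# Why PATCH 210 uses allexport: child processes (such as nfpm) only inherit
# exported variables, and a plain source of KEY=VALUE lines does not export.
cat > .env.bookworm <<'EOF'
LIBFFI_DEPENDENCY=libffi8
EOF

source .env.bookworm
sh -c 'echo "plain source:   [$LIBFFI_DEPENDENCY]"'   # prints []

set -o allexport    # every assignment below is exported automatically
source .env.bookworm
set +o allexport
sh -c 'echo "with allexport: [$LIBFFI_DEPENDENCY]"'   # prints [libffi8]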