From 843ef10109a98a3a16e993536870dec3c9f3eeb1 Mon Sep 17 00:00:00 2001 From: tuntoja <58987095+tuntoja@users.noreply.github.com> Date: Thu, 21 Jul 2022 14:55:54 +0200 Subject: [PATCH 1/4] Merge release-debian-22.04.0 into 22.04.x (#323) * enh(broker/mysql_connection): unix socket is different on Debian or on centos7 REFS: MON-13926 * enh(debian/package): add a default config file for cbd on debian package REFS: MON-13830 * enh(build/debian):clib.so on lib REFS: MON-13782 * Add missing broker lib (#262) * fix(jenkins): update jenkinsfile * fix jenkinsfile * fix jenkinsfile * fix jenkinsfile * fix jenkinsfile * fix jenkinsfile * bypass debian build dwz process Co-authored-by: David Boucher Co-authored-by: rem31 <73845199+rem31@users.noreply.github.com> Co-authored-by: Luiz Costa Co-authored-by: Zakaria GUENNOUNE --- CHANGELOG.md | 7 +++++++ CMakeLists.txt | 32 ++++++++++++++++++++++++++++++ Jenkinsfile | 5 +++-- broker/CMakeLists.txt | 32 +++++++++++------------------- broker/core/src/database_config.cc | 2 +- ci/debian/rules | 2 ++ engine/CMakeLists.txt | 16 --------------- 7 files changed, 57 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 992c77d1acf..719234ce0f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,13 @@ Don't coredump if connection fail on process start They are inserted in bulk now. 
+*sql* + +The mysql socket is defined with: +* /var/run/mysqld/mysqld.sock on Debian and similar distribs +* /var/lib/mysql/mysql.sock on RH and similar distribs +* /tmp/mysql.sock on others + ### Clib #### Fixes diff --git a/CMakeLists.txt b/CMakeLists.txt index 58039e350e0..f3cfd2d29a1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -39,6 +39,38 @@ set(ALLOW_DUPLICATE_EXECUTABLE TRUE) set(BUILD_ARGS "-w" "dupbuild=warn") +# +# Get distributions name +# +if (CMAKE_SYSTEM_NAME STREQUAL "Linux") + file(STRINGS "/etc/os-release" release + REGEX "^ID") + foreach (l ${release}) + if (${l} MATCHES "ID_LIKE=.*") + string(REGEX REPLACE "ID_LIKE=\"(.*)\"" "\\1" like ${l}) + endif () + + if (${l} MATCHES "ID=.*") + string(REGEX REPLACE "ID=\"(.*)\"" "\\1" id ${l}) + endif () + endforeach () + string(TOLOWER "${like}" like) + string(TOLOWER "${id}" id) + + if (("${id}" MATCHES "debian") OR ("${like}" MATCHES "debian") OR ("${id}" MATCHES "ubuntu") OR ("${like}" MATCHES "ubuntu")) + set(OS_DISTRIBUTOR "Debian") + elseif (("${id}" MATCHES "centos") OR ("${like}" MATCHES "centos")) + set(OS_DISTRIBUTOR "CentOS") + else () + message(WARNING "lsb_release in not installed") + set(OS_DISTRIBUTOR "${CMAKE_SYSTEM_NAME}") + endif () +else () + set(OS_DISTRIBUTOR "${CMAKE_SYSTEM_NAME}") +endif () + +message(STATUS "${id} detected (compatible with ${OS_DISTRIBUTOR})") + # set -latomic if OS is Raspbian. 
if (CMAKE_SYSTEM_PROCESSOR MATCHES "arm") set(CMAKE_CXX_LINK_FLAGS "${CMAKE_CXX_LINK_FLAGS} -latomic") diff --git a/Jenkinsfile b/Jenkinsfile index 13b9edf5a70..0ede90e00e5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -6,8 +6,8 @@ import org.jenkinsci.plugins.pipeline.modeldefinition.Utils */ env.PROJECT='centreon-collect' def serie = '22.04' -def maintenanceBranch = "${serie}.x" -def qaBranch = "dev-${serie}.x" +def maintenanceBranch = "master" +def qaBranch = "develop" def buildBranch = env.BRANCH_NAME env.REF_BRANCH = '${serie}.x' if (env.CHANGE_BRANCH) { @@ -22,6 +22,7 @@ if (env.BRANCH_NAME.startsWith('release-')) { env.REPO = 'testing' } else if ((env.BRANCH_NAME == env.REF_BRANCH) || (env.BRANCH_NAME == maintenanceBranch)) { env.BUILD = 'REFERENCE' + env.REPO = 'testing' } else if ((env.BRANCH_NAME == 'develop') || (env.BRANCH_NAME == qaBranch)) { env.BUILD = 'QA' env.REPO = 'unstable' diff --git a/broker/CMakeLists.txt b/broker/CMakeLists.txt index 5d5f9d34609..c01866d0662 100644 --- a/broker/CMakeLists.txt +++ b/broker/CMakeLists.txt @@ -52,6 +52,18 @@ if (WITH_ASAN) set (CMAKE_LINKER_FLAGS_DEBUG "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address") endif () +# Default MySQL socket +if (OS_DISTRIBUTOR STREQUAL "Debian" OR OS_DISTRIBUTOR STREQUAL "Ubuntu") + message(STATUS "deb based os") + add_definitions("-DMYSQL_SOCKET=\"/var/run/mysqld/mysqld.sock\"") +elseif (OS_DISTRIBUTOR STREQUAL "CentOS" OR OS_DISTRIBUTOR STREQUAL "RedHat") + message(STATUS "rpm based os") + add_definitions("-DMYSQL_SOCKET=\"/var/lib/mysql/mysql.sock\"") +else () + message(STATUS "other os: ${OS_DISTRIBUTOR}") + add_definitions("-DMYSQL_SOCKET=/tmp/mysql.sock") +endif () + include_directories("${PROJECT_SOURCE_DIR}/core/inc") include_directories("${PROJECT_SOURCE_DIR}/neb/inc") include_directories("${CMAKE_SOURCE_DIR}/engine/inc") @@ -126,26 +138,6 @@ include(cmake/tool.cmake) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake") 
add_definitions("-DASIO_STANDALONE") -# -# Get distributions name -# -if (CMAKE_SYSTEM_NAME STREQUAL "Linux") - message(STATUS "Attempting to determine OS distributor.") - execute_process(COMMAND "lsb_release" "--short" "--id" - RESULT_VARIABLE RETCODE - OUTPUT_VARIABLE OS_DISTRIBUTOR - ERROR_QUIET) - if (RETCODE EQUAL 0) - string(REGEX REPLACE "\n$" "" OS_DISTRIBUTOR "${OS_DISTRIBUTOR}") - else () - message(WARNING "lsb_release in not installed") - set(OS_DISTRIBUTOR "${CMAKE_SYSTEM_NAME}") - endif () -elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux") - set(OS_DISTRIBUTOR "${CMAKE_SYSTEM_NAME}") -endif () - - # # Options. # diff --git a/broker/core/src/database_config.cc b/broker/core/src/database_config.cc index 1c0802f1a6e..91e8049e6cc 100644 --- a/broker/core/src/database_config.cc +++ b/broker/core/src/database_config.cc @@ -100,7 +100,7 @@ database_config::database_config(config::endpoint const& cfg) { if (it != end) _socket = it->second; else - _socket = "/var/lib/mysql/mysql.sock"; + _socket = MYSQL_SOCKET; } else _socket = ""; diff --git a/ci/debian/rules b/ci/debian/rules index c38b55893bf..dcd9feb2d7a 100755 --- a/ci/debian/rules +++ b/ci/debian/rules @@ -29,3 +29,5 @@ override_dh_auto_build: override_dh_auto_install: cd build && \ make -j9 install DESTDIR=../debian/tmp-centreon-collect + +override_dh_dwz: diff --git a/engine/CMakeLists.txt b/engine/CMakeLists.txt index 406116f3a93..5c642ff06a2 100644 --- a/engine/CMakeLists.txt +++ b/engine/CMakeLists.txt @@ -195,22 +195,6 @@ else () set(GROUP "root") endif () -# Check OS distributor. 
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux") - message(STATUS "Attempting to determine OS distributor.") - execute_process(COMMAND "lsb_release" "--short" "--id" - RESULT_VARIABLE RETCODE - OUTPUT_VARIABLE OS_DISTRIBUTOR - ERROR_QUIET) - if (RETCODE EQUAL 0) - string(REGEX REPLACE "\n$" "" OS_DISTRIBUTOR "${OS_DISTRIBUTOR}") - else () - set(OS_DISTRIBUTOR "${CMAKE_SYSTEM_NAME}") - endif () -elseif () - set(OS_DISTRIBUTOR "${CMAKE_SYSTEM_NAME}") -endif () - # Set startup script to auto if not define. if (NOT WITH_STARTUP_SCRIPT) set(WITH_STARTUP_SCRIPT "auto") From 5dabf06001546b3a9fd3609c3d6686f4b7a547fe Mon Sep 17 00:00:00 2001 From: Charles Gautier <33026375+chgautier@users.noreply.github.com> Date: Wed, 31 Aug 2022 09:19:21 +0200 Subject: [PATCH 2/4] chore(release): merge release 22.04.1 into 22.04.x (#349) * enh(broker/mysql_connection): unix socket is different on Debian or on centos7 REFS: MON-13926 * Mon 13562 sonar atoi 22.04.x.new (#290) * Mon 13562 sonar atoi (#277) * enh(chore): update readme.md * enh(sonar) : replace atoi/strtol/strtoull by abseil SimpleAtoi * chore(readme) : update readme.md in tests * enh(sonar): replace atoi/strtoull bu SimpleAtoi abseil Co-authored-by: David Boucher * chore(readme) : update readme * cleanup(broker/mysql_result): false is better than 0 in this context Co-authored-by: David Boucher * enh(ccc): new grpc client At the moment, we can execute: ccc -p 31001 # to see what the server is ccc -p 31001 -l # to see the available methods REFS: MON-13947 * fix(engine): when display_name is empty, it should be replaced by description or hostname REFS: MON-13968 * enh(tests): database credentials configurable from resource.robot (#284) (#291) * enh(tests): database credentials configurable from resource.robot * enh(tests): remove console.log * rror: no keyword Kill Broker found, almost works * enh(tests): database credentials configurable from db_variables.robot * fix(tests) : replace DBNAME_STORAGE by correct DBNAME * fix(tests): 
remove personal passwords and add str * fix(tests): remove bad str in front of variables from .robot * enh(tests): seems to work. Need to change my credentials * enh(tests): default passwords in db-variables.robot * fix(engine): checkable::name() conflicted with host::name() * Mon 13901 cbd multiargs 22.04.x (#293) * enh(broker) : cbd with multiargs, robot test to do. Debug is unused * robot tests still don't work * enh(cbd) : robot tests for this ticket and update doc * fix(tests): find log in timeout can read the first line * fix(tests): 4 robot tests pass * fix(tests): correct clang-format * fix(cbd): main with better comments * Mon 13562 sonar unsigned bitwise operator (#294) (#295) * enh(sonar): avoid unsigned bitwise operator * enh(gitignore): ignore database credentials in tests folder * enh(tests): untrack db_variables.robot * fix(tests): fix bad commit on Find in log with timeout * fix(tests): restore db_variables.robot * fix(tests): remove gitignore in tests folder * enh(sonar): ignore postgresql in bam folder (#296) (#298) A security issue easily fixable. 
REFS: MON-13562 * enh(sonar): change reserved name to a non reserved one (#297) (#301) * enh(sonar): change reserved name to a non reserved one * enh(sonar): change exp to expr * fix(tests): bad call on lambda * fix(sonar): good lambda call on this file * fix(broker/bam): downtimes on kpi can be more than one * enh(tests/bam): test on bam and ignored downtimes on kpi implemented * chore(doc): CHANGELOG updated * doc(tests): README updated REFS: MON-14091 * fix(broker): rebuild/remove graphs has come back REFS: MON-14092 * enh(ccc): ccc is functional * enh(tests/ccc): new tests added * fix(ccc): info are output on stderr * enh(ccc): help input messages added * enh(ccc): help on functions works * cleanup(ccc): comments added REFS: MON-14191 * MON-14166 fix bbdo compression nego (#316) REFS:MON-14166 * Mon 13562 sonar atoi external command (#302) (#318) #REFS: MON-13562 -replace atoi/strtoul by abseil::SimpleAtoi -robot tests : BEATOI11, BEATOI12, BEATOI13, BEATOI21, BEATOI22, BEATOI23 -unit tests : SSHOptions in connectors/ssh/tests/options.cc * fix(broker/engine): grpc api can be changed through configuration. 
Otherwise it is fixed to localhost (#321) REFS: MON-13904 * fix(broker/bam): detection of downtime end was bad (#319) * fix(tests/bam): sometimes broker is too long to stop * fix(tests/summary): new options -f and -s * fix(conanfile): openssl updated REFS: MON-14091 * fix(engine): bad rebase fixed here * enh(sonar) : replace new by make_shared or make_unique (#322) (#326) #REFS : MON-14198 enh(sonar) : replace new by make_shared or make_unique when it's already a smart pointer * fix(robot): fix robot BEATOI tests 22.04 (#325) * fix(robot): fix robot BEATOI tests * enh(robot) : tag atoi * Mon 14198 sonar dynamic allocations (#327) (#328) #REFS: MON-14198 -replace str::dup by string or sabsl::stringview to avoid new and delete -remove unused code in broker.cc -fix clear broker logs so robot memory doesn't crash * fix(broker/rebuild): error in sql query REFS: MON-14092 * fix(broker/rebuild): creation date of rrd file too late REFS: MON-14092 * fix(ci): update dockerfile centos7 for python38 (#337) * fix(ci/debian): set shell to centreon-engine user (#334) REFS: MON-14363 * fix(ci): issue with conan fixed * fix(ci/scripts): conan bad path * enh(engine/anomalydetection): Enable recheck for anomaly-detection services REFS: MON-14158 * fix(ci/tests): dateutil missing for robot * fix(ci): correct syntax Co-authored-by: David Boucher Co-authored-by: denliA <91119589+denliA@users.noreply.github.com> Co-authored-by: jean-christophe81 <98889244+jean-christophe81@users.noreply.github.com> Co-authored-by: rem31 <73845199+rem31@users.noreply.github.com> Co-authored-by: Luiz Costa --- CHANGELOG.md | 47 +- CMakeLists.txt | 1 + bbdo/CMakeLists.txt | 236 ++--- bbdo/events.hh | 2 +- broker/bam/postgresql_v2/mod_bam.sql | 30 - .../bam/postgresql_v2/mod_bam_ba_groups.sql | 12 - broker/bam/postgresql_v2/mod_bam_impacts.sql | 9 - .../mod_bam_poller_relations.sql | 10 - .../mod_bam_reporting_ba_events.sql | 17 - .../postgresql_v2/mod_bam_reporting_bv.sql | 12 - 
.../mod_bam_reporting_kpi_events.sql | 18 - broker/bam/postgresql_v3/cfg_bam.sql | 36 - .../bam/postgresql_v3/cfg_bam_ba_groups.sql | 13 - broker/bam/postgresql_v3/cfg_bam_ba_types.sql | 12 - broker/bam/postgresql_v3/cfg_bam_boolean.sql | 14 - broker/bam/postgresql_v3/cfg_bam_impacts.sql | 9 - .../mod_bam_reporting_ba_events.sql | 17 - .../postgresql_v3/mod_bam_reporting_bv.sql | 12 - .../mod_bam_reporting_kpi_events.sql | 18 - broker/bam/src/ba.cc | 30 +- .../bam/src/configuration/bool_expression.cc | 6 +- broker/bam/src/kpi_service.cc | 16 +- broker/bam/test/ba/kpi_service.cc | 1 + .../inc/com/centreon/broker/broker_impl.hh | 6 +- .../inc/com/centreon/broker/config/state.hh | 3 + .../inc/com/centreon/broker/mapping/entry.hh | 9 +- broker/core/precomp_inc/precomp.hpp | 2 + broker/core/src/bbdo/stream.cc | 2 +- broker/core/src/broker.proto | 2 +- broker/core/src/broker_impl.cc | 2 +- broker/core/src/brokerrpc.cc | 5 +- broker/core/src/config/parser.cc | 25 +- broker/core/src/config/state.cc | 25 +- broker/core/src/database/mysql_result.cc | 94 +- broker/core/src/database/mysql_stmt.cc | 20 +- broker/core/src/main.cc | 87 +- broker/core/test/config/parser.cc | 109 +++ broker/neb/CMakeLists.txt | 131 +-- broker/neb/precomp_inc/precomp.hpp | 1 + broker/neb/src/callbacks.cc | 111 ++- .../inc/com/centreon/broker/rrd/backend.hh | 3 +- .../rrd/inc/com/centreon/broker/rrd/cached.hh | 5 +- .../inc/com/centreon/broker/rrd/creator.hh | 3 +- broker/rrd/inc/com/centreon/broker/rrd/lib.hh | 3 +- broker/rrd/src/creator.cc | 96 +- broker/rrd/src/lib.cc | 6 +- broker/rrd/src/output.cc | 28 +- .../com/centreon/broker/storage/internal.hh | 4 +- .../com/centreon/broker/storage/rebuilder.hh | 2 +- broker/storage/src/conflict_manager.cc | 7 +- broker/storage/src/main.cc | 5 +- broker/storage/src/rebuilder.cc | 8 +- broker/test/rebuild_graphs.cc | 2 +- .../centreon/broker/unified_sql/internal.hh | 4 +- .../centreon/broker/unified_sql/rebuilder.hh | 2 +- 
.../com/centreon/broker/unified_sql/stream.hh | 5 +- broker/unified_sql/src/main.cc | 5 +- broker/unified_sql/src/rebuilder.cc | 8 +- broker/unified_sql/src/stream.cc | 42 +- broker/unified_sql/src/stream_storage.cc | 173 ++-- ccc/CMakeLists.txt | 55 ++ ccc/client.cc | 356 ++++++++ ccc/client.hh | 62 ++ ccc/main.cc | 165 ++++ ci/debian/centreon-engine.postinst | 2 +- ci/debian/control | 9 + .../Dockerfile.collect-centos7-dependencies | 4 +- .../Dockerfile.collect-debian11-dependencies | 4 +- ci/release/Jenkinsfile | 2 +- ci/scripts/collect-sources-analysis.sh | 1 + ci/scripts/collect-test-robot.sh | 9 +- ci/scripts/collect-unit-tests.sh | 7 +- .../inc/com/centreon/exceptions/msg_fmt.hh | 0 clib/inc/com/centreon/timestamp.hh | 5 + clib/src/timestamp.cc | 15 + conanfile.txt | 14 +- connectors/CMakeLists.txt | 1 + connectors/ssh/CMakeLists.txt | 2 +- connectors/ssh/src/orders/options.cc | 34 +- connectors/ssh/test/options.cc | 46 + engine/CMakeLists.txt | 388 ++++---- engine/enginerpc/engine_impl.cc | 6 +- engine/enginerpc/precomp_inc/precomp.hh | 1 + .../com/centreon/engine/anomalydetection.hh | 17 +- engine/inc/com/centreon/engine/broker.hh | 11 +- .../inc/com/centreon/engine/check_result.hh | 41 +- engine/inc/com/centreon/engine/checkable.hh | 17 +- .../inc/com/centreon/engine/checks/checker.hh | 24 +- .../com/centreon/engine/commands/command.hh | 60 +- .../com/centreon/engine/commands/commands.hh | 2 +- .../com/centreon/engine/commands/connector.hh | 4 +- .../com/centreon/engine/commands/forward.hh | 4 +- .../inc/com/centreon/engine/commands/raw.hh | 5 +- .../com/centreon/engine/commands/result.hh | 7 + engine/inc/com/centreon/engine/common.hh | 2 +- .../centreon/engine/configuration/state.hh | 12 +- engine/inc/com/centreon/engine/host.hh | 7 +- engine/inc/com/centreon/engine/log_v2.hh | 1 + engine/inc/com/centreon/engine/nebstructs.hh | 15 +- engine/inc/com/centreon/engine/notifier.hh | 3 +- engine/inc/com/centreon/engine/service.hh | 8 +- 
engine/inc/com/centreon/engine/utils.hh | 11 + .../external_commands/precomp_inc/precomp.hh | 1 + engine/precomp_inc/precomp.hh | 1 + engine/scripts/centengine.sh.in | 8 +- engine/src/anomalydetection.cc | 711 +++++++++------ engine/src/broker.cc | 127 +-- engine/src/broker/compatibility.cc | 2 +- engine/src/broker/handle.cc | 2 +- engine/src/broker/loader.cc | 2 +- engine/src/check_result.cc | 65 +- engine/src/checkable.cc | 23 +- engine/src/checks/checker.cc | 213 +++-- engine/src/command_manager.cc | 24 +- engine/src/commands/command.cc | 79 +- engine/src/commands/commands.cc | 162 +++- engine/src/commands/connector.cc | 30 +- engine/src/commands/forward.cc | 9 +- engine/src/commands/processing.cc | 61 +- engine/src/commands/raw.cc | 85 +- engine/src/commands/result.cc | 23 + engine/src/compatibility/logging.cc | 8 +- engine/src/configuration/anomalydetection.cc | 4 +- .../configuration/applier/anomalydetection.cc | 20 +- engine/src/configuration/applier/command.cc | 4 +- engine/src/configuration/applier/connector.cc | 4 +- .../src/configuration/applier/contactgroup.cc | 3 +- engine/src/configuration/applier/host.cc | 14 +- .../configuration/applier/hostescalation.cc | 4 +- engine/src/configuration/applier/hostgroup.cc | 4 +- engine/src/configuration/applier/scheduler.cc | 14 +- .../applier/serviceescalation.cc | 4 +- .../src/configuration/applier/servicegroup.cc | 4 +- .../src/configuration/applier/timeperiod.cc | 4 +- engine/src/configuration/host.cc | 4 +- engine/src/configuration/object.cc | 36 +- engine/src/configuration/service.cc | 4 +- engine/src/configuration/state.cc | 45 +- engine/src/contact.cc | 2 +- engine/src/downtimes/downtime_manager.cc | 4 +- engine/src/downtimes/host_downtime.cc | 32 +- engine/src/events/loop.cc | 7 +- engine/src/events/timed_event.cc | 4 +- engine/src/host.cc | 836 +++++++++--------- engine/src/hostdependency.cc | 5 +- engine/src/log_v2.cc | 18 +- engine/src/macros.cc | 2 +- engine/src/macros/grab_host.cc | 8 +- 
engine/src/macros/grab_value.cc | 24 +- engine/src/main.cc | 10 +- engine/src/notifier.cc | 357 ++++---- engine/src/retention/dump.cc | 6 +- engine/src/retention/object.cc | 18 +- engine/src/sehandlers.cc | 16 +- engine/src/service.cc | 769 ++++++++-------- engine/src/utils.cc | 12 +- engine/src/xpddefault.cc | 4 +- engine/src/xsddefault.cc | 2 +- engine/tests/CMakeLists.txt | 89 +- engine/tests/checks/anomalydetection.cc | 404 +++++---- engine/tests/commands/connector.cc | 11 +- engine/tests/commands/simple-command.cc | 99 ++- .../configuration/applier/applier-global.cc | 36 + .../configuration/applier/applier-host.cc | 12 +- engine/tests/helper.cc | 10 +- engine/tests/macros/macro_hostname.cc | 6 +- .../notifications/host_normal_notification.cc | 3 + packaging/rpm/centreon-collect.spec | 15 + sonar-project.properties | 2 +- tests/.gitignore | 24 - tests/README.md | 125 ++- tests/bam/inherited_downtime.robot | 184 +++- tests/broker-engine/anomaly-detection.robot | 90 ++ tests/broker-engine/bbdo-protobuf.robot | 10 +- tests/broker-engine/compression.robot | 3 +- tests/broker-engine/downtimes.robot | 1 + tests/broker-engine/external-commands.robot | 673 ++++++++------ tests/broker-engine/hostgroups.robot | 3 +- .../hosts-with-notes-and-actions.robot | 1 + tests/broker-engine/log-v2_engine.robot | 21 +- tests/broker-engine/output-tables.robot | 1 + .../broker-engine/retention-duplicates.robot | 25 +- tests/broker-engine/reverse-connection.robot | 19 +- tests/broker-engine/rrd-from-db.robot | 195 ++++ tests/broker-engine/rrd.robot | 14 +- tests/broker-engine/scheduler.robot | 3 +- tests/broker-engine/servicegroups.robot | 1 + tests/broker-engine/services-increased.robot | 1 + tests/broker-engine/start-stop.robot | 173 +++- tests/broker-engine/tags.robot | 1 + tests/broker-engine/tls.robot | 90 +- tests/broker/command-line.robot | 28 +- tests/broker/grpc-stream.robot | 6 +- tests/broker/sql.robot | 40 +- tests/broker/start-stop.robot | 6 +- tests/ccc/ccc.robot | 266 
++++++ .../conf_engine/central-module.json | 12 +- .../conf_engine/central-module.json | 10 +- tests/engine/forced_checks.robot | 8 +- tests/init-sql.sh | 17 +- tests/resources/Broker.py | 461 ++++++---- tests/resources/Common.py | 254 +++--- tests/resources/Engine.py | 255 ++++-- tests/resources/db_conf.py | 61 +- tests/resources/db_variables.robot | 12 + tests/resources/resources.robot | 37 +- tests/severities/severities.robot | 8 +- tests/summary.py | 50 +- 208 files changed, 7011 insertions(+), 3871 deletions(-) delete mode 100644 broker/bam/postgresql_v2/mod_bam.sql delete mode 100644 broker/bam/postgresql_v2/mod_bam_ba_groups.sql delete mode 100644 broker/bam/postgresql_v2/mod_bam_impacts.sql delete mode 100644 broker/bam/postgresql_v2/mod_bam_poller_relations.sql delete mode 100644 broker/bam/postgresql_v2/mod_bam_reporting_ba_events.sql delete mode 100644 broker/bam/postgresql_v2/mod_bam_reporting_bv.sql delete mode 100644 broker/bam/postgresql_v2/mod_bam_reporting_kpi_events.sql delete mode 100644 broker/bam/postgresql_v3/cfg_bam.sql delete mode 100644 broker/bam/postgresql_v3/cfg_bam_ba_groups.sql delete mode 100644 broker/bam/postgresql_v3/cfg_bam_ba_types.sql delete mode 100644 broker/bam/postgresql_v3/cfg_bam_boolean.sql delete mode 100644 broker/bam/postgresql_v3/cfg_bam_impacts.sql delete mode 100644 broker/bam/postgresql_v3/mod_bam_reporting_ba_events.sql delete mode 100644 broker/bam/postgresql_v3/mod_bam_reporting_bv.sql delete mode 100644 broker/bam/postgresql_v3/mod_bam_reporting_kpi_events.sql create mode 100644 ccc/CMakeLists.txt create mode 100644 ccc/client.cc create mode 100644 ccc/client.hh create mode 100644 ccc/main.cc rename {broker/neb => clib}/inc/com/centreon/exceptions/msg_fmt.hh (100%) create mode 100644 connectors/ssh/test/options.cc mode change 100755 => 100644 engine/src/macros/grab_host.cc delete mode 100644 tests/.gitignore create mode 100644 tests/broker-engine/anomaly-detection.robot create mode 100644 
tests/broker-engine/rrd-from-db.robot create mode 100644 tests/ccc/ccc.robot create mode 100644 tests/resources/db_variables.robot diff --git a/CHANGELOG.md b/CHANGELOG.md index 719234ce0f1..818ac397d90 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,23 +2,46 @@ ## 22.04.1 +### ccc + +First version of ccc. Here is a client that can connect to broker or engine +through the gRPC server. Its goal is then to execute available methods on +these interfaces. At the moment, it checks the connection, tells if it was +established on engine or broker and is also able to list available methods. + ### Broker +#### Enhancements + +*grpc* + +The gRPC api only listens by default on localhost. This is customizable with +the configuration file. + #### Fixes +*rrd* + +Rebuilding/removing graphs is reenabled through database and a broker reload. + *main* -s option works and can return errors if bad value entered -*stream gRPC* +*GRPC stream* -A gRPC stream connector did not stop correctly on cbd stop. +* Doesn't coredump if connection fails on start process. +* The gRPC stream connector did not stop correctly on cbd stop. *BAM* On BAM misconfiguration, cbd could crash. This is fixed now. That was due to an issue in mysql code with promises handling. +In a BA configured to ignore its kpi downtimes, if a kpi represented by a +service has two overlapping downtimes applied. When the first one is cancelled, +it is as if all the downtimes are removed. This is fixed with this new version. + *Debian* Default configuration files were not installed on a Debian fresh install. @@ -29,13 +52,8 @@ tags are well removed now. Columns notes, notes\_url and action\_url are resized. -*Debian* - -Default configuration files were not installed on a Debian fresh install. - -*GRPC stream* - -Don't coredump if connection fail on process start +*Compression* +In the bbdo negotiation, compression was never activated #### Enhancements @@ -60,8 +78,19 @@ Packaging did not follow Debian good practices. 
### Engine +#### Bugfixes + +*resources* + +The display\_name of resources could be emptied in several case of reload. + #### Enhancements +*grpc* + +The gRPC api only listens by default on localhost. This is customizable with +the configuration file. + *comments* They are sent only once to broker. diff --git a/CMakeLists.txt b/CMakeLists.txt index f3cfd2d29a1..386186d3c01 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -129,6 +129,7 @@ add_subdirectory(broker) add_subdirectory(clib) add_subdirectory(engine) add_subdirectory(connectors) +add_subdirectory(ccc) add_custom_target(test-broker COMMAND tests/ut_broker diff --git a/bbdo/CMakeLists.txt b/bbdo/CMakeLists.txt index b07fe02ea96..8990add96cb 100644 --- a/bbdo/CMakeLists.txt +++ b/bbdo/CMakeLists.txt @@ -1,73 +1,73 @@ -## -## Copyright 2021 Centreon -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License. -## -## For more information : contact@centreon.com -## +# # +# # Copyright 2021 Centreon +# # +# # Licensed under the Apache License, Version 2.0 (the "License"); +# # you may not use this file except in compliance with the License. +# # You may obtain a copy of the License at +# # +# # http://www.apache.org/licenses/LICENSE-2.0 +# # +# # Unless required by applicable law or agreed to in writing, software +# # distributed under the License is distributed on an "AS IS" BASIS, +# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# # See the License for the specific language governing permissions and +# # limitations under the License. +# # +# # For more information : contact@centreon.com +# # set(protobuf_files - rebuild_message - remove_graph_message - service - host - severity - tag - ) + rebuild_message + remove_graph_message + service + host + severity + tag +) foreach(name IN LISTS protobuf_files) - set(proto_file "${name}.proto") - set(full_proto_file "${CMAKE_SOURCE_DIR}/bbdo/${name}.proto") - add_custom_command(OUTPUT "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.cc" "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.h" - DEPENDS ${full_proto_file} - COMMENT "Generating interface files of the bbdo file ${proto_file}" - COMMAND ${Protobuf_PROTOC_EXECUTABLE} - ARGS --cpp_out=${CMAKE_SOURCE_DIR}/bbdo --proto_path=${CMAKE_SOURCE_DIR}/bbdo ${proto_file} - VERBATIM - ) + set(proto_file "${name}.proto") + set(full_proto_file "${CMAKE_SOURCE_DIR}/bbdo/${name}.proto") + add_custom_command(OUTPUT "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.cc" "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.h" + DEPENDS ${full_proto_file} + COMMENT "Generating interface files of the bbdo file ${proto_file}" + COMMAND ${Protobuf_PROTOC_EXECUTABLE} + ARGS --cpp_out=${CMAKE_SOURCE_DIR}/bbdo --proto_path=${CMAKE_SOURCE_DIR}/bbdo ${proto_file} + VERBATIM + ) - add_custom_target("target_${name}" DEPENDS "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.cc" "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.h") + add_custom_target("target_${name}" DEPENDS "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.cc" "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.h") endforeach() add_library( - pb_service_lib STATIC - service.pb.cc - service.pb.h - ) -add_dependencies(pb_service_lib target_service) + pb_service_lib STATIC + service.pb.cc + service.pb.h +) +add_dependencies(pb_service_lib target_service pb_tag_lib) set_target_properties(pb_service_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) add_library( - pb_host_lib STATIC - host.pb.cc - host.pb.h - ) + pb_host_lib STATIC + host.pb.cc + host.pb.h +) 
add_dependencies(pb_host_lib target_host) set_target_properties(pb_host_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) add_library( - pb_severity_lib STATIC - severity.pb.cc - severity.pb.h - ) + pb_severity_lib STATIC + severity.pb.cc + severity.pb.h +) add_dependencies(pb_severity_lib target_host) set_target_properties(pb_severity_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) add_library( - pb_tag_lib STATIC - tag.pb.cc - tag.pb.h - ) + pb_tag_lib STATIC + tag.pb.cc + tag.pb.h +) add_dependencies(pb_tag_lib target_host) set_target_properties(pb_tag_lib PROPERTIES POSITION_INDEPENDENT_CODE ON) @@ -75,87 +75,87 @@ macro(get_protobuf_files name) set_source_files_properties("${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.cc" PROPERTIES GENERATED TRUE) set_source_files_properties("${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.h" PROPERTIES GENERATED TRUE) set(proto_${name} - "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.cc" - "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.h") + "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.cc" + "${CMAKE_SOURCE_DIR}/bbdo/${name}.pb.h") endmacro() macro(get_protobuf_accessor name) - set(proto_file "${name}.proto") - set(full_proto_file "${CMAKE_SOURCE_DIR}/bbdo/${name}.proto") - add_custom_command(OUTPUT "${CMAKE_SOURCE_DIR}/bbdo/${name}_accessor.hh" - DEPENDS ${full_proto_file} - COMMENT "Generating accessor to protobuf message ${proto_file}" - COMMAND python3 - ARGS ${full_proto_file} - VERBATIM - ) + set(proto_file "${name}.proto") + set(full_proto_file "${CMAKE_SOURCE_DIR}/bbdo/${name}.proto") + add_custom_command(OUTPUT "${CMAKE_SOURCE_DIR}/bbdo/${name}_accessor.hh" + DEPENDS ${full_proto_file} + COMMENT "Generating accessor to protobuf message ${proto_file}" + COMMAND python3 + ARGS ${full_proto_file} + VERBATIM + ) endmacro() include_directories("${CMAKE_SOURCE_DIR}/broker/core/inc") add_library( - bbdo_bbdo STATIC - "bbdo/ack.cc" - "bbdo/version_response.cc" - "bbdo/stop.cc" - "bbdo/ack.hh" - "bbdo/version_response.hh" - "bbdo/stop.hh" - ) + bbdo_bbdo STATIC + "bbdo/ack.cc" + 
"bbdo/version_response.cc" + "bbdo/stop.cc" + "bbdo/ack.hh" + "bbdo/version_response.hh" + "bbdo/stop.hh" +) set_target_properties(bbdo_bbdo PROPERTIES POSITION_INDEPENDENT_CODE ON) target_precompile_headers(bbdo_bbdo PRIVATE precomp_inc/precomp.hpp) add_library( - bbdo_storage STATIC - "storage/index_mapping.cc" - "storage/metric_mapping.cc" - "storage/metric.cc" - "storage/rebuild.cc" - "storage/remove_graph.cc" - "storage/status.cc" - "storage/metric.hh" - "storage/rebuild.hh" - "storage/remove_graph.hh" - "storage/status.hh" - ) + bbdo_storage STATIC + "storage/index_mapping.cc" + "storage/metric_mapping.cc" + "storage/metric.cc" + "storage/rebuild.cc" + "storage/remove_graph.cc" + "storage/status.cc" + "storage/metric.hh" + "storage/rebuild.hh" + "storage/remove_graph.hh" + "storage/status.hh" +) set_target_properties(bbdo_storage PROPERTIES POSITION_INDEPENDENT_CODE ON) -target_precompile_headers(bbdo_storage PRIVATE precomp_inc/precomp.hpp) +target_precompile_headers(bbdo_storage REUSE_FROM bbdo_bbdo) add_dependencies(bbdo_storage table_max_size) add_library( - bbdo_bam STATIC - "bam/ba_duration_event.cc" - "bam/dimension_ba_bv_relation_event.hh" - "bam/dimension_kpi_event.cc" - "bam/dimension_timeperiod.hh" - "bam/kpi_status.cc" - "bam/ba_duration_event.hh" - "bam/dimension_ba_event.cc" - "bam/dimension_kpi_event.hh" - "bam/dimension_truncate_table_signal.cc" - "bam/kpi_status.hh" - "bam/ba_event.cc" - "bam/dimension_ba_event.hh" - "bam/dimension_timeperiod.cc" - "bam/dimension_truncate_table_signal.hh" - "bam/rebuild.cc" - "bam/ba_event.hh" - "bam/dimension_ba_timeperiod_relation.cc" - "bam/dimension_timeperiod_exception.cc" - "bam/inherited_downtime.cc" - "bam/rebuild.hh" - "bam/ba_status.cc" - "bam/dimension_ba_timeperiod_relation.hh" - "bam/dimension_timeperiod_exception.hh" - "bam/inherited_downtime.hh" - "bam/ba_status.hh" - "bam/dimension_bv_event.cc" - "bam/dimension_timeperiod_exclusion.cc" - "bam/kpi_event.cc" - 
"bam/dimension_ba_bv_relation_event.cc" - "bam/dimension_bv_event.hh" - "bam/dimension_timeperiod_exclusion.hh" - "bam/kpi_event.hh" - ) + bbdo_bam STATIC + "bam/ba_duration_event.cc" + "bam/dimension_ba_bv_relation_event.hh" + "bam/dimension_kpi_event.cc" + "bam/dimension_timeperiod.hh" + "bam/kpi_status.cc" + "bam/ba_duration_event.hh" + "bam/dimension_ba_event.cc" + "bam/dimension_kpi_event.hh" + "bam/dimension_truncate_table_signal.cc" + "bam/kpi_status.hh" + "bam/ba_event.cc" + "bam/dimension_ba_event.hh" + "bam/dimension_timeperiod.cc" + "bam/dimension_truncate_table_signal.hh" + "bam/rebuild.cc" + "bam/ba_event.hh" + "bam/dimension_ba_timeperiod_relation.cc" + "bam/dimension_timeperiod_exception.cc" + "bam/inherited_downtime.cc" + "bam/rebuild.hh" + "bam/ba_status.cc" + "bam/dimension_ba_timeperiod_relation.hh" + "bam/dimension_timeperiod_exception.hh" + "bam/inherited_downtime.hh" + "bam/ba_status.hh" + "bam/dimension_bv_event.cc" + "bam/dimension_timeperiod_exclusion.cc" + "bam/kpi_event.cc" + "bam/dimension_ba_bv_relation_event.cc" + "bam/dimension_bv_event.hh" + "bam/dimension_timeperiod_exclusion.hh" + "bam/kpi_event.hh" +) set_target_properties(bbdo_bam PROPERTIES POSITION_INDEPENDENT_CODE ON) -target_precompile_headers(bbdo_bam PRIVATE precomp_inc/precomp.hpp) +target_precompile_headers(bbdo_bam REUSE_FROM bbdo_bbdo) add_dependencies(bbdo_bam table_max_size) diff --git a/bbdo/events.hh b/bbdo/events.hh index 67a8ce1ab3d..53ddda28905 100644 --- a/bbdo/events.hh +++ b/bbdo/events.hh @@ -84,7 +84,7 @@ enum data_element { de_version_response = 1, de_ack = 2, de_stop = 3, - de_rebuild_rrd_graphs = 4, + de_rebuild_graphs = 4, de_remove_graphs = 5, }; } diff --git a/broker/bam/postgresql_v2/mod_bam.sql b/broker/bam/postgresql_v2/mod_bam.sql deleted file mode 100644 index 4b146b464d8..00000000000 --- a/broker/bam/postgresql_v2/mod_bam.sql +++ /dev/null @@ -1,30 +0,0 @@ --- --- Business Activities. 
--- -CREATE TABLE mod_bam ( - ba_id serial, - name varchar(254) default NULL, - state_source int default NULL, - - description varchar(254) default NULL, - level_w float default NULL, - level_c float default NULL, - sla_month_percent_w float default NULL, - sla_month_percent_c float default NULL, - sla_month_duration_w int default NULL, - sla_month_duration_c int default NULL, - current_level float default NULL, - downtime float default NULL, - acknowledged float default NULL, - activate enum('1','0') NOT NULL default '0', - last_state_change int default NULL, - current_status smallint default NULL, - in_downtime boolean default NULL, - must_be_rebuild enum('0', '1', '2') NOT NULL default '0', - id_reporting_period int default NULL, - - PRIMARY KEY (ba_id), - UNIQUE (name), - FOREIGN KEY (id_reporting_period) REFERENCES timeperiod (tp_id) - ON DELETE SET NULL -); diff --git a/broker/bam/postgresql_v2/mod_bam_ba_groups.sql b/broker/bam/postgresql_v2/mod_bam_ba_groups.sql deleted file mode 100644 index b403430175c..00000000000 --- a/broker/bam/postgresql_v2/mod_bam_ba_groups.sql +++ /dev/null @@ -1,12 +0,0 @@ --- --- BA Groups (aka BV). --- -CREATE TABLE mod_bam_ba_groups ( - id_ba_group serial, - - ba_group_name varchar(255) default NULL, - ba_group_description varchar(255) default NULL, - visible enum('0', '1') NOT NULL default '1', - - PRIMARY KEY (id_ba_group) -); diff --git a/broker/bam/postgresql_v2/mod_bam_impacts.sql b/broker/bam/postgresql_v2/mod_bam_impacts.sql deleted file mode 100644 index 115d2f00f97..00000000000 --- a/broker/bam/postgresql_v2/mod_bam_impacts.sql +++ /dev/null @@ -1,9 +0,0 @@ --- --- Impacts of KPI / boolean expressions. 
--- -CREATE TABLE mod_bam_impacts ( - id_impact serial, - impact float NOT NULL, - - PRIMARY KEY (id_impact) -); diff --git a/broker/bam/postgresql_v2/mod_bam_poller_relations.sql b/broker/bam/postgresql_v2/mod_bam_poller_relations.sql deleted file mode 100644 index f6906805c55..00000000000 --- a/broker/bam/postgresql_v2/mod_bam_poller_relations.sql +++ /dev/null @@ -1,10 +0,0 @@ --- --- BA/poller relation table. --- -CREATE TABLE mod_bam_poller_relations ( - ba_id int NOT NULL, - poller_id int NOT NULL, - - FOREIGN KEY (ba_id) REFERENCES mod_bam (ba_id) - ON DELETE CASCADE -) ENGINE=InnoDB CHARACTER SET utf8; diff --git a/broker/bam/postgresql_v2/mod_bam_reporting_ba_events.sql b/broker/bam/postgresql_v2/mod_bam_reporting_ba_events.sql deleted file mode 100644 index efe6e9e71a6..00000000000 --- a/broker/bam/postgresql_v2/mod_bam_reporting_ba_events.sql +++ /dev/null @@ -1,17 +0,0 @@ --- --- BA events. --- -CREATE TABLE mod_bam_reporting_ba_events ( - ba_event_id serial, - ba_id int NOT NULL, - start_time int NOT NULL, - - first_level double default NULL, - end_time int default NULL, - status smallint default NULL, - in_downtime boolean default NULL, - - PRIMARY KEY (ba_event_id), - KEY (ba_id, start_time), - KEY (ba_id, end_time) -); diff --git a/broker/bam/postgresql_v2/mod_bam_reporting_bv.sql b/broker/bam/postgresql_v2/mod_bam_reporting_bv.sql deleted file mode 100644 index f1bda16dbdc..00000000000 --- a/broker/bam/postgresql_v2/mod_bam_reporting_bv.sql +++ /dev/null @@ -1,12 +0,0 @@ --- --- Business Views. 
--- -CREATE TABLE mod_bam_reporting_bv ( - bv_id serial, - bv_name varchar(45) default NULL, - - bv_description text default NULL, - - PRIMARY KEY (bv_id), - UNIQUE (bv_name) -); diff --git a/broker/bam/postgresql_v2/mod_bam_reporting_kpi_events.sql b/broker/bam/postgresql_v2/mod_bam_reporting_kpi_events.sql deleted file mode 100644 index 9cd4fc54a40..00000000000 --- a/broker/bam/postgresql_v2/mod_bam_reporting_kpi_events.sql +++ /dev/null @@ -1,18 +0,0 @@ --- --- KPI events. --- -CREATE TABLE mod_bam_reporting_kpi_events ( - kpi_event_id serial, - kpi_id int NOT NULL, - start_time int NOT NULL, - - end_time int default NULL, - status smallint default NULL, - in_downtime boolean default NULL, - impact_level smallint default NULL, - first_output text default NULL, - first_perfdata varchar(45) default NULL, - - PRIMARY KEY (kpi_event_id), - KEY (kpi_id, start_time) -); diff --git a/broker/bam/postgresql_v3/cfg_bam.sql b/broker/bam/postgresql_v3/cfg_bam.sql deleted file mode 100644 index 0acfe7599a0..00000000000 --- a/broker/bam/postgresql_v3/cfg_bam.sql +++ /dev/null @@ -1,36 +0,0 @@ --- --- Business Activities. 
--- -CREATE TABLE cfg_bam ( - ba_id serial, - name varchar(254) default NULL, - state_source int default NULL, - - description varchar(254) default NULL, - level_w float default NULL, - level_c float default NULL, - sla_month_percent_w float default NULL, - sla_month_percent_c float default NULL, - sla_month_duration_w int default NULL, - sla_month_duration_c int default NULL, - current_level float default NULL, - downtime float default NULL, - acknowledged float default NULL, - activate enum('1','0') NOT NULL default '0', - last_state_change int default NULL, - current_status smallint default NULL, - in_downtime boolean default NULL, - must_be_rebuild enum('0', '1', '2') NOT NULL default '0', - id_reporting_period int default NULL, - ba_type_id int NOT NULL, - organization_id int NOT NULL, - - PRIMARY KEY (ba_id), - UNIQUE (name), - FOREIGN KEY (id_reporting_period) REFERENCES timeperiod (tp_id) - ON DELETE SET NULL, - FOREIGN KEY (ba_type_id) REFERENCES cfg_bam_ba_types (ba_type_id) - ON DELETE RESTRICT, - FOREIGN KEY (organization_id) REFERENCES cfg_organizations (organization_id) - ON DELETE CASCADE -); diff --git a/broker/bam/postgresql_v3/cfg_bam_ba_groups.sql b/broker/bam/postgresql_v3/cfg_bam_ba_groups.sql deleted file mode 100644 index 45b316d244d..00000000000 --- a/broker/bam/postgresql_v3/cfg_bam_ba_groups.sql +++ /dev/null @@ -1,13 +0,0 @@ --- --- BA Groups (aka BV). --- -CREATE TABLE cfg_bam_ba_groups ( - id_ba_group serial, - ba_group_name varchar(255) NOT NULL, - - ba_group_description varchar(255) default NULL, - visible enum('0', '1') NOT NULL default '1', - - PRIMARY KEY (id_ba_group), - UNIQUE (ba_group_name) -); diff --git a/broker/bam/postgresql_v3/cfg_bam_ba_types.sql b/broker/bam/postgresql_v3/cfg_bam_ba_types.sql deleted file mode 100644 index ae62570fd75..00000000000 --- a/broker/bam/postgresql_v3/cfg_bam_ba_types.sql +++ /dev/null @@ -1,12 +0,0 @@ --- --- Business Activities types. 
--- -CREATE TABLE cfg_bam_ba_types ( - ba_type_id serial, - name varchar(255) default NULL, - slug varchar(255) default NULL, - description varchar(255) default NULL, - - PRIMARY KEY (ba_type_id), - KEY (name) -); diff --git a/broker/bam/postgresql_v3/cfg_bam_boolean.sql b/broker/bam/postgresql_v3/cfg_bam_boolean.sql deleted file mode 100644 index c6a7e96dd09..00000000000 --- a/broker/bam/postgresql_v3/cfg_bam_boolean.sql +++ /dev/null @@ -1,14 +0,0 @@ --- --- BAM boolean expressions. --- -CREATE TABLE cfg_bam_boolean ( - boolean_id serial, - name varchar(255) NOT NULL, - - expression text NOT NULL, - bool_state boolean NOT NULL default 1, - activate boolean NOT NULL default 0, - - PRIMARY KEY (boolean_id), - UNIQUE (name) -); diff --git a/broker/bam/postgresql_v3/cfg_bam_impacts.sql b/broker/bam/postgresql_v3/cfg_bam_impacts.sql deleted file mode 100644 index 168fe9b7bf1..00000000000 --- a/broker/bam/postgresql_v3/cfg_bam_impacts.sql +++ /dev/null @@ -1,9 +0,0 @@ --- --- Impacts of KPI / boolean expressions. --- -CREATE TABLE cfg_bam_impacts ( - id_impact serial, - impact float NOT NULL, - - PRIMARY KEY (id_impact) -); diff --git a/broker/bam/postgresql_v3/mod_bam_reporting_ba_events.sql b/broker/bam/postgresql_v3/mod_bam_reporting_ba_events.sql deleted file mode 100644 index d892312875e..00000000000 --- a/broker/bam/postgresql_v3/mod_bam_reporting_ba_events.sql +++ /dev/null @@ -1,17 +0,0 @@ --- --- BA events. 
--- -CREATE TABLE mod_bam_reporting_ba_events ( - ba_event_id serial, - ba_id int NOT NULL, - start_time int NOT NULL, - - first_level double default NULL, - end_time int default NULL, - status smallint default NULL, - in_downtime boolean default NULL, - - PRIMARY KEY (ba_event_id), - UNIQUE (ba_id, start_time), - KEY (ba_id, end_time) -); diff --git a/broker/bam/postgresql_v3/mod_bam_reporting_bv.sql b/broker/bam/postgresql_v3/mod_bam_reporting_bv.sql deleted file mode 100644 index f1bda16dbdc..00000000000 --- a/broker/bam/postgresql_v3/mod_bam_reporting_bv.sql +++ /dev/null @@ -1,12 +0,0 @@ --- --- Business Views. --- -CREATE TABLE mod_bam_reporting_bv ( - bv_id serial, - bv_name varchar(45) default NULL, - - bv_description text default NULL, - - PRIMARY KEY (bv_id), - UNIQUE (bv_name) -); diff --git a/broker/bam/postgresql_v3/mod_bam_reporting_kpi_events.sql b/broker/bam/postgresql_v3/mod_bam_reporting_kpi_events.sql deleted file mode 100644 index e5757d6026f..00000000000 --- a/broker/bam/postgresql_v3/mod_bam_reporting_kpi_events.sql +++ /dev/null @@ -1,18 +0,0 @@ --- --- KPI events. --- -CREATE TABLE mod_bam_reporting_kpi_events ( - kpi_event_id serial, - kpi_id int NOT NULL, - start_time int NOT NULL, - - end_time int default NULL, - status smallint default NULL, - in_downtime boolean default NULL, - impact_level smallint default NULL, - first_output text default NULL, - first_perfdata varchar(45) default NULL, - - PRIMARY KEY (kpi_event_id), - UNIQUE (kpi_id, start_time) -); diff --git a/broker/bam/src/ba.cc b/broker/bam/src/ba.cc index 7cfbfd6e87a..d67136d100c 100644 --- a/broker/bam/src/ba.cc +++ b/broker/bam/src/ba.cc @@ -1,5 +1,5 @@ /* -** Copyright 2014-2016, 2021 Centreon +** Copyright 2014-2016, 2021-2022 Centreon ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. 
@@ -40,19 +40,6 @@ auto normalize = [](double d) -> double { return d; }; -static bool _every_kpi_in_dt( - std::unordered_map& imp) { - if (imp.empty()) - return false; - - for (auto it = imp.begin(), end = imp.end(); it != end; ++it) { - if (!it->first->in_downtime()) - return false; - } - - return true; -} - /** * Constructor. * @@ -312,6 +299,19 @@ state ba::get_state_hard() { return state_ok; }; + auto every_kpi_in_dt = + [](std::unordered_map& imp) -> bool { + if (imp.empty()) + return false; + + for (auto it = imp.begin(), end = imp.end(); it != end; ++it) { + if (!it->first->in_downtime()) + return false; + } + + return true; + }; + switch (_state_source) { case configuration::ba::state_source_impact: if (!_valid) @@ -326,7 +326,7 @@ state ba::get_state_hard() { case configuration::ba::state_source_best: case configuration::ba::state_source_worst: if (_dt_behaviour == configuration::ba::dt_ignore_kpi && - _every_kpi_in_dt(_impacts)) + every_kpi_in_dt(_impacts)) state = state_ok; else state = _computed_hard_state; diff --git a/broker/bam/src/configuration/bool_expression.cc b/broker/bam/src/configuration/bool_expression.cc index 5adb7774459..86a44b14ed3 100644 --- a/broker/bam/src/configuration/bool_expression.cc +++ b/broker/bam/src/configuration/bool_expression.cc @@ -141,10 +141,10 @@ void bool_expression::set_name(std::string const& name) { /** * Set expression * - * @param[in] exp Set the textual value for the expression. + * @param[in] expr Set the textual value for the expression. 
*/ -void bool_expression::set_expression(std::string const& exp) { - _expression = exp; +void bool_expression::set_expression(std::string const& expr) { + _expression = expr; } /** diff --git a/broker/bam/src/kpi_service.cc b/broker/bam/src/kpi_service.cc index de52b9ae22e..beed8334fb8 100644 --- a/broker/bam/src/kpi_service.cc +++ b/broker/bam/src/kpi_service.cc @@ -1,5 +1,5 @@ /* -** Copyright 2014-2015, 2021 Centreon +** Copyright 2014-2015, 2021-2022 Centreon ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. @@ -181,7 +181,7 @@ bool kpi_service::is_acknowledged() const { * @param[out] visitor Object that will receive events. */ void kpi_service::service_update( - std::shared_ptr const& status, + const std::shared_ptr& status, io::stream* visitor) { if (status && status->host_id == _host_id && status->service_id == _service_id) { @@ -298,18 +298,21 @@ void kpi_service::service_update( * @param[in] dt * @param[out] visitor Object that will receive events. */ -void kpi_service::service_update(std::shared_ptr const& dt, +void kpi_service::service_update(const std::shared_ptr& dt, io::stream* visitor) { assert(dt && dt->host_id == _host_id && dt->service_id == _service_id); // Update information. 
- _downtimed = dt->was_started && dt->actual_end_time.is_null(); - if (_downtime_ids.contains(dt->internal_id) && !dt->was_cancelled) { + bool downtimed = dt->was_started && dt->actual_end_time.is_null(); + if (!_downtimed && downtimed) + _downtimed = true; + + if (_downtime_ids.contains(dt->internal_id) && dt->deletion_time.is_null()) { log_v2::bam()->trace("Downtime {} already handled in this kpi service", dt->internal_id); return; } - if (_downtimed) { + if (downtimed) { log_v2::bam()->trace("adding in kpi service the impacting downtime {}", dt->internal_id); _downtime_ids.insert(dt->internal_id); @@ -317,6 +320,7 @@ void kpi_service::service_update(std::shared_ptr const& dt, log_v2::bam()->trace("removing from kpi service the impacting downtime {}", dt->internal_id); _downtime_ids.erase(dt->internal_id); + _downtimed = !_downtime_ids.empty(); } if (!_event || _event->in_downtime != _downtimed) { diff --git a/broker/bam/test/ba/kpi_service.cc b/broker/bam/test/ba/kpi_service.cc index eaab1cf37ad..cfe27c45fbd 100644 --- a/broker/bam/test/ba/kpi_service.cc +++ b/broker/bam/test/ba/kpi_service.cc @@ -968,6 +968,7 @@ TEST_F(BamBA, KpiServiceDt) { std::cout << "service_update 1" << std::endl; kpis[0]->service_update(dt, _visitor.get()); + dt->deletion_time = now + 2 + 10 * i + 5; dt->actual_end_time = now + 2 + 10 * i + 5; dt->was_cancelled = true; std::cout << "service_update 2" << std::endl; diff --git a/broker/core/inc/com/centreon/broker/broker_impl.hh b/broker/core/inc/com/centreon/broker/broker_impl.hh index bba0f60efb5..06ea8cc3f16 100644 --- a/broker/core/inc/com/centreon/broker/broker_impl.hh +++ b/broker/core/inc/com/centreon/broker/broker_impl.hh @@ -28,11 +28,11 @@ CCB_BEGIN() /** - * Here is a declaration of pb_rebuild_rrd_graphs which is a bbdo event we use + * Here is a declaration of pb_rebuild_graphs which is a bbdo event we use * to ask rebuild of metrics. MetricIds is a vector of metric ids to rebuild. 
*/ namespace bbdo { -using pb_rebuild_rrd_graphs = - io::protobuf; +using pb_rebuild_graphs = + io::protobuf; using pb_remove_graphs = io::protobuf; } // namespace bbdo diff --git a/broker/core/inc/com/centreon/broker/config/state.hh b/broker/core/inc/com/centreon/broker/config/state.hh index 141566c18a8..b82d2ea4ce4 100644 --- a/broker/core/inc/com/centreon/broker/config/state.hh +++ b/broker/core/inc/com/centreon/broker/config/state.hh @@ -37,6 +37,7 @@ namespace config { class state { int _broker_id; uint16_t _rpc_port; + std::string _listen_address; std::string _broker_name; std::tuple _bbdo_version; std::string _cache_directory; @@ -72,6 +73,8 @@ class state { int broker_id() const noexcept; void rpc_port(uint16_t port) noexcept; uint16_t rpc_port(void) const noexcept; + void listen_address(const std::string& listen_address) noexcept; + const std::string& listen_address() const noexcept; void broker_name(std::string const& name); const std::string& broker_name() const noexcept; void bbdo_version(std::tuple&& v); diff --git a/broker/core/inc/com/centreon/broker/mapping/entry.hh b/broker/core/inc/com/centreon/broker/mapping/entry.hh index 052c4fad865..c194b6a8fa4 100644 --- a/broker/core/inc/com/centreon/broker/mapping/entry.hh +++ b/broker/core/inc/com/centreon/broker/mapping/entry.hh @@ -239,14 +239,7 @@ class entry { other._source = nullptr; } - ~entry() noexcept { - // This is not the better fix, but entries are static objects - // only destroyed at the end of the program. 
- // if (_source) { - // delete _source; - // _source = nullptr; - // } - } + ~entry() noexcept = default; entry& operator=(entry const&) = delete; uint32_t get_attribute() const { return _attribute; } bool get_bool(const io::data& d) const; diff --git a/broker/core/precomp_inc/precomp.hpp b/broker/core/precomp_inc/precomp.hpp index 67327c37310..0141c8a79d1 100644 --- a/broker/core/precomp_inc/precomp.hpp +++ b/broker/core/precomp_inc/precomp.hpp @@ -58,4 +58,6 @@ #include +#include + #endif diff --git a/broker/core/src/bbdo/stream.cc b/broker/core/src/bbdo/stream.cc index 48424939e8e..9de6b3a0dac 100644 --- a/broker/core/src/bbdo/stream.cc +++ b/broker/core/src/bbdo/stream.cc @@ -809,7 +809,7 @@ void stream::negotiate(stream::negotiation_type neg) { proto_it = io::protocols::instance().begin(), proto_end = io::protocols::instance().end(); proto_it != proto_end; ++proto_it) - if (proto_it->first == ext->name()) { + if (boost::iequals(proto_it->first, ext->name())) { std::shared_ptr s{ proto_it->second.endpntfactry->new_stream( _substream, neg == negotiate_second, ext->options())}; diff --git a/broker/core/src/broker.proto b/broker/core/src/broker.proto index 80e77f5c639..b9ccb073dcd 100644 --- a/broker/core/src/broker.proto +++ b/broker/core/src/broker.proto @@ -146,7 +146,7 @@ message BrokerStats { } message IndexIds { - repeated uint64 index_id = 1; + repeated uint64 index_ids = 1; } message ToRemove { diff --git a/broker/core/src/broker_impl.cc b/broker/core/src/broker_impl.cc index 963b7dd842e..d166f4e44d3 100644 --- a/broker/core/src/broker_impl.cc +++ b/broker/core/src/broker_impl.cc @@ -238,7 +238,7 @@ grpc::Status broker_impl::RebuildRRDGraphs(grpc::ServerContext* context ::google::protobuf::Empty* response __attribute__((unused))) { multiplexing::publisher pblshr; - auto e{std::make_shared(*request)}; + auto e{std::make_shared(*request)}; pblshr.write(e); return grpc::Status::OK; } diff --git a/broker/core/src/brokerrpc.cc b/broker/core/src/brokerrpc.cc 
index decfa24fb50..edc5088bedd 100644 --- a/broker/core/src/brokerrpc.cc +++ b/broker/core/src/brokerrpc.cc @@ -39,9 +39,8 @@ brokerrpc::brokerrpc(const std::string& address, /* Lets' register the rebuild_metrics bbdo event. This is needed to send the * rebuild message. */ - e.register_event(make_type(io::bbdo, bbdo::de_rebuild_rrd_graphs), - "rebuild_rrd_graphs", - &bbdo::pb_rebuild_rrd_graphs::operations); + e.register_event(make_type(io::bbdo, bbdo::de_rebuild_graphs), + "rebuild_graphs", &bbdo::pb_rebuild_graphs::operations); /* Lets' register the to_remove bbdo event.*/ e.register_event(make_type(io::bbdo, bbdo::de_remove_graphs), diff --git a/broker/core/src/config/parser.cc b/broker/core/src/config/parser.cc index 0cb0ddfc6a8..7c8a3a03a3a 100644 --- a/broker/core/src/config/parser.cc +++ b/broker/core/src/config/parser.cc @@ -128,10 +128,27 @@ state parser::parse(std::string const& file) { &json::is_number, &json::get)) ; else if (it.key() == "grpc" && it.value().is_object()) { - if (json_document["centreonBroker"]["grpc"]["rpc_port"].is_number()) { - retval.rpc_port(static_cast( - json_document["centreonBroker"]["grpc"]["rpc_port"] - .get())); + if (json_document["centreonBroker"]["grpc"].contains("rpc_port")) { + if (json_document["centreonBroker"]["grpc"]["rpc_port"] + .is_number()) { + retval.rpc_port(static_cast( + json_document["centreonBroker"]["grpc"]["rpc_port"] + .get())); + } else + throw msg_fmt( + "The rpc_port value in the grpc object should be an integer"); + } + if (json_document["centreonBroker"]["grpc"].contains( + "listen_address")) { + if (json_document["centreonBroker"]["grpc"]["listen_address"] + .is_string()) { + retval.listen_address( + json_document["centreonBroker"]["grpc"]["listen_address"] + .get()); + } else + throw msg_fmt( + "The listen_address value in the grpc object should be a " + "string"); } } else if (it.key() == "bbdo_version" && it.value().is_string()) { std::string version = 
json_document["centreonBroker"]["bbdo_version"] diff --git a/broker/core/src/config/state.cc b/broker/core/src/config/state.cc index 9db297abff1..b9359d4a8bc 100644 --- a/broker/core/src/config/state.cc +++ b/broker/core/src/config/state.cc @@ -45,6 +45,7 @@ state::state() state::state(const state& other) : _broker_id(other._broker_id), _rpc_port(other._rpc_port), + _listen_address{other._listen_address}, _broker_name(other._broker_name), _cache_directory(other._cache_directory), _command_file(other._command_file), @@ -74,6 +75,7 @@ state& state::operator=(state const& other) { if (this != &other) { _broker_id = other._broker_id; _rpc_port = other._rpc_port; + _listen_address = other._listen_address; _broker_name = other._broker_name; _cache_directory = other._cache_directory; _command_file = other._command_file; @@ -96,6 +98,7 @@ state& state::operator=(state const& other) { void state::clear() { _broker_id = 0; _rpc_port = 0; + _listen_address.resize(0); _broker_name.clear(); _cache_directory.clear(); _command_file.clear(); @@ -397,10 +400,30 @@ std::string const& state::poller_name() const noexcept { void state::rpc_port(uint16_t port) noexcept { _rpc_port = port; } -uint16_t state::rpc_port(void) const noexcept { +uint16_t state::rpc_port() const noexcept { return _rpc_port; } +/** + * @brief Force the interface address to listen from for the gRPC API. + * + * @param listen_address An address or a hostname ("127.0.0.1", "localhost", + * ...) + */ +void state::listen_address(const std::string& listen_address) noexcept { + _listen_address = listen_address; +} + +/** + * @brief Access to the configured listen address or an empty string if not + * defined. The behavior of broker in the latter is to listen from localhost. + * + * @return The listen address for the gRPC API. 
+ */ +const std::string& state::listen_address() const noexcept { + return _listen_address; +} + state::log& state::log_conf() { return _log_conf; } diff --git a/broker/core/src/database/mysql_result.cc b/broker/core/src/database/mysql_result.cc index b4959b6ccfa..e60607e0c61 100644 --- a/broker/core/src/database/mysql_result.cc +++ b/broker/core/src/database/mysql_result.cc @@ -19,6 +19,8 @@ #include "com/centreon/exceptions/msg_fmt.hh" +#include + using namespace com::centreon::exceptions; using namespace com::centreon::broker; using namespace com::centreon::broker::database; @@ -107,13 +109,19 @@ bool mysql_result::value_as_bool(int idx) { bool retval; if (_bind) retval = _bind->value_as_bool(idx); - else if (_row) - retval = _row[idx] ? strtol(_row[idx], nullptr, 10) : 0; - else + else if (_row) { + if (_row[idx]) { + if (!absl::SimpleAtob(_row[idx], &retval)) + throw msg_fmt( + "mysql: result at index {} should be a boolean, the current value " + "is '{}'", + idx, _row[idx]); + } else + retval = false; + } else throw msg_fmt("mysql: No row fetched in result"); return retval; } - /** * Accessor to a column string value * @@ -143,9 +151,16 @@ float mysql_result::value_as_f32(int idx) { float retval; if (_bind) retval = _bind->value_as_f32(idx); - else if (_row) - retval = _row[idx] ? atof(_row[idx]) : 0; - else + else if (_row) { + if (_row[idx]) { + if (!absl::SimpleAtof(_row[idx], &retval)) + throw msg_fmt( + "mysql: result at index {} should be a float, the current value " + "is '{}'", + idx, _row[idx]); + } else + retval = 0; + } else throw msg_fmt("mysql: No row fetched in result"); return retval; } @@ -161,9 +176,16 @@ double mysql_result::value_as_f64(int idx) { double retval; if (_bind) retval = _bind->value_as_f64(idx); - else if (_row) - retval = _row[idx] ? 
atof(_row[idx]) : 0; - else + else if (_row) { + if (_row[idx]) { + if (!absl::SimpleAtod(_row[idx], &retval)) + throw msg_fmt( + "mysql: result at index {} should be a double, the current value " + "is '{}'", + idx, _row[idx]); + } else + retval = 0; + } else throw msg_fmt("mysql: No row fetched in result"); return retval; } @@ -179,9 +201,16 @@ int mysql_result::value_as_i32(int idx) { int retval; if (_bind) retval = _bind->value_as_i32(idx); - else if (_row) - retval = _row[idx] ? strtol(_row[idx], nullptr, 10) : 0; - else + else if (_row) { + if (_row[idx]) { + if (!absl::SimpleAtoi(_row[idx], &retval)) + throw msg_fmt( + "mysql: result at index {} should be an integer, the current value " + "is '{}'", + idx, _row[idx]); + } else + retval = 0; + } else throw msg_fmt("mysql: No row fetched in result"); return retval; } @@ -197,9 +226,16 @@ uint32_t mysql_result::value_as_u32(int idx) { uint32_t retval; if (_bind) retval = _bind->value_as_u32(idx); - else if (_row) - retval = _row[idx] ? strtoul(_row[idx], nullptr, 10) : 0; - else + else if (_row) { + if (_row[idx]) { + if (!absl::SimpleAtoi(_row[idx], &retval)) + throw msg_fmt( + "mysql: result at index {} should be an unsigned integer, the " + "current value is '{}'", + idx, _row[idx]); + } else + retval = 0; + } else throw msg_fmt("mysql: No row fetched in result"); return retval; } @@ -215,9 +251,16 @@ int64_t mysql_result::value_as_i64(int idx) { int64_t retval; if (_bind) retval = _bind->value_as_i64(idx); - else if (_row) - retval = _row[idx] ? 
strtoll(_row[idx], nullptr, 10) : 0; - else + else if (_row) { + if (_row[idx]) { + if (!absl::SimpleAtoi(_row[idx], &retval)) + throw msg_fmt( + "mysql: result at index {} should be an integer, the current value " + "is '{}'", + idx, _row[idx]); + } else + retval = 0; + } else throw msg_fmt("mysql: No row fetched in result"); return retval; } @@ -233,9 +276,16 @@ uint64_t mysql_result::value_as_u64(int idx) { uint64_t retval; if (_bind) retval = _bind->value_as_u64(idx); - else if (_row) - retval = _row[idx] ? strtoull(_row[idx], nullptr, 10) : 0; - else + else if (_row) { + if (_row[idx]) { + if (!absl::SimpleAtoi(_row[idx], &retval)) + throw msg_fmt( + "mysql: result at index {} should be an unsigned integer, the " + "current value is '{}'", + idx, _row[idx]); + } else + retval = 0; + } else throw msg_fmt("mysql: No row fetched in result"); return retval; } diff --git a/broker/core/src/database/mysql_stmt.cc b/broker/core/src/database/mysql_stmt.cc index 27bcb04e4c4..57e3c77d283 100644 --- a/broker/core/src/database/mysql_stmt.cc +++ b/broker/core/src/database/mysql_stmt.cc @@ -429,7 +429,7 @@ void mysql_stmt::operator<<(io::data const& d) { void mysql_stmt::bind_value_as_i32(int range, int value) { if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_i32(range, value); } @@ -458,7 +458,7 @@ void mysql_stmt::bind_value_as_i32(std::string const& name, int value) { void mysql_stmt::bind_value_as_u32(int range, uint32_t value) { if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_u32(range, value); } @@ -493,7 +493,7 @@ void mysql_stmt::bind_value_as_u32(std::string const& name, uint32_t value) { */ void mysql_stmt::bind_value_as_i64(int range, int64_t value) { if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_i64(range, value); } @@ 
-534,7 +534,7 @@ void mysql_stmt::bind_value_as_i64(std::string const& name, int64_t value) { */ void mysql_stmt::bind_value_as_u64(int range, uint64_t value) { if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_u64(range, value); } @@ -575,7 +575,7 @@ void mysql_stmt::bind_value_as_u64(std::string const& name, uint64_t value) { */ void mysql_stmt::bind_value_as_f32(int range, float value) { if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_f32(range, value); } @@ -610,7 +610,7 @@ void mysql_stmt::bind_value_as_f32(std::string const& name, float value) { */ void mysql_stmt::bind_value_as_f64(int range, double value) { if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_f64(range, value); } @@ -639,7 +639,7 @@ void mysql_stmt::bind_value_as_f64(std::string const& name, double value) { void mysql_stmt::bind_value_as_tiny(int range, char value) { if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_tiny(range, value); } @@ -668,7 +668,7 @@ void mysql_stmt::bind_value_as_tiny(std::string const& name, char value) { void mysql_stmt::bind_value_as_bool(int range, bool value) { if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_bool(range, value); } @@ -697,7 +697,7 @@ void mysql_stmt::bind_value_as_bool(std::string const& name, bool value) { void mysql_stmt::bind_value_as_str(int range, const fmt::string_view& value) { if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_str(range, value); } @@ -726,7 +726,7 @@ void mysql_stmt::bind_value_as_str(std::string const& name, void mysql_stmt::bind_value_as_null(int range) 
{ if (!_bind) - _bind.reset(new database::mysql_bind(_param_count)); + _bind = std::make_unique(_param_count); _bind->set_value_as_null(range); } diff --git a/broker/core/src/main.cc b/broker/core/src/main.cc index c80eca362ca..dd78623fce2 100644 --- a/broker/core/src/main.cc +++ b/broker/core/src/main.cc @@ -55,7 +55,6 @@ static std::atomic_bool gl_term{false}; static struct option long_options[] = {{"pool_size", required_argument, 0, 's'}, {"check", no_argument, 0, 'c'}, - {"debug", no_argument, 0, 'd'}, {"diagnose", no_argument, 0, 'D'}, {"version", no_argument, 0, 'v'}, {"help", no_argument, 0, 'h'}, @@ -143,6 +142,7 @@ int main(int argc, char* argv[]) { int opt, option_index = 0, n_thread = 0; std::string broker_name{"unknown"}; uint16_t default_port{51000}; + std::string default_listen_address{"localhost"}; // Set configuration update handler. if (signal(SIGHUP, hup_handler) == SIG_ERR) { @@ -165,38 +165,37 @@ int main(int argc, char* argv[]) { try { // Check the command line. - bool check(false); - bool debug(false); - bool diagnose(false); - bool help(false); - bool version(false); - - opt = getopt_long(argc, argv, "s:cdDvh", long_options, &option_index); - switch (opt) { - case 's': - if (!absl::SimpleAtoi(optarg, &n_thread)) { - throw msg_fmt("The option -s expects a positive integer"); - } - break; - case 'c': - check = true; - break; - case 'd': - debug = true; - break; - case 'D': - diagnose = true; - break; - case 'h': - help = true; - break; - case 'v': - version = true; - break; - default: - break; + bool check{false}; + bool diagnose{false}; + bool help{false}; + bool version{false}; + + while ((opt = getopt_long(argc, argv, "s:cDvh", long_options, + &option_index)) != -1) { + switch (opt) { + case 's': + if (!absl::SimpleAtoi(optarg, &n_thread)) { + throw msg_fmt("The option -s expects a positive integer"); + } + break; + case 'c': + check = true; + break; + case 'D': + diagnose = true; + break; + case 'h': + help = true; + break; + case 'v': + 
version = true; + break; + default: + throw msg_fmt( + "Enter allowed options : [-s ] [-c] [-D] [-h] [-v]"); + break; + } } - if (optind < argc) while (optind < argc) gl_mainconfigfiles.push_back(argv[optind++]); @@ -212,14 +211,14 @@ int main(int argc, char* argv[]) { diag.generate(gl_mainconfigfiles); } else if (help) { log_v2::core()->info( - "USAGE: {} [-t] [-c] [-d] [-D] [-h] [-v] []", argv[0]); - - log_v2::core()->info(" -t Set x threads."); - log_v2::core()->info(" -c Check configuration file."); - log_v2::core()->info(" -d Enable debug mode."); - log_v2::core()->info(" -D Generate a diagnostic file."); - log_v2::core()->info(" -h Print this help."); - log_v2::core()->info(" -v Print Centreon Broker version."); + "USAGE: {} [-s ] [-c] [-D] [-h] [-v] []", + argv[0]); + + log_v2::core()->info(" '-s' Set poolsize threads."); + log_v2::core()->info(" '-c' Check configuration file."); + log_v2::core()->info(" '-D' Generate a diagnostic file."); + log_v2::core()->info(" '-h' Print this help."); + log_v2::core()->info(" '-v' Print Centreon Broker version."); log_v2::core()->info("Centreon Broker {}", CENTREON_BROKER_VERSION); log_v2::core()->info("Copyright 2009-2021 Centreon"); log_v2::core()->info( @@ -230,7 +229,8 @@ int main(int argc, char* argv[]) { retval = 0; } else if (gl_mainconfigfiles.empty()) { log_v2::core()->error( - "USAGE: {} [-c] [-d] [-D] [-h] [-v] []\n\n", argv[0]); + "USAGE: {} [-s ] [-c] [-D] [-h] [-v] []\n\n", + argv[0]); return 1; } else { log_v2::core()->info("Centreon Broker {}", CENTREON_BROKER_VERSION); @@ -261,12 +261,15 @@ int main(int argc, char* argv[]) { gl_state = conf; } + if (!gl_state.listen_address().empty()) + default_listen_address = gl_state.listen_address(); + if (gl_state.rpc_port() == 0) default_port += gl_state.broker_id(); else default_port = gl_state.rpc_port(); std::unique_ptr > rpc( - new brokerrpc("0.0.0.0", default_port, broker_name), + new brokerrpc(default_listen_address, default_port, broker_name), [](brokerrpc* 
rpc) { rpc->shutdown(); delete rpc; diff --git a/broker/core/test/config/parser.cc b/broker/core/test/config/parser.cc index 5d11489e847..4a748861547 100644 --- a/broker/core/test/config/parser.cc +++ b/broker/core/test/config/parser.cc @@ -882,3 +882,112 @@ TEST(parser, unifiedSqlVsStorageSql) { // Remove temporary file. ::remove(config_file.c_str()); } + +TEST(parser, grpc_full) { + // File name. + std::string config_file(misc::temp_path()); + + // Open file. + FILE* file_stream(fopen(config_file.c_str(), "w")); + if (!file_stream) + throw msg_fmt("could not open '{}'", config_file); + // Data. + std::string data; + data = + "{\n" + " \"centreonBroker\": {\n" + " \"broker_id\": 1,\n" + " \"broker_name\": \"central-broker-master\",\n" + " \"poller_id\": 1,\n" + " \"poller_name\": \"Central\",\n" + " \"module_directory\": " + "\"/etc\",\n" + " \"log_timestamp\": true,\n" + " \"event_queue_max_size\": 100000,\n" + " \"command_file\": \"/var/lib/centreon-broker/command.sock\",\n" + " \"cache_directory\": \"/tmp\",\n" + " \"log\": {\n" + " \"directory\": \"/tmp\"\n" + " },\n" + " \"grpc\": {\n" + " \"rpc_port\": 51001,\n" + " \"listen_address\": \"10.0.2.26\"\n" + " }\n" + " }\n" + "}\n"; + + // Write data. + if (fwrite(data.c_str(), data.size(), 1, file_stream) != 1) + throw msg_fmt("could not write content of '{}'", config_file); + + // Close file. + fclose(file_stream); + + // Parse. + config::parser p; + config::state s{p.parse(config_file)}; + + // Remove temporary file. 
+ ::remove(config_file.c_str()); + + // Check global params + ASSERT_EQ(s.rpc_port(), 51001); + ASSERT_EQ(s.listen_address(), std::string("10.0.2.26")); + ASSERT_EQ(s.broker_id(), 1); + ASSERT_EQ(s.broker_name(), "central-broker-master"); + ASSERT_EQ(s.poller_id(), 1); + ASSERT_EQ(s.module_directory(), "/etc"); + ASSERT_EQ(s.event_queue_max_size(), 100000); + ASSERT_EQ(s.command_file(), "/var/lib/centreon-broker/command.sock"); + ASSERT_EQ(s.cache_directory(), "/tmp/"); + ASSERT_EQ(s.log_conf().directory, "/tmp"); + ASSERT_EQ(s.log_conf().max_size, 0u); +} + +TEST(parser, grpc_in_error) { + // File name. + std::string config_file(misc::temp_path()); + + // Open file. + FILE* file_stream(fopen(config_file.c_str(), "w")); + if (!file_stream) + throw msg_fmt("could not open '{}'", config_file); + // Data. + std::string data; + data = + "{\n" + " \"centreonBroker\": {\n" + " \"broker_id\": 1,\n" + " \"broker_name\": \"central-broker-master\",\n" + " \"poller_id\": 1,\n" + " \"poller_name\": \"Central\",\n" + " \"module_directory\": " + "\"/etc\",\n" + " \"log_timestamp\": true,\n" + " \"event_queue_max_size\": 100000,\n" + " \"command_file\": \"/var/lib/centreon-broker/command.sock\",\n" + " \"cache_directory\": \"/tmp\",\n" + " \"log\": {\n" + " \"directory\": \"/tmp\"\n" + " },\n" + " \"grpc\": {\n" + " \"rpc_port\": \"foo\",\n" + " \"listen_address\": \"10.0.2.26\"\n" + " }\n" + " }\n" + "}\n"; + + // Write data. + if (fwrite(data.c_str(), data.size(), 1, file_stream) != 1) + throw msg_fmt("could not write content of '{}'", config_file); + + // Close file. + fclose(file_stream); + + // Parse. + config::parser p; + ASSERT_THROW(p.parse(config_file), std::exception); + + // Remove temporary file. 
+ ::remove(config_file.c_str()); +} diff --git a/broker/neb/CMakeLists.txt b/broker/neb/CMakeLists.txt index a0c33026238..a87f4d336ef 100644 --- a/broker/neb/CMakeLists.txt +++ b/broker/neb/CMakeLists.txt @@ -1,20 +1,20 @@ -## -## Copyright 2009-2013,2015 Centreon -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## http://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License. -## -## For more information : contact@centreon.com -## +# # +# # Copyright 2009-2013,2015 Centreon +# # +# # Licensed under the Apache License, Version 2.0 (the "License"); +# # you may not use this file except in compliance with the License. +# # You may obtain a copy of the License at +# # +# # http://www.apache.org/licenses/LICENSE-2.0 +# # +# # Unless required by applicable law or agreed to in writing, software +# # distributed under the License is distributed on an "AS IS" BASIS, +# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# # See the License for the specific language governing permissions and +# # limitations under the License. +# # +# # For more information : contact@centreon.com +# # # Global options. set(INC_DIR "${PROJECT_SOURCE_DIR}/neb/inc") @@ -24,6 +24,7 @@ include_directories("${INC_DIR}") # NEB sources. set(NEB_SOURCES + # Sources. # Headers. 
${SRC_DIR}/acknowledgement.cc @@ -100,20 +101,21 @@ set(NEB_SOURCES add_library(nebbase STATIC ${NEB_SOURCES}) add_dependencies(nebbase table_max_size target_service target_host target_severity target_tag) target_link_libraries(nebbase - CONAN_PKG::protobuf - pb_service_lib - pb_host_lib - pb_severity_lib - pb_tag_lib) + CONAN_PKG::protobuf + pb_service_lib + pb_host_lib + pb_severity_lib + pb_tag_lib) set(NEBBASE_CXXFLAGS "${NEBBASE_CXXFLAGS} -fPIC") set_property(TARGET nebbase PROPERTY COMPILE_FLAGS ${NEBBASE_CXXFLAGS}) -target_precompile_headers(nebbase PRIVATE precomp_inc/nebbase_precomp.hpp) +target_precompile_headers(nebbase PRIVATE precomp_inc/precomp.hpp) # Centreon Broker module. set(NEB "10-neb") set(NEB "${NEB}" PARENT_SCOPE) add_library("${NEB}" SHARED + # Main source. "${SRC_DIR}/broker.cc" "${SRC_DIR}/node_id.cc" @@ -126,9 +128,10 @@ add_library("${NEB}" SHARED # Flags needed to include all symbols in binary. target_link_libraries(${NEB} nebbase CONAN_PKG::spdlog) -# "-Wl,--whole-archive" nebbase "-Wl,--no-whole-archive") + +# "-Wl,--whole-archive" nebbase "-Wl,--no-whole-archive") set_target_properties("${NEB}" PROPERTIES PREFIX "") -target_precompile_headers(${NEB} PRIVATE precomp_inc/neb_precomp.hpp) +target_precompile_headers(${NEB} REUSE_FROM nebbase) install(TARGETS "${NEB}" LIBRARY DESTINATION "${PREFIX_MODULES}" ) @@ -136,6 +139,7 @@ install(TARGETS "${NEB}" # Centreon Engine/Nagios module. set(CBMOD "cbmod") add_library("${CBMOD}" SHARED + # Sources. "${PROJECT_SOURCE_DIR}/core/src/config/applier/init.cc" "${SRC_DIR}/callback.cc" @@ -144,6 +148,7 @@ add_library("${CBMOD}" SHARED "${SRC_DIR}/internal.cc" "${SRC_DIR}/neb.cc" "${SRC_DIR}/set_log_data.cc" + # Headers. 
"${INC_DIR}/com/centreon/broker/neb/callback.hh" "${INC_DIR}/com/centreon/broker/neb/callbacks.hh" @@ -157,52 +162,54 @@ get_property(CBMOD_DEFINES list(APPEND CBMOD_DEFINES CBMOD) set_property(TARGET "${CBMOD}" PROPERTY COMPILE_DEFINITIONS "${CBMOD_DEFINES}") -if (CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") # Flags needed to include all symbols in shared library. target_link_libraries("${CBMOD}" "-Wl,--whole-archive" "rokerbase" "-Wl,--no-whole-archive" CONAN_PKG::nlohmann_json CONAN_PKG::spdlog CONAN_PKG::asio) -else () +else() target_link_libraries("${CBMOD}" "rokerbase" CONAN_PKG::nlohmann_json CONAN_PKG::spdlog CONAN_PKG::asio) -endif () -set_target_properties("${CBMOD}" PROPERTIES PREFIX "") -target_precompile_headers(${CBMOD} PRIVATE precomp_inc/precomp.hpp) +endif() +set_target_properties("${CBMOD}" PROPERTIES PREFIX "") +target_precompile_headers(${CBMOD} REUSE_FROM nebbase) # Testing. 
-if (WITH_TESTING) +if(WITH_TESTING) set( - TESTS_SOURCES - ${TESTS_SOURCES} - ${SRC_DIR}/set_log_data.cc - #Actual tests - ${TEST_DIR}/custom_variable.cc - ${TEST_DIR}/custom_variable_status.cc - ${TEST_DIR}/event_handler.cc - ${TEST_DIR}/flapping_status.cc - ${TEST_DIR}/host.cc - ${TEST_DIR}/host_check.cc - ${TEST_DIR}/host_dependency.cc - ${TEST_DIR}/host_parent.cc - ${TEST_DIR}/host_status.cc - ${TEST_DIR}/instance.cc - ${TEST_DIR}/instance_status.cc - ${TEST_DIR}/log_entry.cc - ${TEST_DIR}/module.cc - ${TEST_DIR}/randomize.cc - ${TEST_DIR}/randomize.hh - ${TEST_DIR}/service.cc - ${TEST_DIR}/service_check.cc - ${TEST_DIR}/service_dependency.cc - ${TEST_DIR}/service_status.cc - ${TEST_DIR}/set_log_data.cc - PARENT_SCOPE - ) + TESTS_SOURCES + ${TESTS_SOURCES} + ${SRC_DIR}/set_log_data.cc + + # Actual tests + ${TEST_DIR}/custom_variable.cc + ${TEST_DIR}/custom_variable_status.cc + ${TEST_DIR}/event_handler.cc + ${TEST_DIR}/flapping_status.cc + ${TEST_DIR}/host.cc + ${TEST_DIR}/host_check.cc + ${TEST_DIR}/host_dependency.cc + ${TEST_DIR}/host_parent.cc + ${TEST_DIR}/host_status.cc + ${TEST_DIR}/instance.cc + ${TEST_DIR}/instance_status.cc + ${TEST_DIR}/log_entry.cc + ${TEST_DIR}/module.cc + ${TEST_DIR}/randomize.cc + ${TEST_DIR}/randomize.hh + ${TEST_DIR}/service.cc + ${TEST_DIR}/service_check.cc + ${TEST_DIR}/service_dependency.cc + ${TEST_DIR}/service_status.cc + ${TEST_DIR}/set_log_data.cc + PARENT_SCOPE + ) set( - TESTS_LIBRARIES - ${TESTS_LIBRARIES} - ${NEB} - PARENT_SCOPE - ) + TESTS_LIBRARIES + ${TESTS_LIBRARIES} + ${NEB} + PARENT_SCOPE + ) endif() # Install rules. 
diff --git a/broker/neb/precomp_inc/precomp.hpp b/broker/neb/precomp_inc/precomp.hpp index 4d5fe22f887..0ce18579f4c 100644 --- a/broker/neb/precomp_inc/precomp.hpp +++ b/broker/neb/precomp_inc/precomp.hpp @@ -37,6 +37,7 @@ #include #include +#include #include #include diff --git a/broker/neb/src/callbacks.cc b/broker/neb/src/callbacks.cc index dfe29aca1a9..23050893427 100644 --- a/broker/neb/src/callbacks.cc +++ b/broker/neb/src/callbacks.cc @@ -300,11 +300,11 @@ int neb::callback_custom_variable(int callback_type, void* data) { // Host custom variable. if (NEBTYPE_HOSTCUSTOMVARIABLE_ADD == cvar->type) { engine::host* hst(static_cast(cvar->object_ptr)); - if (hst && !hst->get_name().empty()) { + if (hst && !hst->name().empty()) { // Fill custom variable event. - uint64_t host_id = engine::get_host_id(hst->get_name()); + uint64_t host_id = engine::get_host_id(hst->name()); if (host_id != 0) { - std::shared_ptr new_cvar(new custom_variable); + auto new_cvar{std::make_shared()}; new_cvar->enabled = true; new_cvar->host_id = host_id; new_cvar->modified = false; @@ -324,10 +324,10 @@ int neb::callback_custom_variable(int callback_type, void* data) { } } else if (NEBTYPE_HOSTCUSTOMVARIABLE_DELETE == cvar->type) { engine::host* hst(static_cast(cvar->object_ptr)); - if (hst && !hst->get_name().empty()) { - uint32_t host_id = engine::get_host_id(hst->get_name()); + if (hst && !hst->name().empty()) { + uint32_t host_id = engine::get_host_id(hst->name()); if (host_id != 0) { - std::shared_ptr old_cvar(new custom_variable); + auto old_cvar{std::make_shared()}; old_cvar->enabled = false; old_cvar->host_id = host_id; old_cvar->name = misc::string::check_string_utf8(cvar->var_name); @@ -352,7 +352,7 @@ int neb::callback_custom_variable(int callback_type, void* data) { p = engine::get_host_and_service_id(svc->get_hostname(), svc->get_description()); if (p.first && p.second) { - std::shared_ptr new_cvar(new custom_variable); + auto new_cvar{std::make_shared()}; new_cvar->enabled = 
true; new_cvar->host_id = p.first; new_cvar->modified = false; @@ -378,7 +378,7 @@ int neb::callback_custom_variable(int callback_type, void* data) { std::pair p{engine::get_host_and_service_id( svc->get_hostname(), svc->get_description())}; if (p.first && p.second) { - std::shared_ptr old_cvar(new custom_variable); + auto old_cvar{std::make_shared()}; old_cvar->enabled = false; old_cvar->host_id = p.first; old_cvar->modified = true; @@ -453,7 +453,7 @@ int neb::callback_dependency(int callback_type, void* data) { } // Generate service dependency event. - std::shared_ptr hst_dep(new host_dependency); + auto hst_dep{std::make_shared()}; hst_dep->host_id = host_id; hst_dep->dependent_host_id = dep_host_id; hst_dep->enabled = (nsadd->type != NEBTYPE_HOSTDEPENDENCY_DELETE); @@ -513,7 +513,7 @@ int neb::callback_dependency(int callback_type, void* data) { } // Generate service dependency event. - std::shared_ptr svc_dep(new service_dependency); + auto svc_dep{std::make_shared()}; svc_dep->host_id = ids.first; svc_dep->service_id = ids.second; svc_dep->dependent_host_id = dep_ids.first; @@ -682,11 +682,11 @@ int neb::callback_event_handler(int callback_type, void* data) { try { // In/Out variables. nebstruct_event_handler_data const* event_handler_data; - std::shared_ptr event_handler(new neb::event_handler); + auto event_handler = std::make_shared(); // Fill output var. event_handler_data = static_cast(data); - if (event_handler_data->command_args) + if (!event_handler_data->command_args.empty()) event_handler->command_args = misc::string::check_string_utf8(event_handler_data->command_args); if (event_handler_data->command_line) @@ -781,8 +781,7 @@ int neb::callback_external_command(int callback_type, void* data) { uint64_t host_id = engine::get_host_id(host); if (host_id != 0) { // Fill custom variable. 
- std::shared_ptr cvs{ - new neb::custom_variable_status}; + auto cvs = std::make_shared(); cvs->host_id = host_id; cvs->modified = true; cvs->name = var_name; @@ -818,8 +817,7 @@ int neb::callback_external_command(int callback_type, void* data) { engine::get_host_and_service_id(host, service)}; if (p.first && p.second) { // Fill custom variable. - std::shared_ptr cvs{ - new neb::custom_variable_status}; + auto cvs{std::make_shared()}; cvs->host_id = p.first; cvs->modified = true; cvs->name = var_name; @@ -862,8 +860,7 @@ int neb::callback_flapping_status(int callback_type, void* data) { try { // In/Out variables. nebstruct_flapping_data const* flapping_data; - std::shared_ptr flapping_status( - new neb::flapping_status); + auto flapping_status{std::make_shared()}; // Fill output var. flapping_data = static_cast(data); @@ -923,7 +920,7 @@ int neb::callback_group(int callback_type, void* data) { engine::hostgroup const* host_group( static_cast(group_data->object_ptr)); if (!host_group->get_group_name().empty()) { - std::shared_ptr new_hg(new neb::host_group); + auto new_hg{std::make_shared()}; new_hg->poller_id = config::applier::state::instance().poller_id(); new_hg->id = host_group->get_id(); new_hg->enabled = (group_data->type != NEBTYPE_HOSTGROUP_DELETE && @@ -947,7 +944,7 @@ int neb::callback_group(int callback_type, void* data) { engine::servicegroup const* service_group( static_cast(group_data->object_ptr)); if (!service_group->get_group_name().empty()) { - std::shared_ptr new_sg(new neb::service_group); + auto new_sg{std::make_shared()}; new_sg->poller_id = config::applier::state::instance().poller_id(); new_sg->id = service_group->get_id(); new_sg->enabled = (group_data->type != NEBTYPE_SERVICEGROUP_DELETE && @@ -1001,13 +998,13 @@ int neb::callback_group_member(int callback_type, void* data) { static_cast(member_data->object_ptr)); engine::hostgroup const* hg( static_cast(member_data->group_ptr)); - if (!hst->get_name().empty() && 
!hg->get_group_name().empty()) { + if (!hst->name().empty() && !hg->get_group_name().empty()) { // Output variable. - std::shared_ptr hgm(new neb::host_group_member); + auto hgm{std::make_shared()}; hgm->group_id = hg->get_id(); hgm->group_name = misc::string::check_string_utf8(hg->get_group_name()); hgm->poller_id = config::applier::state::instance().poller_id(); - uint32_t host_id = engine::get_host_id(hst->get_name()); + uint32_t host_id = engine::get_host_id(hst->name()); if (host_id != 0 && hgm->group_id != 0) { hgm->host_id = host_id; if (member_data->type == NEBTYPE_HOSTGROUPMEMBER_DELETE) { @@ -1039,8 +1036,7 @@ int neb::callback_group_member(int callback_type, void* data) { if (!svc->get_description().empty() && !sg->get_group_name().empty() && !svc->get_hostname().empty()) { // Output variable. - std::shared_ptr sgm( - new neb::service_group_member); + auto sgm{std::make_shared()}; sgm->group_id = sg->get_id(); sgm->group_name = misc::string::check_string_utf8(sg->get_group_name()); sgm->poller_id = config::applier::state::instance().poller_id(); @@ -1152,8 +1148,8 @@ int neb::callback_host(int callback_type, void* data) { my_host->freshness_threshold = h->get_freshness_threshold(); my_host->has_been_checked = h->has_been_checked(); my_host->high_flap_threshold = h->get_high_flap_threshold(); - if (!h->get_name().empty()) - my_host->host_name = misc::string::check_string_utf8(h->get_name()); + if (!h->name().empty()) + my_host->host_name = misc::string::check_string_utf8(h->name()); if (!h->get_icon_image().empty()) my_host->icon_image = misc::string::check_string_utf8(h->get_icon_image()); @@ -1234,9 +1230,8 @@ int neb::callback_host(int callback_type, void* data) { /* No need to send this service custom variables changes, custom variables * are managed in a different loop. */ } else - log_v2::neb()->error( - "callbacks: host '{}' has no ID (yet) defined", - (!h->get_name().empty() ? 
h->get_name() : "(unknown)")); + log_v2::neb()->error("callbacks: host '{}' has no ID (yet) defined", + (!h->name().empty() ? h->name() : "(unknown)")); } // Avoid exception propagation to C code. catch (...) { @@ -1305,19 +1300,18 @@ int neb::callback_pb_host(int callback_type, void* data) { assert(1 == 0); } - uint64_t host_id = engine::get_host_id(eh->get_name()); + uint64_t host_id = engine::get_host_id(eh->name()); if (host_id != 0) { hst.set_host_id(host_id); // Send host event. log_v2::neb()->info("callbacks: new host {} ('{}') on instance {}", - hst.host_id(), eh->get_name(), + hst.host_id(), eh->name(), config::applier::state::instance().poller_id()); neb::gl_publisher.write(h); } else - log_v2::neb()->error( - "callbacks: host '{}' has no ID (yet) defined", - (!eh->get_name().empty() ? eh->get_name() : "(unknown)")); + log_v2::neb()->error("callbacks: host '{}' has no ID (yet) defined", + (!eh->name().empty() ? eh->name() : "(unknown)")); } else { auto h{std::make_shared()}; Host& host = h.get()->mut_obj(); @@ -1373,8 +1367,8 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_freshness_threshold(eh->get_freshness_threshold()); host.set_checked(eh->has_been_checked()); host.set_high_flap_threshold(eh->get_high_flap_threshold()); - if (!eh->get_name().empty()) - host.set_name(misc::string::check_string_utf8(eh->get_name())); + if (!eh->name().empty()) + host.set_name(misc::string::check_string_utf8(eh->name())); if (!eh->get_icon_image().empty()) host.set_icon_image( misc::string::check_string_utf8(eh->get_icon_image())); @@ -1462,9 +1456,8 @@ int neb::callback_pb_host(int callback_type, void* data) { /* No need to send this service custom variables changes, custom variables * are managed in a different loop. */ } else - log_v2::neb()->error( - "callbacks: host '{}' has no ID (yet) defined", - (!eh->get_name().empty() ? 
eh->get_name() : "(unknown)")); + log_v2::neb()->error("callbacks: host '{}' has no ID (yet) defined", + (!eh->name().empty() ? eh->name() : "(unknown)")); } return 0; } @@ -1498,7 +1491,7 @@ int neb::callback_host_check(int callback_type, void* data) { log_v2::neb()->info("callbacks: generating host check event"); try { - std::shared_ptr host_check(new neb::host_check); + auto host_check{std::make_shared()}; // Fill output var. engine::host* h(static_cast(hcdata->object_ptr)); @@ -1547,7 +1540,7 @@ int neb::callback_host_status(int callback_type, void* data) { try { // In/Out variables. - std::shared_ptr host_status(new neb::host_status); + auto host_status{std::make_shared()}; // Fill output var. const engine::host* h = static_cast( @@ -1573,12 +1566,12 @@ int neb::callback_host_status(int callback_type, void* data) { host_status->execution_time = h->get_execution_time(); host_status->flap_detection_enabled = h->flap_detection_enabled(); host_status->has_been_checked = h->has_been_checked(); - if (h->get_name().empty()) + if (h->name().empty()) throw msg_fmt("unnamed host"); { - host_status->host_id = engine::get_host_id(h->get_name()); + host_status->host_id = engine::get_host_id(h->name()); if (host_status->host_id == 0) - throw msg_fmt("could not find ID of host '{}'", h->get_name()); + throw msg_fmt("could not find ID of host '{}'", h->name()); } host_status->is_flapping = h->get_is_flapping(); host_status->last_check = h->get_last_check(); @@ -1669,7 +1662,7 @@ int neb::callback_pb_host_status(int callback_type, void* data) noexcept { hscr.set_host_id(eh->get_host_id()); if (hscr.host_id() == 0) - log_v2::neb()->error("could not find ID of host '{}'", eh->get_name()); + log_v2::neb()->error("could not find ID of host '{}'", eh->name()); if (eh->problem_has_been_acknowledged()) hscr.set_acknowledgement_type( @@ -1749,7 +1742,7 @@ int neb::callback_log(int callback_type, void* data) { try { // In/Out variables. 
nebstruct_log_data const* log_data; - std::shared_ptr le(new neb::log_entry); + auto le{std::make_shared()}; // Fill output var. log_data = static_cast(data); @@ -1790,7 +1783,7 @@ int neb::callback_module(int callback_type, void* data) { try { // In/Out variables. nebstruct_module_data const* module_data; - std::shared_ptr me(new neb::module); + auto me{std::make_shared()}; // Fill output var. module_data = static_cast(data); @@ -1899,7 +1892,7 @@ int neb::callback_process(int callback_type, void* data) { } // Output variable. - std::shared_ptr instance(new neb::instance); + auto instance{std::make_shared()}; instance->poller_id = config::applier::state::instance().poller_id(); instance->engine = "Centreon Engine"; instance->is_running = true; @@ -1918,7 +1911,7 @@ int neb::callback_process(int callback_type, void* data) { } else if (NEBTYPE_PROCESS_EVENTLOOPEND == process_data->type) { log_v2::neb()->info("callbacks: generating process end event"); // Output variable. - std::shared_ptr instance(new neb::instance); + auto instance{std::make_shared()}; // Fill output var. instance->poller_id = config::applier::state::instance().poller_id(); @@ -1962,7 +1955,7 @@ int neb::callback_program_status(int callback_type, void* data) { try { // In/Out variables. nebstruct_program_status_data const* program_status_data; - std::shared_ptr is(new neb::instance_status); + auto is{std::make_shared()}; // Fill output var. 
program_status_data = static_cast(data); @@ -1975,10 +1968,10 @@ int neb::callback_program_status(int callback_type, void* data) { is->check_services_freshness = check_service_freshness; is->event_handler_enabled = program_status_data->event_handlers_enabled; is->flap_detection_enabled = program_status_data->flap_detection_enabled; - if (program_status_data->global_host_event_handler) + if (!program_status_data->global_host_event_handler.empty()) is->global_host_event_handler = misc::string::check_string_utf8( program_status_data->global_host_event_handler); - if (program_status_data->global_service_event_handler) + if (!program_status_data->global_service_event_handler.empty()) is->global_service_event_handler = misc::string::check_string_utf8( program_status_data->global_service_event_handler); is->last_alive = time(nullptr); @@ -2032,12 +2025,12 @@ int neb::callback_relation(int callback_type, void* data) { int host_id; int parent_id; { - host_id = engine::get_host_id(relation->dep_hst->get_name()); - parent_id = engine::get_host_id(relation->hst->get_name()); + host_id = engine::get_host_id(relation->dep_hst->name()); + parent_id = engine::get_host_id(relation->hst->name()); } if (host_id && parent_id) { // Generate parent event. - std::shared_ptr new_host_parent(new host_parent); + auto new_host_parent{std::make_shared()}; new_host_parent->enabled = (relation->type != NEBTYPE_PARENT_DELETE); new_host_parent->host_id = host_id; new_host_parent->parent_id = parent_id; @@ -2542,9 +2535,7 @@ int neb::callback_service_check(int callback_type, void* data) { try { // In/Out variables. - std::shared_ptr service_check( - std::make_shared()); - + auto service_check{std::make_shared()}; // Fill output var. engine::service* s{static_cast(scdata->object_ptr)}; if (scdata->command_line) { @@ -2831,7 +2822,7 @@ int neb::callback_service_status(int callback_type, void* data) { try { // In/Out variables. 
- auto service_status = std::make_shared(); + auto service_status{std::make_shared()}; // Fill output var. engine::service const* s{static_cast( diff --git a/broker/rrd/inc/com/centreon/broker/rrd/backend.hh b/broker/rrd/inc/com/centreon/broker/rrd/backend.hh index 19926637035..034255b057e 100644 --- a/broker/rrd/inc/com/centreon/broker/rrd/backend.hh +++ b/broker/rrd/inc/com/centreon/broker/rrd/backend.hh @@ -49,7 +49,8 @@ class backend { uint32_t length, time_t from, uint32_t step, - short value_type = 0) = 0; + short value_type = 0, + bool without_cache = false) = 0; virtual void remove(std::string const& filename) = 0; virtual void update(time_t t, std::string const& value) = 0; virtual void update(const std::deque& pts) = 0; diff --git a/broker/rrd/inc/com/centreon/broker/rrd/cached.hh b/broker/rrd/inc/com/centreon/broker/rrd/cached.hh index ede7bbe6821..3ab49cefdfd 100644 --- a/broker/rrd/inc/com/centreon/broker/rrd/cached.hh +++ b/broker/rrd/inc/com/centreon/broker/rrd/cached.hh @@ -77,7 +77,8 @@ class cached : public backend { uint32_t length, time_t from, uint32_t step, - short value_type = 0) { + short value_type = 0, + bool without_cache = false) { // Close previous file. this->close(); @@ -87,7 +88,7 @@ class cached : public backend { /* We are unfortunately forced to use librrd to create RRD file as ** rrdcached does not support RRD file creation. 
*/ - _lib.open(filename, length, from, step, value_type); + _lib.open(filename, length, from, step, value_type, without_cache); } /** diff --git a/broker/rrd/inc/com/centreon/broker/rrd/creator.hh b/broker/rrd/inc/com/centreon/broker/rrd/creator.hh index 7f15897ec89..ab92a07e842 100644 --- a/broker/rrd/inc/com/centreon/broker/rrd/creator.hh +++ b/broker/rrd/inc/com/centreon/broker/rrd/creator.hh @@ -43,7 +43,8 @@ class creator { uint32_t length, time_t from, uint32_t step, - short value_type); + short value_type, + bool without_cache = false); private: struct tmpl_info { diff --git a/broker/rrd/inc/com/centreon/broker/rrd/lib.hh b/broker/rrd/inc/com/centreon/broker/rrd/lib.hh index f076cbafb79..251ffe7b4d4 100644 --- a/broker/rrd/inc/com/centreon/broker/rrd/lib.hh +++ b/broker/rrd/inc/com/centreon/broker/rrd/lib.hh @@ -48,7 +48,8 @@ class lib : public backend { uint32_t length, time_t from, uint32_t step, - short value_type = 0) override; + short value_type = 0, + bool without_cache = false) override; void remove(std::string const& filename) override; void update(time_t t, std::string const& value) override; void update(const std::deque& pts) override; diff --git a/broker/rrd/src/creator.cc b/broker/rrd/src/creator.cc index 9bb5fe5edd9..e4433d886b2 100644 --- a/broker/rrd/src/creator.cc +++ b/broker/rrd/src/creator.cc @@ -83,67 +83,73 @@ void creator::clear() { * @param[in] step Specifies the base interval in seconds with * which data will be fed into the RRD. * @param[in] value_type Type of the metric. + * @param[in] without_cache We force the creation of the file (needed by the + * rebuild). */ void creator::create(std::string const& filename, uint32_t length, time_t from, uint32_t step, - short value_type) { + short value_type, + bool without_cache) { // Fill template informations. if (!step) step = 5 * 60; // Default to every 5 minutes. if (!length) length = 31 * 24 * 60 * 60; // Default to one month long. 
- tmpl_info info; - info.length = length; - info.step = step; - info.value_type = value_type; + if (!without_cache) { + tmpl_info info; + info.length = length; + info.step = step; + info.value_type = value_type; - // Find fd informations. - std::map::const_iterator it(_fds.find(info)); - // Is in the cache, just duplicate file. - if (it != _fds.end()) - _duplicate(filename, it->second); - // Not is the cache, but we have enough space in the cache. - // Create new entry. - else if (_fds.size() < _cache_size) { - std::string tmpl_filename(fmt::format("{}/tmpl_{}_{}_{}.rrd", _tmpl_path, - length, step, value_type)); + // Find fd informations. + std::map::const_iterator it(_fds.find(info)); + // Is in the cache, just duplicate file. + if (it != _fds.end()) + _duplicate(filename, it->second); + // Not in the cache, but we have enough space in the cache. + // Create new entry. + else if (_fds.size() < _cache_size) { + std::string tmpl_filename(fmt::format("{}/tmpl_{}_{}_{}.rrd", _tmpl_path, + length, step, value_type)); - // Create new template. - _open(tmpl_filename, length, from, step, value_type); + // Create new template. + _open(tmpl_filename, length, from, step, value_type); - // Get template file size. - struct stat s; - if (stat(tmpl_filename.c_str(), &s) < 0) { - char const* msg(strerror(errno)); - throw exceptions::open( - "RRD: could not create template file '{}" - "': {}", - tmpl_filename, msg); - } + // Get template file size. + struct stat s; + if (stat(tmpl_filename.c_str(), &s) < 0) { + char const* msg(strerror(errno)); + throw exceptions::open( + "RRD: could not create template file '{}" + "': {}", + tmpl_filename, msg); + } - // Get template file fd. - int in_fd(open(tmpl_filename.c_str(), O_RDONLY)); - if (in_fd < 0) { - char const* msg(strerror(errno)); - throw exceptions::open( - "RRD: could not open template file '{}" - "': {}", - tmpl_filename, msg); - } + // Get template file fd. 
+ int in_fd(open(tmpl_filename.c_str(), O_RDONLY)); + if (in_fd < 0) { + char const* msg(strerror(errno)); + throw exceptions::open( + "RRD: could not open template file '{}" + "': {}", + tmpl_filename, msg); + } - // Store fd informations into the cache. - fd_info fdinfo; - fdinfo.fd = in_fd; - fdinfo.size = s.st_size; - _fds[info] = fdinfo; + // Store fd informations into the cache. + fd_info fdinfo; + fdinfo.fd = in_fd; + fdinfo.size = s.st_size; + _fds[info] = fdinfo; - _duplicate(filename, fdinfo); - } - // No more space in the cache, juste create rrd file. - else - _open(filename, length, from, step, value_type); + _duplicate(filename, fdinfo); + } + // No more space in the cache, juste create rrd file. + else + _open(filename, length, from - 1, step, value_type); + } else + _open(filename, length, from - 1, step, value_type); } /** diff --git a/broker/rrd/src/lib.cc b/broker/rrd/src/lib.cc index 6b2263b0620..a32adbfc71b 100644 --- a/broker/rrd/src/lib.cc +++ b/broker/rrd/src/lib.cc @@ -101,13 +101,14 @@ void lib::open(std::string const& filename, uint32_t length, time_t from, uint32_t step, - short value_type) { + short value_type, + bool without_cache) { // Close previous file. this->close(); // Remember informations for further operations. 
_filename = filename; - _creator.create(filename, length, from, step, value_type); + _creator.create(filename, length, from, step, value_type, without_cache); } /** @@ -167,6 +168,7 @@ void lib::update(const std::deque& pts) { argv[pts.size()] = nullptr; auto it = pts.begin(); for (uint32_t i = 0; i < pts.size(); i++) { + log_v2::rrd()->trace("insertion of {} in rrd file", *it); argv[i] = it->data(); ++it; } diff --git a/broker/rrd/src/output.cc b/broker/rrd/src/output.cc index b4361c09a3a..5d2fb003663 100644 --- a/broker/rrd/src/output.cc +++ b/broker/rrd/src/output.cc @@ -452,23 +452,21 @@ void output::_rebuild_data(const RebuildMessage& rm) { query.emplace_back(fmt::format("{}:{}", pt.ctime(), static_cast(pt.value()))); break; + default: + log_v2::rrd()->debug("data_source_type = {} is not managed", + data_source_type); } if (!query.empty()) { - try { - _backend.open(path); - } catch (const exceptions::open& ex) { - log_v2::rrd()->debug("RRD file '{}' does not exist", path); - time_t start_time; - if (!p.second.pts().empty()) - start_time = p.second.pts()[0].ctime() - 1; - else - start_time = std::time(nullptr); - log_v2::rrd()->trace("'{}' start date set to {}", path, start_time); - uint32_t interval{p.second.check_interval() ? p.second.check_interval() - : 60}; - _backend.open(path, p.second.rrd_retention(), start_time, interval, - p.second.data_source_type()); - } + time_t start_time; + if (!p.second.pts().empty()) + start_time = p.second.pts()[0].ctime() - 1; + else + start_time = std::time(nullptr); + log_v2::rrd()->trace("'{}' start date set to {}", path, start_time); + uint32_t interval{p.second.check_interval() ? 
p.second.check_interval() + : 60}; + _backend.open(path, p.second.rrd_retention(), start_time, interval, + p.second.data_source_type(), true); log_v2::rrd()->trace("{} points added to file '{}'", query.size(), path); _backend.update(query); } else diff --git a/broker/storage/inc/com/centreon/broker/storage/internal.hh b/broker/storage/inc/com/centreon/broker/storage/internal.hh index 398573b4286..bdf364cbba3 100644 --- a/broker/storage/inc/com/centreon/broker/storage/internal.hh +++ b/broker/storage/inc/com/centreon/broker/storage/internal.hh @@ -31,8 +31,8 @@ namespace bbdo { /** * Here is a declaration of pb_rebuild_metrics which is a bbdo event we use to * ask rebuild of metrics. MetricIds is a vector of metric ids to rebuild. */ -using pb_rebuild_rrd_graphs = - io::protobuf; +using pb_rebuild_graphs = + io::protobuf; using pb_remove_graphs = io::protobuf; } // namespace bbdo diff --git a/broker/storage/inc/com/centreon/broker/storage/rebuilder.hh b/broker/storage/inc/com/centreon/broker/storage/rebuilder.hh index 79f95ac15d9..f3670d869bc 100644 --- a/broker/storage/inc/com/centreon/broker/storage/rebuilder.hh +++ b/broker/storage/inc/com/centreon/broker/storage/rebuilder.hh @@ -66,7 +66,7 @@ class rebuilder { ~rebuilder() noexcept = default; rebuilder(const rebuilder&) = delete; rebuilder& operator=(const rebuilder&) = delete; - void rebuild_rrd_graphs(const std::shared_ptr& d); + void rebuild_graphs(const std::shared_ptr& d); }; } // namespace storage diff --git a/broker/storage/src/conflict_manager.cc b/broker/storage/src/conflict_manager.cc index b0b461e2c3c..a730719c915 100644 --- a/broker/storage/src/conflict_manager.cc +++ b/broker/storage/src/conflict_manager.cc @@ -619,8 +619,8 @@ void conflict_manager::_callback() { type == neb::service_status::static_type()) _storage_process_service_status(tpl); else if (std::get<1>(tpl) == storage && - type == make_type(io::bbdo, bbdo::de_rebuild_rrd_graphs)) { - _rebuilder->rebuild_rrd_graphs(d); + type == 
make_type(io::bbdo, bbdo::de_rebuild_graphs)) { + _rebuilder->rebuild_graphs(d); *std::get<2>(tpl) = true; } else if (std::get<1>(tpl) == storage && type == make_type(io::bbdo, bbdo::de_remove_graphs)) { @@ -954,7 +954,8 @@ void conflict_manager::remove_graphs(const std::shared_ptr& d) { if (!ids.obj().metric_ids().empty()) { std::promise promise_metrics; - std::future future_metrics = promise_metrics.get_future(); + std::future future_metrics = + promise_metrics.get_future(); ms.run_query_and_get_result( fmt::format("SELECT index_id,metric_id,metric_name FROM metrics " diff --git a/broker/storage/src/main.cc b/broker/storage/src/main.cc index a116ca1331e..74a16d572e1 100644 --- a/broker/storage/src/main.cc +++ b/broker/storage/src/main.cc @@ -104,9 +104,8 @@ void broker_module_init(void const* arg) { /* Let's register the rebuild_metrics bbdo event. This is needed to send * the rebuild message from the gRPC interface. */ - e.register_event(make_type(io::bbdo, bbdo::de_rebuild_rrd_graphs), - "rebuild_metrics", - &bbdo::pb_rebuild_rrd_graphs::operations); + e.register_event(make_type(io::bbdo, bbdo::de_rebuild_graphs), + "rebuild_metrics", &bbdo::pb_rebuild_graphs::operations); /* Let's register the message to start rebuilds, send rebuilds and * terminate rebuilds. This is pb_rebuild_message. */ diff --git a/broker/storage/src/rebuilder.cc b/broker/storage/src/rebuilder.cc index 62d286f3f86..6f63e2cdf07 100644 --- a/broker/storage/src/rebuilder.cc +++ b/broker/storage/src/rebuilder.cc @@ -55,13 +55,13 @@ rebuilder::rebuilder(const database_config& db_cfg, * * @param d The BBDO message with all the metric ids to rebuild. 
*/ -void rebuilder::rebuild_rrd_graphs(const std::shared_ptr& d) { +void rebuilder::rebuild_graphs(const std::shared_ptr& d) { asio::post(pool::io_context(), [this, data = d] { - const bbdo::pb_rebuild_rrd_graphs& ids = - *static_cast(data.get()); + const bbdo::pb_rebuild_graphs& ids = + *static_cast(data.get()); std::string ids_str{ - fmt::format("{}", fmt::join(ids.obj().index_id(), ","))}; + fmt::format("{}", fmt::join(ids.obj().index_ids(), ","))}; log_v2::sql()->debug( "Metric rebuild: Rebuild metrics event received for metrics ({})", ids_str); diff --git a/broker/test/rebuild_graphs.cc b/broker/test/rebuild_graphs.cc index 6edc7262424..381673ef238 100644 --- a/broker/test/rebuild_graphs.cc +++ b/broker/test/rebuild_graphs.cc @@ -256,7 +256,7 @@ int main() { // Launch rebuild. { QSqlQuery q(*db.centreon_db()); - if (!q.exec("UPDATE rt_index_data SET must_be_rebuild=1")) + if (!q.exec("UPDATE rt_index_data SET must_be_rebuild='1'")) throw(exceptions::msg() << "cannot launch rebuild from DB: " << qPrintable(q.lastError().text())); sleep_for(15); diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/internal.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/internal.hh index f3c57ee2d9e..1f468d2866d 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/internal.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/internal.hh @@ -33,8 +33,8 @@ namespace bbdo { /** * Here is a declaration of pb_rebuild_metrics which is a bbdo event we use to * ask rebuild of metrics. MetricIds is a vector of metric ids to rebuild. 
*/ -using pb_rebuild_rrd_graphs = - io::protobuf; +using pb_rebuild_graphs = + io::protobuf; using pb_remove_graphs = io::protobuf; } // namespace bbdo diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh index 0b69efde1da..cd2ab0df13a 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/rebuilder.hh @@ -68,7 +68,7 @@ class rebuilder { ~rebuilder() noexcept = default; rebuilder(const rebuilder&) = delete; rebuilder& operator=(const rebuilder&) = delete; - void rebuild_rrd_graphs(const std::shared_ptr& d); + void rebuild_graphs(const std::shared_ptr& d); }; } // namespace unified_sql diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh index 8d4965637fb..3912e29755f 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh @@ -201,7 +201,6 @@ class stream : public io::stream { std::time_t _next_loop_timeout; std::time_t _next_update_downtimes; - asio::steady_timer _timer; asio::steady_timer _queues_timer; /* To give the order to stop the check_queues */ std::atomic_bool _stop_check_queues; @@ -313,7 +312,8 @@ class stream : public io::stream { void _update_downtimes(); bool _is_valid_poller(uint32_t instance_id); void _check_queues(asio::error_code ec); - void _check_deleted_index(asio::error_code ec); + void _check_deleted_index(); + void _check_rebuild_index(); void _process_acknowledgement(const std::shared_ptr& d); void _process_comment(const std::shared_ptr& d); @@ -396,6 +396,7 @@ class stream : public io::stream { int32_t stop() override; void statistics(nlohmann::json& tree) const; void remove_graphs(const std::shared_ptr& d); + void update() override; }; } // namespace unified_sql CCB_END() 
diff --git a/broker/unified_sql/src/main.cc b/broker/unified_sql/src/main.cc index a2b6feb170a..375e6c1bd48 100644 --- a/broker/unified_sql/src/main.cc +++ b/broker/unified_sql/src/main.cc @@ -101,9 +101,8 @@ void broker_module_init(void const* arg) { /* Let's register the rebuild_metrics bbdo event. This is needed to send * the rebuild message from the gRPC interface. */ - e.register_event(make_type(io::bbdo, bbdo::de_rebuild_rrd_graphs), - "rebuild_metrics", - &bbdo::pb_rebuild_rrd_graphs::operations); + e.register_event(make_type(io::bbdo, bbdo::de_rebuild_graphs), + "rebuild_metrics", &bbdo::pb_rebuild_graphs::operations); /* Let's register the message to start rebuilds, send rebuilds and * terminate rebuilds. This is pb_rebuild_message. */ diff --git a/broker/unified_sql/src/rebuilder.cc b/broker/unified_sql/src/rebuilder.cc index bd7ed0c2144..da595278110 100644 --- a/broker/unified_sql/src/rebuilder.cc +++ b/broker/unified_sql/src/rebuilder.cc @@ -58,13 +58,13 @@ rebuilder::rebuilder(const database_config& db_cfg, * * @param d The BBDO message with all the metric ids to rebuild. 
*/ -void rebuilder::rebuild_rrd_graphs(const std::shared_ptr& d) { +void rebuilder::rebuild_graphs(const std::shared_ptr& d) { asio::post(pool::io_context(), [this, data = d] { - const bbdo::pb_rebuild_rrd_graphs& ids = - *static_cast(data.get()); + const bbdo::pb_rebuild_graphs& ids = + *static_cast(data.get()); std::string ids_str{ - fmt::format("{}", fmt::join(ids.obj().index_id(), ","))}; + fmt::format("{}", fmt::join(ids.obj().index_ids(), ","))}; log_v2::sql()->debug( "Metric rebuild: Rebuild metrics event received for metrics ({})", ids_str); diff --git a/broker/unified_sql/src/stream.cc b/broker/unified_sql/src/stream.cc index a97d921e7a6..9c285010ec9 100644 --- a/broker/unified_sql/src/stream.cc +++ b/broker/unified_sql/src/stream.cc @@ -117,7 +117,6 @@ stream::stream(const database_config& dbcfg, _next_insert_perfdatas{std::time_t(nullptr) + 10}, _next_update_metrics{std::time_t(nullptr) + 10}, _next_loop_timeout{std::time_t(nullptr) + _loop_timeout}, - _timer{pool::io_context()}, _queues_timer{pool::io_context()}, _stop_check_queues{false}, _check_queues_stopped{false}, @@ -139,9 +138,6 @@ stream::stream(const database_config& dbcfg, log_v2::sql()->error("error while loading caches: {}", e.what()); throw; } - _timer.expires_after(std::chrono::minutes(5)); - _timer.async_wait( - std::bind(&stream::_check_deleted_index, this, std::placeholders::_1)); _queues_timer.expires_after(std::chrono::seconds(queue_timer_duration)); _queues_timer.async_wait( std::bind(&stream::_check_queues, this, std::placeholders::_1)); @@ -150,8 +146,8 @@ stream::stream(const database_config& dbcfg, stream::~stream() noexcept { std::promise p; - asio::post(_timer.get_executor(), [this, &p] { - _timer.cancel(); + asio::post(_queues_timer.get_executor(), [this, &p] { + _queues_timer.cancel(); p.set_value(); }); p.get_future().wait(); @@ -532,8 +528,8 @@ int32_t stream::write(const std::shared_ptr& data) { uint16_t elem = element_of_type(type); if (cat == io::neb) { 
(this->*(_neb_processing_table[elem]))(data); - } else if (type == make_type(io::bbdo, bbdo::de_rebuild_rrd_graphs)) - _rebuilder.rebuild_rrd_graphs(data); + } else if (type == make_type(io::bbdo, bbdo::de_rebuild_graphs)) + _rebuilder.rebuild_graphs(data); else if (type == make_type(io::bbdo, bbdo::de_remove_graphs)) remove_graphs(data); else { @@ -619,10 +615,11 @@ int32_t stream::stop() { * @param d The BBDO message with all the metrics/indexes to remove. */ void stream::remove_graphs(const std::shared_ptr& d) { + log_v2::sql()->info("remove graphs call"); asio::post(pool::instance().io_context(), [this, data = d] { mysql ms(_dbcfg); - const bbdo::pb_remove_graphs& ids = - *static_cast(data.get()); + bbdo::pb_remove_graphs* ids = + static_cast(data.get()); std::promise promise; std::future future = promise.get_future(); @@ -630,13 +627,13 @@ void stream::remove_graphs(const std::shared_ptr& d) { std::set indexes_to_delete; std::set metrics_to_delete; try { - if (!ids.obj().index_ids().empty()) { - ms.run_query_and_get_result( + if (!ids->obj().index_ids().empty()) { + std::string query{ fmt::format("SELECT i.id,m.metric_id, m.metric_name,i.host_id," "i.service_id FROM index_data i LEFT JOIN metrics m ON " "i.id=m.index_id WHERE i.id IN ({})", - fmt::join(ids.obj().index_ids(), ",")), - std::move(promise), conn); + fmt::join(ids->obj().index_ids(), ","))}; + ms.run_query_and_get_result(query, std::move(promise), conn); database::mysql_result res(future.get()); std::lock_guard lock(_metric_cache_m); @@ -651,15 +648,15 @@ void stream::remove_graphs(const std::shared_ptr& d) { } } - if (!ids.obj().metric_ids().empty()) { + if (!ids->obj().metric_ids().empty()) { promise = std::promise(); std::future future = promise.get_future(); - ms.run_query_and_get_result( + std::string query{ fmt::format("SELECT index_id,metric_id,metric_name FROM metrics " "WHERE metric_id IN ({})", - fmt::join(ids.obj().metric_ids(), ",")), - std::move(promise), conn); + 
fmt::join(ids->obj().metric_ids(), ","))}; + ms.run_query_and_get_result(query, std::move(promise), conn); database::mysql_result res(future.get()); std::lock_guard lock(_metric_cache_m); @@ -696,6 +693,9 @@ void stream::remove_graphs(const std::shared_ptr& d) { rmg->mut_obj().add_metric_ids(i); for (uint64_t i : indexes_to_delete) rmg->mut_obj().add_index_ids(i); + log_v2::sql()->info( + "publishing pb remove graph with {} metrics and {} indexes", + metrics_to_delete.size(), indexes_to_delete.size()); multiplexing::publisher().write(rmg); } else log_v2::sql()->info( @@ -703,3 +703,9 @@ void stream::remove_graphs(const std::shared_ptr& d) { mids_str, ids_str); }); } + +void stream::update() { + log_v2::sql()->info("unified_sql stream update"); + _check_deleted_index(); + _check_rebuild_index(); +} diff --git a/broker/unified_sql/src/stream_storage.cc b/broker/unified_sql/src/stream_storage.cc index a3a636f5f16..e2e81fcf879 100644 --- a/broker/unified_sql/src/stream_storage.cc +++ b/broker/unified_sql/src/stream_storage.cc @@ -35,6 +35,7 @@ #include "com/centreon/broker/misc/shared_mutex.hh" #include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/neb/events.hh" +#include "com/centreon/broker/unified_sql/internal.hh" #include "com/centreon/broker/unified_sql/stream.hh" #include "com/centreon/exceptions/msg_fmt.hh" @@ -894,8 +895,8 @@ void stream::_check_queues(asio::error_code ec) { duration = 1; if (!_stop_check_queues) { - _timer.expires_after(std::chrono::seconds(duration)); - _timer.async_wait( + _queues_timer.expires_after(std::chrono::seconds(duration)); + _queues_timer.async_wait( std::bind(&stream::_check_queues, this, std::placeholders::_1)); } else { log_v2::sql()->info("SQL: check_queues correctly interrupted."); @@ -904,105 +905,91 @@ void stream::_check_queues(asio::error_code ec) { } } } + /** * Check for deleted index. 
*/ -void stream::_check_deleted_index(asio::error_code ec) { - if (ec) - log_v2::sql()->info( - "unified_sql: the check for deleted indices encountered an error: {}", - ec.message()); - else { - // Info. - log_v2::sql()->info("unified_sql: starting DB cleanup"); - uint32_t deleted_index(0); - uint32_t deleted_metrics(0); - - // Fetch next index to delete. - { - std::promise promise; - std::future future = promise.get_future(); - int32_t conn = _mysql.choose_best_connection(-1); - std::set index_to_delete; - std::set metrics_to_delete; - try { - _mysql.run_query_and_get_result( - "SELECT m.index_id,m.metric_id, m.metric_name, i.host_id, " - "i.service_id FROM metrics m LEFT JOIN index_data i ON " - "i.id=m.index_id WHERE i.to_delete=1", - std::move(promise), conn); - database::mysql_result res(future.get()); - - std::lock_guard lock(_metric_cache_m); - while (_mysql.fetch_row(res)) { - index_to_delete.insert(res.value_as_u64(0)); - metrics_to_delete.insert(res.value_as_u64(1)); - _metric_cache.erase({res.value_as_u64(0), res.value_as_str(2)}); - _index_cache.erase({res.value_as_u32(3), res.value_as_u32(4)}); - } - std::promise promise_metrics; - std::future future_metrics = - promise_metrics.get_future(); - _mysql.run_query_and_get_result( - "SELECT metric_id, metric_name FROM metrics WHERE to_delete=1", - std::move(promise_metrics), conn); - res = future_metrics.get(); - - while (_mysql.fetch_row(res)) { - metrics_to_delete.insert(res.value_as_u64(0)); - _metric_cache.erase({res.value_as_u64(0), res.value_as_str(1)}); - } - } catch (const std::exception& e) { - log_v2::sql()->error( - "could not query index / metrics table(s) to get index to delete: " - "{} ", - e.what()); - } - - // Delete metrics. +void stream::_check_deleted_index() { + // Info. 
+ log_v2::sql()->info("unified_sql: starting DB cleanup"); + + std::promise promise; + std::future future = promise.get_future(); + int32_t conn = _mysql.choose_best_connection(-1); + std::set index_to_delete; + std::set metrics_to_delete; + try { + _mysql.run_query_and_get_result( + "SELECT id FROM index_data WHERE to_delete=1", std::move(promise), + conn); + database::mysql_result res(future.get()); + + while (_mysql.fetch_row(res)) { + index_to_delete.insert(res.value_as_u64(0)); + } - std::string query; - std::string err_msg; - for (int64_t i : metrics_to_delete) { - query = fmt::format("DELETE FROM metrics WHERE metric_id={}", i); - _mysql.run_query(query, database::mysql_error::delete_metric, false, - conn); - _add_action(conn, actions::metrics); + std::promise promise_metrics; + std::future future_metrics = + promise_metrics.get_future(); + _mysql.run_query_and_get_result( + "SELECT metric_id FROM metrics WHERE to_delete=1", + std::move(promise_metrics), conn); + res = future_metrics.get(); - // Remove associated graph. - std::shared_ptr rg{ - std::make_shared(i, false)}; - multiplexing::publisher().write(rg); + while (_mysql.fetch_row(res)) { + metrics_to_delete.insert(res.value_as_u64(0)); + } + } catch (const std::exception& e) { + log_v2::sql()->error( + "could not query index / metrics table(s) to get index to delete: " + "{} ", + e.what()); + } - { - std::lock_guard lck(_queues_m); - _metrics.erase(i); - } - log_v2::perfdata()->debug("metrics erasing metric_id = {}", i); - deleted_metrics++; - } + log_v2::sql()->info("Something to remove?"); + if (!metrics_to_delete.empty() || !index_to_delete.empty()) { + log_v2::sql()->info("YES!!!"); + auto rg = std::make_shared(); + auto& obj = rg->mut_obj(); + for (auto& m : metrics_to_delete) + obj.add_metric_ids(m); + for (auto& i : index_to_delete) + obj.add_index_ids(i); + remove_graphs(rg); + } +} - // Delete index from DB. 
- for (int64_t i : index_to_delete) { - query = fmt::format("DELETE FROM index_data WHERE id={}", i); - _mysql.run_query(query, database::mysql_error::delete_index, false, - conn); - _add_action(conn, actions::index_data); - - // Remove associated graph. - std::shared_ptr rg{ - std::make_shared(i, true)}; - multiplexing::publisher().write(rg); - deleted_index++; - } +/** + * Check for indexes to rebuild. + */ +void stream::_check_rebuild_index() { + // Fetch next index to delete. + std::promise promise; + std::future future = promise.get_future(); + int32_t conn = _mysql.choose_best_connection(-1); + std::set index_to_rebuild; + try { + _mysql.run_query_and_get_result( + "SELECT id FROM index_data WHERE must_be_rebuild='1'", + std::move(promise), conn); + database::mysql_result res(future.get()); + + while (_mysql.fetch_row(res)) { + index_to_rebuild.insert(res.value_as_u64(0)); } - // End. - log_v2::perfdata()->info( - "unified_sql: end of DB cleanup: {} metrics and {} indices removed", - deleted_metrics, deleted_index); - _timer.expires_after(std::chrono::minutes(5)); - _timer.async_wait( - std::bind(&stream::_check_deleted_index, this, std::placeholders::_1)); + } catch (const std::exception& e) { + log_v2::sql()->error( + "could not query indexes table to get indexes to delete: {}", e.what()); + } + + log_v2::sql()->info("Something to rebuild?"); + if (!index_to_rebuild.empty()) { + log_v2::sql()->info("YES!!!"); + auto rg = std::make_shared(); + auto& obj = rg->mut_obj(); + for (auto& i : index_to_rebuild) + obj.add_index_ids(i); + _rebuilder.rebuild_graphs(rg); } } diff --git a/ccc/CMakeLists.txt b/ccc/CMakeLists.txt new file mode 100644 index 00000000000..c1b7fc73212 --- /dev/null +++ b/ccc/CMakeLists.txt @@ -0,0 +1,55 @@ +## +## Copyright 2022 Centreon +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## +## For more information : contact@centreon.com +## + +# +# Global settings. +# + +# Set necessary settings. +project("Centreon Collect Client" C CXX) + +# set -latomic if OS is Raspbian. +if (CMAKE_SYSTEM_PROCESSOR MATCHES "arm") + set(CMAKE_CXX_LINK_FLAGS "${CMAKE_CXX_LINK_FLAGS} -latomic") +endif () + +add_definitions("-D_GLIBCXX_USE_CXX11_ABI=1") + +option(WITH_LIBCXX "compiles and link cbd with clang++/libc++") +if (WITH_LIBCXX) + set(CMAKE_CXX_COMPILER "clang++") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") +# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread -Werror -O1 -fno-omit-frame-pointer") +endif () + +include_directories( + ${CMAKE_SOURCE_DIR}/broker/core/src + ${CMAKE_SOURCE_DIR}/engine/enginerpc + ) +set(ccc_files + main.cc + client.cc + ) + +add_executable(ccc ${ccc_files}) +target_link_libraries(ccc CONAN_PKG::grpc cerpc berpc CONAN_PKG::abseil CONAN_PKG::fmt) +set_target_properties(ccc PROPERTIES COMPILE_FLAGS "-fPIC") + +install(TARGETS ccc + RUNTIME DESTINATION "${CMAKE_INSTALL_FULL_BINDIR}" + ) diff --git a/ccc/client.cc b/ccc/client.cc new file mode 100644 index 00000000000..7fef74b7bd7 --- /dev/null +++ b/ccc/client.cc @@ -0,0 +1,356 @@ +/* +** Copyright 2022 Centreon +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
+** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +** +** For more information : contact@centreon.com +*/ + +#include "client.hh" +#include +#include +#include +#include +#include +#include +#include +#include "broker/core/src/broker.grpc.pb.h" +#include "com/centreon/exceptions/msg_fmt.hh" +#include "engine.grpc.pb.h" + +using namespace com::centreon::ccc; + +/** + * @brief Constructor + * + * @param channel The channel to use to the gRPC server. + * @param color_enabled A boolean telling if we should use colors or not. + */ +client::client(std::shared_ptr channel, bool color_enabled) + : _stub{std::make_unique(channel)}, + _server{CCC_NONE}, + _color_enabled{color_enabled} { + const ::google::protobuf::Empty e; + com::centreon::broker::Version broker_v; + com::centreon::engine::Version engine_v; + grpc::ByteBuffer request_buf; + + std::array service_name{"com.centreon.broker.Broker", + "com.centreon.engine.Engine"}; + + for (auto& sn : service_name) { + grpc::ClientContext context; + + bool own_buffer = false; + grpc::Status status = grpc::GenericSerialize( + e, &request_buf, &own_buffer); + auto resp = _stub->PrepareUnaryCall( + &context, absl::StrFormat("/%s/GetVersion", sn), request_buf, &_cq); + resp->StartCall(); + grpc::ByteBuffer resp_buf; + resp->Finish(&resp_buf, &status, reinterpret_cast(1)); + + google::protobuf::DynamicMessageFactory factory; + + void* tag; + bool ok = false; + _cq.Next(&tag, &ok); + grpc::ProtoBufferReader reader(&resp_buf); + + if (sn == "com.centreon.broker.Broker") { + if (broker_v.ParseFromZeroCopyStream(&reader)) { + std::string output_str; + 
google::protobuf::TextFormat::PrintToString(broker_v, &output_str); + if (!output_str.empty()) { + std::cerr << "Connected to a Centreon Broker " + << absl::StrFormat("%02d.%02d.%d gRPC server", + broker_v.major(), broker_v.minor(), + broker_v.patch()) + << std::endl; + _server = CCC_BROKER; + break; + } + } + } else if (sn == "com.centreon.engine.Engine") { + if (engine_v.ParseFromZeroCopyStream(&reader)) { + std::string output_str; + google::protobuf::TextFormat::PrintToString(engine_v, &output_str); + if (!output_str.empty()) { + std::cerr << "Connected to a Centreon Engine " + << absl::StrFormat("%02d.%02d.%d gRPC server", + engine_v.major(), engine_v.minor(), + engine_v.patch()) + << std::endl; + _server = CCC_ENGINE; + break; + } + } + } + } + if (_server == CCC_NONE) + throw com::centreon::exceptions::msg_fmt( + "Cannot connect to a Centreon Broker/Engine gRPC server"); +} + +std::list client::methods() const { + std::list retval; + const google::protobuf::DescriptorPool* p = + google::protobuf::DescriptorPool::generated_pool(); + const google::protobuf::ServiceDescriptor* service_descriptor; + switch (_server) { + case CCC_BROKER: + service_descriptor = p->FindServiceByName("com.centreon.broker.Broker"); + break; + case CCC_ENGINE: + service_descriptor = p->FindServiceByName("com.centreon.engine.Engine"); + break; + default: + // Should not occur + assert(1 == 0); + } + size_t size = service_descriptor->method_count(); + for (uint32_t i = 0; i < size; i++) { + const google::protobuf::MethodDescriptor* method = + service_descriptor->method(i); + const google::protobuf::Descriptor* input_message = method->input_type(); + const google::protobuf::Descriptor* output_message = method->output_type(); + retval.emplace_back(absl::StrFormat( + "%s%s%s(%s%s%s) -> %s%s%s", color(_color_enabled), + method->name(), color(_color_enabled), + color(_color_enabled), input_message->name(), + color(_color_enabled), + color(_color_enabled), output_message->name(), + 
color(_color_enabled))); + } + return retval; +} + +/** + * @brief Call the gRPC method cmd with the given arguments as a JSON string. + * + * @param cmd The name of the method to call. + * @param args The arguments as a JSON string. + * + * @return A JSON string with the output message. + */ +std::string client::call(const std::string& cmd, const std::string& args) { + const google::protobuf::DescriptorPool* p = + google::protobuf::DescriptorPool::generated_pool(); + const google::protobuf::ServiceDescriptor* service_descriptor; + std::string cmd_str; + switch (_server) { + case CCC_BROKER: + service_descriptor = p->FindServiceByName("com.centreon.broker.Broker"); + cmd_str = absl::StrFormat("/com.centreon.broker.Broker/%s", cmd); + break; + case CCC_ENGINE: + service_descriptor = p->FindServiceByName("com.centreon.engine.Engine"); + cmd_str = absl::StrFormat("/com.centreon.engine.Engine/%s", cmd); + break; + default: + // Should not occur + assert(1 == 0); + } + auto method = service_descriptor->FindMethodByName(cmd); + if (method == nullptr) + throw com::centreon::exceptions::msg_fmt("The command '{}' doesn't exist", + cmd_str); + + const google::protobuf::Descriptor* input_desc = method->input_type(); + google::protobuf::DynamicMessageFactory factory; + google::protobuf::Message* input_message = + factory.GetPrototype(input_desc)->New(); + google::protobuf::util::JsonParseOptions options; + options.ignore_unknown_fields = false; + options.case_insensitive_enum_parsing = true; + google::protobuf::util::Status status = + google::protobuf::util::JsonStringToMessage(args, input_message, options); + + if (status.code() != google::protobuf::util::status_internal::StatusCode::kOk) + throw com::centreon::exceptions::msg_fmt( + "Error during the execution of '{}' method: {}", cmd_str, + status.ToString()); + + grpc::ByteBuffer request_buf; + bool own_buffer = false; + grpc::ClientContext context; + grpc::Status status_res = grpc::GenericSerialize( + *input_message, 
&request_buf, &own_buffer); + auto resp = _stub->PrepareUnaryCall(&context, cmd_str, request_buf, &_cq); + resp->StartCall(); + grpc::ByteBuffer resp_buf; + resp->Finish(&resp_buf, &status_res, reinterpret_cast(1)); + + void* tag; + bool ok = false; + _cq.Next(&tag, &ok); + grpc::ProtoBufferReader reader(&resp_buf); + + const google::protobuf::Descriptor* output_desc = method->output_type(); + google::protobuf::Message* output_message = + factory.GetPrototype(output_desc)->New(); + + if (output_message->ParseFromZeroCopyStream(&reader)) { + std::string retval; + google::protobuf::util::JsonPrintOptions json_options; + json_options.add_whitespace = true; + auto status = google::protobuf::util::MessageToJsonString( + *output_message, &retval, json_options); + + return retval; + } + return ""; +} + +/** + * @brief Return the description of a message as a list of strings. This is + * needed because this function is recursive. This function should only be + * used by the info_method() method. + * + * @param desc The descriptor of the message + * @param output The output list. + * @param color_enabled Should we use ASCII colors or not? + * @param level the indentation to apply to each line. 
+ */ +static void message_description(const google::protobuf::Descriptor* desc, + std::list& output, + bool color_enabled, + size_t level) { + std::string tab(level, static_cast(' ')); + bool one_of = false; + std::string one_of_name; + for (int i = 0; i < desc->field_count(); i++) { + auto f = desc->field(i); + + auto oof = f->containing_oneof(); + if (!one_of && oof) { + output.emplace_back(absl::StrFormat( + "%s%soneof%s \"%s%s%s\" {", tab, color(color_enabled), + color(color_enabled), color(color_enabled), + oof->name(), color(color_enabled))); + level += 2; + tab += " "; + one_of = true; + } else if (one_of && !oof) { + level -= 2; + tab.resize(tab.size() - 2); + output.emplace_back(absl::StrFormat("%s}", tab)); + one_of = false; + } + + const std::string& entry_name = f->name(); + std::string value; + switch (f->type()) { + case google::protobuf::FieldDescriptor::TYPE_BOOL: + value = "bool"; + break; + case google::protobuf::FieldDescriptor::TYPE_DOUBLE: + value = "double"; + break; + case google::protobuf::FieldDescriptor::TYPE_INT32: + value = "int32"; + break; + case google::protobuf::FieldDescriptor::TYPE_UINT32: + value = "uint32"; + break; + case google::protobuf::FieldDescriptor::TYPE_INT64: + value = "int64"; + break; + case google::protobuf::FieldDescriptor::TYPE_UINT64: + value = "uint64"; + break; + case google::protobuf::FieldDescriptor::TYPE_ENUM: { + output.emplace_back( + absl::StrFormat("%senum \"%s\": {", tab, entry_name)); + auto t_enum = f->enum_type(); + for (int i = 0; i < t_enum->value_count(); i++) { + auto v = t_enum->value(i); + output.emplace_back(absl::StrFormat(" %s%s", tab, v->name())); + } + output.emplace_back(absl::StrFormat("%s}", tab)); + continue; + } break; + case google::protobuf::FieldDescriptor::TYPE_STRING: + value = "string"; + break; + case google::protobuf::FieldDescriptor::TYPE_MESSAGE: + output.emplace_back(absl::StrFormat( + "%s\"%s%s%s\": {", tab, color(color_enabled), + entry_name, color(color_enabled))); + 
message_description(f->message_type(), output, color_enabled, + level + 2); + output.emplace_back(absl::StrFormat("%s}", tab)); + continue; + break; + default: + throw com::centreon::exceptions::msg_fmt("{} not implemented", + f->type()); + break; + } + if (f->is_repeated()) + output.emplace_back(absl::StrFormat( + "%s\"%s%s%s\": [%s%s%s]", tab, color(color_enabled), + entry_name, color(color_enabled), + color(color_enabled), value, + color(color_enabled))); + else + output.emplace_back(absl::StrFormat( + "%s\"%s%s%s\": %s%s%s", tab, color(color_enabled), + entry_name, color(color_enabled), + color(color_enabled), value, + color(color_enabled))); + } +} + +/** + * @brief Return some information about the gRPC method given as a string. + * + * @param cmd The gRPC method to describe. + * + * @return A string with the method informations. + */ +std::string client::info_method(const std::string& cmd) const { + std::string retval; + const google::protobuf::DescriptorPool* p = + google::protobuf::DescriptorPool::generated_pool(); + const google::protobuf::ServiceDescriptor* service_descriptor; + std::string cmd_str; + switch (_server) { + case CCC_BROKER: + service_descriptor = p->FindServiceByName("com.centreon.broker.Broker"); + cmd_str = absl::StrFormat("/com.centreon.broker.Broker/%s", cmd); + break; + case CCC_ENGINE: + service_descriptor = p->FindServiceByName("com.centreon.engine.Engine"); + cmd_str = absl::StrFormat("/com.centreon.engine.Engine/%s", cmd); + break; + default: + // Should not occur + assert(1 == 0); + } + auto method = service_descriptor->FindMethodByName(cmd); + if (method == nullptr) + throw com::centreon::exceptions::msg_fmt("The command '{}' doesn't exist", + cmd_str); + + const google::protobuf::Descriptor* input_message = method->input_type(); + std::list message{absl::StrFormat( + "\"%s%s%s\": {", color(_color_enabled), input_message->name(), + color(_color_enabled))}; + message_description(input_message, message, _color_enabled, 2); + 
message.push_back("}"); + retval = absl::StrJoin(message, "\n"); + return retval; +} diff --git a/ccc/client.hh b/ccc/client.hh new file mode 100644 index 00000000000..ce1132ab961 --- /dev/null +++ b/ccc/client.hh @@ -0,0 +1,62 @@ +/* +** Copyright 2022 Centreon +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +** +** For more information : contact@centreon.com +*/ + +#ifndef _CCC_CLIENT_HH +#define _CCC_CLIENT_HH +#include +#include +#include +#include + +namespace com { +namespace centreon { +namespace ccc { +constexpr const char color_green[] = "\u001b[32;1m"; +constexpr const char color_blue[] = "\u001b[34;1m"; +constexpr const char color_reset[] = "\u001b[0m"; +constexpr const char color_red[] = "\u001b[31;1m"; +constexpr const char color_yellow[] = "\u001b[33;1m"; +constexpr const char color_message[] = "\u001b[32;1m"; +constexpr const char color_method[] = "\u001b[34;1m"; +constexpr const char color_error[] = "\u001b[31;1m"; + +template +const char* color(bool enabled) { + if (enabled) + return C; + else + return ""; +} + +class client { + enum type { CCC_NONE, CCC_BROKER, CCC_ENGINE }; + std::unique_ptr _stub; + type _server; + bool _color_enabled; + grpc::CompletionQueue _cq; + + public: + client(std::shared_ptr channel, bool color_enabled = true); + std::list methods() const; + std::string call(const std::string& cmd, const std::string& args); + std::string info_method(const std::string& cmd) const; +}; +} // namespace ccc +} // namespace centreon 
+} // namespace com +#endif diff --git a/ccc/main.cc b/ccc/main.cc new file mode 100644 index 00000000000..814eaa74075 --- /dev/null +++ b/ccc/main.cc @@ -0,0 +1,165 @@ +/* +** Copyright 2022 Centreon +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +** +** For more information : contact@centreon.com +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "client.hh" + +using namespace nlohmann; +using namespace com::centreon::ccc; + +static struct option long_options[] = { + {"version", no_argument, 0, 'v'}, {"help", no_argument, 0, 'h'}, + {"port", required_argument, 0, 'p'}, {"list", no_argument, 0, 'l'}, + {"nocolor", no_argument, 0, 'n'}, {0, 0, 0, 0}}; + +static void usage(bool color_enabled) { + std::cout << color(color_enabled) + << "Use: " << color(color_enabled) + << "ccc [OPTIONS...] 
[COMMANDS]\n" + "'ccc' uses centreon-broker or centreon-engine gRPC api " + "to communicate with them\n" + "\n" + << color(color_enabled) + << "Options:" << color(color_enabled) + << "\n" + " -v, --version\n" + " Displays the version of ccc.\n" + " -h, --help [COMMAND]\n" + " Displays a general help or a help message on the command.\n" + " -p, --port \n" + " Specifies the gRPC server port to connect to.\n" + " -l, --list\n" + " Displays the available methods.\n" + " -n, --nocolor\n" + " Outputs are displayed with the current color.\n" + "\n" + << color(color_enabled) + << "Examples:" << color(color_enabled) + << "\n" + " ccc -p 51001 --list # Lists available functions " + "from gRPC interface at port 51000\n" + " ccc -p 51001 GetVersion{} # Calls the GetVersion method.\n"; +} + +int main(int argc, char** argv) { + int option_index = 0; + int opt; + int port = 0; + + bool list = false; + bool help = false; + bool color_enabled = true; + + while ((opt = getopt_long(argc, argv, "vhnp:l", long_options, + &option_index)) != -1) { + switch (opt) { + case 'v': + std::cout << "ccc " << CENTREON_CONNECTOR_VERSION << "\n"; + exit(0); + break; + case 'h': + help = true; + break; + case 'n': + color_enabled = false; + break; + case 'p': + if (!absl::SimpleAtoi(optarg, &port)) { + std::cerr << "The option -p expects a port number (ie a positive " + "integer)\n"; + exit(1); + } + break; + case 'l': + list = true; + break; + default: + std::cerr << "Unrecognized argument '" << opt << "'" << std::endl; + exit(3); + } + } + + if (help && optind == argc) { + usage(color_enabled); + exit(0); + } + + if (port == 0) { + std::cerr << "You must specify a port for the connection to the gRPC server" + << std::endl; + exit(2); + } + std::string url{absl::StrFormat("127.0.0.1:%d", port)}; + std::shared_ptr channel = + grpc::CreateChannel(url, grpc::InsecureChannelCredentials()); + + try { + client clt(channel, color_enabled); + if (help) { + std::string 
message{clt.info_method(argv[optind])}; + std::cout << "Input message for this function:\n" << message << std::endl; + exit(0); + } else if (list) { + if (optind < argc) { + std::cerr << "\n" + << color(color_enabled) + << "Error: " << color(color_enabled) + << "The list argument expects no command.\n" + << std::endl; + usage(color_enabled); + exit(4); + } + auto methods{clt.methods()}; + + for (auto& m : methods) + std::cout << " * " << m << std::endl; + } else { + for (int i = optind; i < argc; i++) { + absl::string_view full_cmd{argv[i]}; + size_t first = full_cmd.find_first_not_of(" \t"); + size_t last = full_cmd.find_first_of(" \t\n{(", first); + std::string cmd; + std::string args; + if (last == std::string::npos) + cmd = std::string(full_cmd); + else { + cmd = std::string(full_cmd.substr(first, last)); + args = std::string(full_cmd.substr(last)); + } + std::string res = clt.call(cmd, args); + std::cout << res << std::endl; + } + } + } catch (const std::exception& e) { + std::cerr << color(color_enabled) + << "Error: " << color(color_enabled) << e.what() + << std::endl; + exit(1); + } + + return 0; +} diff --git a/ci/debian/centreon-engine.postinst b/ci/debian/centreon-engine.postinst index 9cdbc14c15e..b717efd54bc 100755 --- a/ci/debian/centreon-engine.postinst +++ b/ci/debian/centreon-engine.postinst @@ -2,7 +2,7 @@ if [ "$1" = "configure" ] ; then if [ ! 
"$(getent passwd centreon-engine)" ]; then - adduser --system --group --home /var/lib/centreon-engine --no-create-home centreon-engine + adduser --system --group --home /var/lib/centreon-engine --shell /bin/bash --no-create-home centreon-engine fi if [ "$(getent passwd centreon)" ]; then usermod -a -G centreon-engine centreon diff --git a/ci/debian/control b/ci/debian/control index c4ed6cc77cb..f00ec96efca 100644 --- a/ci/debian/control +++ b/ci/debian/control @@ -159,3 +159,12 @@ Depends: ${shlibs:Depends} Description: This module of Centreon Broker allows you to write performance data generated by plugins (run themselves by Centreon Engine) to a Graphite database. + +Package: centreon-collect-client +Architecture: any +Depends: + centreon-broker (>= ${centreon:version}~), + centreon-engine (>= ${centreon:version}~), + ${misc:Depends}, + ${shlibs:Depends} +Description: gRPC client to connect to Centreon Broker or Centreon Engine. diff --git a/ci/docker/Dockerfile.collect-centos7-dependencies b/ci/docker/Dockerfile.collect-centos7-dependencies index 4d51cc69bed..3ccdd753a2f 100644 --- a/ci/docker/Dockerfile.collect-centos7-dependencies +++ b/ci/docker/Dockerfile.collect-centos7-dependencies @@ -48,12 +48,10 @@ RUN yum -y install devtoolset-9 \ unzip \ ShellCheck RUN ln -s /usr/bin/cmake3 /usr/bin/cmake -RUN pip3 install conan --prefix=/usr --upgrade -RUN rm -rf ~/.conan/profiles/default COPY conanfile.txt . RUN cat conanfile.txt RUN source /opt/rh/devtoolset-9/enable && source /opt/rh/rh-python38/enable && \ - conan install . -s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build='*' + pip3 install conan --upgrade && conan install . 
-s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build='*' RUN unzip -q sonar-scanner-cli-4.7.0.2747-linux.zip RUN rm -rf sonar-scanner-cli-4.7.0.2747-linux.zip diff --git a/ci/docker/Dockerfile.collect-debian11-dependencies b/ci/docker/Dockerfile.collect-debian11-dependencies index a6227f8cd93..6132883ee1d 100644 --- a/ci/docker/Dockerfile.collect-debian11-dependencies +++ b/ci/docker/Dockerfile.collect-debian11-dependencies @@ -1,6 +1,6 @@ FROM debian:bullseye RUN apt-get -y update && \ - apt-get -y install cmake \ + apt-get -y install cmake \ curl \ gcc \ g++ \ @@ -36,5 +36,3 @@ COPY conanfile.txt . RUN cat conanfile.txt RUN conan install . -s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build='missing' WORKDIR /src - - diff --git a/ci/release/Jenkinsfile b/ci/release/Jenkinsfile index 0dcd36c7019..e862410e553 100644 --- a/ci/release/Jenkinsfile +++ b/ci/release/Jenkinsfile @@ -15,7 +15,7 @@ pipeline { echo 'Releasing Centreon Collect' loadCommonScripts() withCredentials([string(credentialsId: 'download-token', variable: 'DOWNLOAD_TOKEN')]) { - sh './ci/release/collect-release.sh' "$DOWNLOAD_TOKEN" + sh './ci/release/collect-release.sh "$DOWNLOAD_TOKEN"' } } } diff --git a/ci/scripts/collect-sources-analysis.sh b/ci/scripts/collect-sources-analysis.sh index 6771ce0548f..41211031653 100755 --- a/ci/scripts/collect-sources-analysis.sh +++ b/ci/scripts/collect-sources-analysis.sh @@ -22,6 +22,7 @@ fi DISTRIB=$( lsb_release -rs | cut -f1 -d. 
) if [[ "$DISTRIB" = "7" ]] ; then source /opt/rh/devtoolset-9/enable + source /opt/rh/rh-python38/enable fi # Prepare compilation diff --git a/ci/scripts/collect-test-robot.sh b/ci/scripts/collect-test-robot.sh index 1704f8c0c08..6c7042acb11 100755 --- a/ci/scripts/collect-test-robot.sh +++ b/ci/scripts/collect-test-robot.sh @@ -33,12 +33,15 @@ unzip artifact.zip yum install -y https://yum.centreon.com/standard/22.04/el7/stable/noarch/RPMS/centreon-release-22.04-3.el7.centos.noarch.rpm yum install -y centreon-common cd artifacts -rm -rf centreon-collect-* -rpm -i centreon*.el7.x86_64.rpm +rpm -i centreon-broker*.el7.x86_64.rpm \ + centreon-clib*.el7.x86_64.rpm \ + centreon-engine*.el7.x86_64.rpm \ + centreon-connector*.el7.x86_64.rpm \ + centreon-collect-client*.el7.x86_64.rpm echo "########################### install robot framework ############################" cd /src/tests/ -pip3 install -U robotframework robotframework-databaselibrary pymysql +pip3 install -U robotframework robotframework-databaselibrary pymysql python-dateutil yum install "Development Tools" python3-devel -y diff --git a/ci/scripts/collect-unit-tests.sh b/ci/scripts/collect-unit-tests.sh index 926161c6317..e99c2c3449b 100755 --- a/ci/scripts/collect-unit-tests.sh +++ b/ci/scripts/collect-unit-tests.sh @@ -8,11 +8,12 @@ cd /src/build/ DISTRIB=$(lsb_release -rs | cut -f1 -d.) if [ "$DISTRIB" = "7" ] ; then source /opt/rh/devtoolset-9/enable -fi + source /opt/rh/rh-python38/enable +fi conan install .. 
-s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build=missing if [ $(cat /etc/issue | awk '{print $1}') = "Debian" ] ; then CXXFLAGS="-Wall -Wextra" cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DWITH_CENTREON_CLIB_INCLUDE_DIR=../clib/inc/ -DWITH_CENTREON_CLIB_LIBRARIES=centreon-clib/libcentreon_clib.so -DCMAKE_BUILD_TYPE=Debug -DWITH_PREFIX=/usr -DWITH_PREFIX_BIN=/usr/sbin -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker -DWITH_PREFIX_CONF_BROKER=/etc/centreon-broker -DWITH_PREFIX_LIB_BROKER=/usr/lib64/nagios -DWITH_PREFIX_CONF_ENGINE=/etc/centreon-engine -DWITH_PREFIX_LIB_ENGINE=/usr/lib64/centreon-engine -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_RW_DIR=/var/lib/centreon-engine/rw -DWITH_VAR_DIR=/var/log/centreon-engine -DWITH_MODULE_SIMU=On .. -else +else CXXFLAGS="-Wall -Wextra" cmake3 -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DWITH_CENTREON_CLIB_INCLUDE_DIR=../clib/inc/ -DWITH_CENTREON_CLIB_LIBRARIES=centreon-clib/libcentreon_clib.so -DCMAKE_BUILD_TYPE=Debug -DWITH_PREFIX=/usr -DWITH_PREFIX_BIN=/usr/sbin -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker -DWITH_PREFIX_CONF_BROKER=/etc/centreon-broker -DWITH_PREFIX_LIB_BROKER=/usr/lib64/nagios -DWITH_PREFIX_CONF_ENGINE=/etc/centreon-engine -DWITH_PREFIX_LIB_ENGINE=/usr/lib64/centreon-engine -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_RW_DIR=/var/lib/centreon-engine/rw -DWITH_VAR_DIR=/var/log/centreon-engine -DWITH_MODULE_SIMU=On .. 
fi @@ -26,4 +27,4 @@ tests/ut_broker --gtest_output=xml:ut_broker.xml tests/ut_engine --gtest_output=xml:ut_engine.xml tests/ut_clib --gtest_output=xml:ut_clib.xml tests/ut_connector --gtest_output=xml:ut_connector.xml -echo "---------------------------------------------------------- end of ut tests ------------------------------------------------" \ No newline at end of file +echo "---------------------------------------------------------- end of ut tests ------------------------------------------------" diff --git a/broker/neb/inc/com/centreon/exceptions/msg_fmt.hh b/clib/inc/com/centreon/exceptions/msg_fmt.hh similarity index 100% rename from broker/neb/inc/com/centreon/exceptions/msg_fmt.hh rename to clib/inc/com/centreon/exceptions/msg_fmt.hh diff --git a/clib/inc/com/centreon/timestamp.hh b/clib/inc/com/centreon/timestamp.hh index f1e6382d281..64e03272482 100644 --- a/clib/inc/com/centreon/timestamp.hh +++ b/clib/inc/com/centreon/timestamp.hh @@ -21,6 +21,7 @@ #include #include +#include #include "com/centreon/namespace.hh" CC_BEGIN() @@ -39,6 +40,8 @@ class timestamp { public: timestamp(time_t secs = 0, int32_t usecs = 0); timestamp(const timestamp& right); + timestamp(const struct timeval& right) + : _secs(right.tv_sec), _usecs(right.tv_usec) {} ~timestamp() noexcept = default; timestamp& operator=(const timestamp& right); bool operator==(const timestamp& right) const noexcept; @@ -66,6 +69,8 @@ class timestamp { int64_t to_useconds() const noexcept; }; +std::ostream& operator<<(std::ostream& s, const timestamp& to_dump); + CC_END() #endif // !CC_TIMESTAMP_HH diff --git a/clib/src/timestamp.cc b/clib/src/timestamp.cc index 7dc7d002fed..88a771b8f6b 100644 --- a/clib/src/timestamp.cc +++ b/clib/src/timestamp.cc @@ -319,3 +319,18 @@ time_t timestamp::to_seconds() const noexcept { int64_t timestamp::to_useconds() const noexcept { return _secs * 1000000ll + _usecs; } + +CC_BEGIN() + +std::ostream& operator<<(std::ostream& s, const timestamp& to_dump) { + struct 
tm tmp; + time_t seconds = to_dump.to_seconds(); + localtime_r(&seconds, &tmp); + char buf[80]; + strftime(buf, sizeof(buf), "%c: ", &tmp); + s << buf; + + return s; +} + +CC_END() diff --git a/conanfile.txt b/conanfile.txt index 4cea37f18e9..f98bc89b18e 100644 --- a/conanfile.txt +++ b/conanfile.txt @@ -4,8 +4,8 @@ asio/1.22.1 fmt/8.1.1 spdlog/1.10.0 nlohmann_json/3.10.5 -openssl/1.1.1n -grpc/1.43.0 +openssl/1.1.1o +grpc/1.46.3 mariadb-connector-c/3.1.12 zlib/1.2.12 boost/1.79.0 @@ -15,7 +15,13 @@ libssh2/1.10.0 cmake [options] -grpc:secure=True boost:header_only=True +grpc:csharp_ext=False +grpc:csharp_plugin=False +grpc:node_plugin=False +grpc:objective_c_plugin=False +grpc:php_plugin=False +grpc:python_plugin=True +grpc:ruby_plugin=False +grpc:secure=True libssh2:shared=False - diff --git a/connectors/CMakeLists.txt b/connectors/CMakeLists.txt index 810eb444651..07ca20c11eb 100644 --- a/connectors/CMakeLists.txt +++ b/connectors/CMakeLists.txt @@ -74,6 +74,7 @@ if (WITH_TESTING) ${PROJECT_SOURCE_DIR}/ssh/test/orders.cc ${PROJECT_SOURCE_DIR}/ssh/test/reporter.cc ${PROJECT_SOURCE_DIR}/ssh/test/sessions.cc + ${PROJECT_SOURCE_DIR}/ssh/test/options.cc ) add_dependencies(ut_connector centreon_connector_perl) add_dependencies(ut_connector centreon_clib) diff --git a/connectors/ssh/CMakeLists.txt b/connectors/ssh/CMakeLists.txt index 68bdecf2fbb..8328a8ccf84 100644 --- a/connectors/ssh/CMakeLists.txt +++ b/connectors/ssh/CMakeLists.txt @@ -53,7 +53,7 @@ add_executable(centreon_connector_ssh ) add_dependencies(centreon_connector_ssh centreon_clib) target_link_libraries(centreon_connector_ssh - centreon_clib CONAN_PKG::spdlog CONAN_PKG::fmt CONAN_PKG::libssh2 pthread) + centreon_clib CONAN_PKG::spdlog CONAN_PKG::fmt CONAN_PKG::libssh2 CONAN_PKG::abseil pthread) diff --git a/connectors/ssh/src/orders/options.cc b/connectors/ssh/src/orders/options.cc index 55fc1a8d4b1..5ad574d0cfd 100644 --- a/connectors/ssh/src/orders/options.cc +++ b/connectors/ssh/src/orders/options.cc 
@@ -26,6 +26,7 @@ #endif // Windows or POSIX. #include #include +#include "absl/strings/numbers.h" #include "com/centreon/connector/ssh/orders/options.hh" #include "com/centreon/exceptions/basic.hh" #include "com/centreon/misc/command_line.hh" @@ -217,8 +218,18 @@ void options::parse(std::string const& cmdline) { break; case 'p': // Set port. - _port = atoi(optarg); - break; + { + unsigned int temp; + if (!absl::SimpleAtoi(optarg, &temp)) { + throw basic_error() << "the argument '" << optarg + << "' must be an unsigned short integer"; + } + if (temp > 65535) { + throw basic_error() << "the argument '" << optarg + << "' must be an integer between 0 and 65535"; + } + _port = temp; + } break; case '4': // Enable IPv4. _ip_protocol = ip_v4; @@ -237,7 +248,12 @@ void options::parse(std::string const& cmdline) { break; case 'E': // Skip stderr. - _skip_stderr = (optarg ? atoi(optarg) : 0); + if (!optarg) + _skip_stderr = 0; + else if (!absl::SimpleAtoi(optarg, &_skip_stderr)) { + throw basic_error() + << "the argument '" << optarg << "' must be an integer"; + } break; case 'f': // Fork ssh. @@ -265,11 +281,19 @@ void options::parse(std::string const& cmdline) { break; case 'S': // Skip stdout. - _skip_stdout = (optarg ? atoi(optarg) : 0); + if (!optarg) + _skip_stdout = 0; + else if (!absl::SimpleAtoi(optarg, &_skip_stdout)) { + throw basic_error() + << "the argument '" << optarg << "' must be an integer"; + } break; case 't': // Set timeout. - _timeout = atoi(optarg); + if (!absl::SimpleAtoi(optarg, &_timeout)) { + throw basic_error() + << "the argument '" << optarg << "' must be an unsigned integer"; + } break; case 'h': // Help. 
diff --git a/connectors/ssh/test/options.cc b/connectors/ssh/test/options.cc new file mode 100644 index 00000000000..377ff01999a --- /dev/null +++ b/connectors/ssh/test/options.cc @@ -0,0 +1,46 @@ +/* + * Copyright 2022 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +#include "com/centreon/connector/ssh/namespace.hh" +#include "com/centreon/connector/ssh/orders/options.hh" + +using namespace com::centreon::connector::orders; + +TEST(SSHOptions, cmdline_bad_port) { + // Base object. + std::string cmd("toto -p 1234567"); + std::unique_ptr opt; + ASSERT_THROW(std::make_unique(cmd), std::exception); +} + +TEST(SSHOptions, cmdline_bad_port_negative) { + // Base object. + std::string cmd("toto -p -123"); + std::unique_ptr opt; + ASSERT_THROW(std::make_unique(cmd), std::exception); +} + +TEST(SSHOptions, cmdline_good_port) { + // Base object. + std::string cmd("toto -p 14567"); + std::unique_ptr opt; + ASSERT_NO_THROW(std::make_unique(cmd)); +} diff --git a/engine/CMakeLists.txt b/engine/CMakeLists.txt index 5c642ff06a2..f451dc351af 100644 --- a/engine/CMakeLists.txt +++ b/engine/CMakeLists.txt @@ -1,21 +1,21 @@ -## -## Copyright 2011-2021 Centreon -## -## This file is part of Centreon Engine. 
-## -## Centreon Engine is free software: you can redistribute it and/or -## modify it under the terms of the GNU General Public License version 2 -## as published by the Free Software Foundation. -## -## Centreon Engine is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## General Public License for more details. -## -## You should have received a copy of the GNU General Public License -## along with Centreon Engine. If not, see -## . -## +# # +# # Copyright 2011-2021 Centreon +# # +# # This file is part of Centreon Engine. +# # +# # Centreon Engine is free software: you can redistribute it and/or +# # modify it under the terms of the GNU General Public License version 2 +# # as published by the Free Software Foundation. +# # +# # Centreon Engine is distributed in the hope that it will be useful, +# # but WITHOUT ANY WARRANTY; without even the implied warranty of +# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# # General Public License for more details. +# # +# # You should have received a copy of the GNU General Public License +# # along with Centreon Engine. If not, see +# # . +# # # # Global settings. @@ -25,18 +25,19 @@ project("Centreon Engine" C CXX) # set -latomic if OS is Raspbian. -if (CMAKE_SYSTEM_PROCESSOR MATCHES "arm") +if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm") set(CMAKE_CXX_LINK_FLAGS "${CMAKE_CXX_LINK_FLAGS} -latomic") -endif () +endif() # With libasan option(WITH_ASAN "Add the libasan to check memory leaks and other memory issues." 
OFF) -if (WITH_ASAN) + +if(WITH_ASAN) set(CMAKE_BUILD_TYPE Debug) - set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address") - set (CMAKE_LINKER_FLAGS_DEBUG - "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address") -endif () + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address") + set(CMAKE_LINKER_FLAGS_DEBUG + "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address") +endif() set(INC_DIR "${PROJECT_SOURCE_DIR}/inc") set(SCRIPT_DIR "${PROJECT_SOURCE_DIR}/scripts") @@ -51,11 +52,12 @@ include_directories("${INC_DIR}/compatibility") link_directories(${CMAKE_SOURCE_DIR}/build/centreon-clib/) # Version. -if (CENTREON_ENGINE_PRERELEASE) +if(CENTREON_ENGINE_PRERELEASE) set(CENTREON_ENGINE_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}-${CENTREON_ENGINE_PRERELEASE}") -else () +else() set(CENTREON_ENGINE_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") endif() + message(STATUS "Generating version header (${CENTREON_ENGINE_VERSION}).") configure_file("${INC_DIR}/com/centreon/engine/version.hh.in" "${INC_DIR}/com/centreon/engine/version.hh") @@ -68,73 +70,80 @@ configure_file("${INC_DIR}/com/centreon/engine/version.hh.in" include(CheckLibraryExists) message(STATUS "Checking for libm.") check_library_exists("m" "ceil" "${CMAKE_LIBRARY_PATH}" MATH_LIB_FOUND) -if (MATH_LIB_FOUND) + +if(MATH_LIB_FOUND) set(MATH_LIBRARIES "m") -endif () +endif() + message(STATUS "Checking for libnsl.") check_library_exists("nsl" "getservbyname" "${CMAKE_LIBRARY_PATH}" NSL_LIB_FOUND) -if (NSL_LIB_FOUND) + +if(NSL_LIB_FOUND) set(NSL_LIBRARIES "nsl") -endif () +endif() + message(STATUS "Checking for libsocket.") check_library_exists("socket" "connect" "${CMAKE_LIBRARY_PATH}" SOCKET_LIB_FOUND) -if (SOCKET_LIB_FOUND) + +if(SOCKET_LIB_FOUND) set(SOCKET_LIBRARIES "socket") -endif () +endif() # Find pthreads. 
set(CMAKE_THREAD_PREFER_PTHREAD TRUE) include(FindThreads) -if (NOT CMAKE_USE_PTHREADS_INIT) + +if(NOT CMAKE_USE_PTHREADS_INIT) message(FATAL_ERROR "Could not find pthread's library.") -endif () +endif() + set(PTHREAD_LIBRARIES "${CMAKE_THREAD_LIBS_INIT}") -## Find Centreon Clib's headers. -#if (WITH_CENTREON_CLIB_INCLUDE_DIR) -# find_file( -# CLIB_HEADER_FOUND -# "com/centreon/clib/version.hh" -# PATHS "${WITH_CENTREON_CLIB_INCLUDE_DIR}" -# NO_DEFAULT_PATH) -# if (NOT CLIB_HEADER_FOUND) -# message(FATAL_ERROR "Could not find Centreon Clib's headers in ${WITH_CENTREON_CLIB_INCLUDE_DIR}.") -# endif () -# set(CLIB_INCLUDE_DIR "${WITH_CENTREON_CLIB_INCLUDE_DIR}") -#elseif (CLIB_FOUND) # Was Centreon Clib detected with pkg-config ? -# if (CMAKE_CXX_FLAGS) -# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CLIB_CFLAGS}") -# else () -# set(CMAKE_CXX_FLAGS "${CLIB_CFLAGS}") -# endif () -#else () -# find_path(CLIB_INCLUDE_DIR "com/centreon/clib/version.hh" PATH_SUFFIXES "centreon-clib") -# if (NOT CLIB_INCLUDE_DIR) -# message(FATAL_ERROR "Could not find Centreon Clib's headers (try WITH_CENTREON_CLIB_INCLUDE_DIR).") -# endif () -#endif () +# # Find Centreon Clib's headers. +# if (WITH_CENTREON_CLIB_INCLUDE_DIR) +# find_file( +# CLIB_HEADER_FOUND +# "com/centreon/clib/version.hh" +# PATHS "${WITH_CENTREON_CLIB_INCLUDE_DIR}" +# NO_DEFAULT_PATH) +# if (NOT CLIB_HEADER_FOUND) +# message(FATAL_ERROR "Could not find Centreon Clib's headers in ${WITH_CENTREON_CLIB_INCLUDE_DIR}.") +# endif () +# set(CLIB_INCLUDE_DIR "${WITH_CENTREON_CLIB_INCLUDE_DIR}") +# elseif (CLIB_FOUND) # Was Centreon Clib detected with pkg-config ? 
+# if (CMAKE_CXX_FLAGS) +# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CLIB_CFLAGS}") +# else () +# set(CMAKE_CXX_FLAGS "${CLIB_CFLAGS}") +# endif () +# else () +# find_path(CLIB_INCLUDE_DIR "com/centreon/clib/version.hh" PATH_SUFFIXES "centreon-clib") +# if (NOT CLIB_INCLUDE_DIR) +# message(FATAL_ERROR "Could not find Centreon Clib's headers (try WITH_CENTREON_CLIB_INCLUDE_DIR).") +# endif () +# endif () include_directories(${CMAKE_SOURCE_DIR}/clib/inc) -## Find Centreon Clib's library. -#if (WITH_CENTREON_CLIB_LIBRARIES) -# set(CLIB_LIBRARIES "${WITH_CENTREON_CLIB_LIBRARIES}") -#elseif (WITH_CENTREON_CLIB_LIBRARY_DIR) -# find_library( -# CLIB_LIBRARIES -# "centreon_clib" -# PATHS "${WITH_CENTREON_CLIB_LIBRARY_DIR}" -# NO_DEFAULT_PATH) -# if (NOT CLIB_LIBRARIES) -# message(FATAL_ERROR "Could not find Centreon Clib's library in ${WITH_CENTREON_CLIB_LIBRARY_DIR}.") -# endif () -#elseif (CLIB_FOUND) # Was Centreon Clib detected with pkg-config ? -# set(CLIB_LIBRARIES "${CLIB_LDFLAGS}") -#else () -# find_library(CLIB_LIBRARIES "centreon_clib") -# if (NOT CLIB_LIBRARIES) -# message(FATAL_ERROR "Could not find Centreon Clib's library (try WITH_CENTREON_CLIB_LIBRARY_DIR or WITH_CENTREON_CLIB_LIBRARIES).") -# endif () -#endif () +# # Find Centreon Clib's library. +# if (WITH_CENTREON_CLIB_LIBRARIES) +# set(CLIB_LIBRARIES "${WITH_CENTREON_CLIB_LIBRARIES}") +# elseif (WITH_CENTREON_CLIB_LIBRARY_DIR) +# find_library( +# CLIB_LIBRARIES +# "centreon_clib" +# PATHS "${WITH_CENTREON_CLIB_LIBRARY_DIR}" +# NO_DEFAULT_PATH) +# if (NOT CLIB_LIBRARIES) +# message(FATAL_ERROR "Could not find Centreon Clib's library in ${WITH_CENTREON_CLIB_LIBRARY_DIR}.") +# endif () +# elseif (CLIB_FOUND) # Was Centreon Clib detected with pkg-config ? 
+# set(CLIB_LIBRARIES "${CLIB_LDFLAGS}") +# else () +# find_library(CLIB_LIBRARIES "centreon_clib") +# if (NOT CLIB_LIBRARIES) +# message(FATAL_ERROR "Could not find Centreon Clib's library (try WITH_CENTREON_CLIB_LIBRARY_DIR or WITH_CENTREON_CLIB_LIBRARIES).") +# endif () +# endif () # Check functions. include(CheckIncludeFileCXX) @@ -143,21 +152,26 @@ include(CheckStructHasMember) message(STATUS "Checking for tm_zone member in tm struct.") check_struct_has_member("tm" "tm_zone" "time.h" HAVE_TM_ZONE) -if (HAVE_TM_ZONE) + +if(HAVE_TM_ZONE) add_definitions(-DHAVE_TM_ZONE) -endif () +endif() + include(CheckSymbolExists) message(STATUS "Checking for symbol tzname.") check_symbol_exists("tzname" "time.h" HAVE_TZNAME) -if (HAVE_TZNAME) + +if(HAVE_TZNAME) add_definitions(-DHAVE_TZNAME) -endif () +endif() + message(STATUS "Checking for function getopt_long.") check_include_file_cxx("getopt.h" HAVE_GETOPT_H) check_function_exists("getopt_long" HAVE_GETOPT_LONG) -if (HAVE_GETOPT_H AND HAVE_GETOPT_LONG) + +if(HAVE_GETOPT_H AND HAVE_GETOPT_LONG) add_definitions(-DHAVE_GETOPT_H) -endif () +endif() # # Options. @@ -172,60 +186,61 @@ set(CREATE_FILES "${WITH_CREATE_FILES}") set(PREFIX_ENGINE_CONF "${CMAKE_INSTALL_FULL_SYSCONFDIR}/centreon-engine") -## Library directory. -#if (WITH_PREFIX_LIB_ENGINE) -# set(PREFIX_LIB "${WITH_PREFIX_LIB_ENGINE}") -#else () -# set(PREFIX_LIB "${CMAKE_INSTALL_PREFIX}/lib/centreon-engine") -#endif () +# # Library directory. +# if (WITH_PREFIX_LIB_ENGINE) +# set(PREFIX_LIB "${WITH_PREFIX_LIB_ENGINE}") +# else () +# set(PREFIX_LIB "${CMAKE_INSTALL_PREFIX}/lib/centreon-engine") +# endif () # - # User used to run Centreon Engine. -if (WITH_USER_ENGINE) +if(WITH_USER_ENGINE) set(USER "${WITH_USER_ENGINE}") -else () +else() set(USER "root") -endif () +endif() # Group used to run Centreon Engine. 
-if (WITH_GROUP_ENGINE) +if(WITH_GROUP_ENGINE) set(GROUP "${WITH_GROUP_ENGINE}") -else () +else() set(GROUP "root") -endif () + +endif() + # Set startup script to auto if not define. -if (NOT WITH_STARTUP_SCRIPT) +if(NOT WITH_STARTUP_SCRIPT) set(WITH_STARTUP_SCRIPT "auto") -endif () +endif() # Check which startup script to use. -if (WITH_STARTUP_SCRIPT STREQUAL "auto") - if (CMAKE_SYSTEM_NAME STREQUAL "Linux") - if (OS_DISTRIBUTOR STREQUAL "Ubuntu") +if(WITH_STARTUP_SCRIPT STREQUAL "auto") + if(CMAKE_SYSTEM_NAME STREQUAL "Linux") + if(OS_DISTRIBUTOR STREQUAL "Ubuntu") set(WITH_STARTUP_SCRIPT "upstart") - else () + else() set(WITH_STARTUP_SCRIPT "sysv") - endif () - else () + endif() + else() message(STATUS "Centreon Engine does not provide startup script for ${CMAKE_SYSTEM_NAME}.") - endif () -endif () + endif() +endif() # Create upstart file. -if (WITH_STARTUP_SCRIPT STREQUAL "upstart") +if(WITH_STARTUP_SCRIPT STREQUAL "upstart") # Generate Upstart script. message(STATUS "Generating upstart script.") configure_file("${SCRIPT_DIR}/upstart.conf.in" "${SCRIPT_DIR}/upstart.conf") # Startup dir. - if (WITH_STARTUP_DIR) + if(WITH_STARTUP_DIR) set(STARTUP_DIR "${WITH_STARTUP_DIR}") - else () + else() set(STARTUP_DIR "/etc/init") - endif () + endif() # Script install rule. install(FILES "${SCRIPT_DIR}/upstart.conf" @@ -237,27 +252,29 @@ if (WITH_STARTUP_SCRIPT STREQUAL "upstart") set(STARTUP_SCRIPT "Upstart configuration file") # Create SysV start script. -elseif (WITH_STARTUP_SCRIPT STREQUAL "sysv") +elseif(WITH_STARTUP_SCRIPT STREQUAL "sysv") # Lock file. 
- if (WITH_LOCK_FILE) + if(WITH_LOCK_FILE) set(LOCK_FILE "${WITH_LOCK_FILE}") - else () - if (OS_DISTRIBUTOR STREQUAL "Ubuntu" - OR OS_DISTRIBUTOR STREQUAL "Debian" - OR OS_DISTRIBUTOR STREQUAL "SUSE LINUX") + else() + if(OS_DISTRIBUTOR STREQUAL "Ubuntu" + OR OS_DISTRIBUTOR STREQUAL "Debian" + OR OS_DISTRIBUTOR STREQUAL "SUSE LINUX") set(LOCK_FILE "/var/lock/centengine.lock") - else () + else() set(LOCK_FILE "/var/lock/subsys/centengine.lock") - endif () - endif () + endif() + endif() + string(REGEX REPLACE "/[^/]*$" "" LOCK_DIR "${LOCK_FILE}") # PID file. - if (WITH_PID_FILE) + if(WITH_PID_FILE) set(PID_FILE "${WITH_PID_FILE}") - else () + else() set(PID_FILE "/var/run/centengine.pid") - endif () + endif() + string(REGEX REPLACE "/[^/]*$" "" PID_DIR "${PID_FILE}") # Generate SysV script. @@ -266,11 +283,11 @@ elseif (WITH_STARTUP_SCRIPT STREQUAL "sysv") "${SCRIPT_DIR}/centengine.sh") # Startup dir. - if (WITH_STARTUP_DIR) + if(WITH_STARTUP_DIR) set(STARTUP_DIR "${WITH_STARTUP_DIR}") - else () + else() set(STARTUP_DIR "/etc/init.d") - endif () + endif() # Script install rule. install(PROGRAMS "${SCRIPT_DIR}/centengine.sh" @@ -282,18 +299,18 @@ elseif (WITH_STARTUP_SCRIPT STREQUAL "sysv") set(STARTUP_SCRIPT "SysV-style script") # Create Systemd start script. -elseif (WITH_STARTUP_SCRIPT STREQUAL "systemd") +elseif(WITH_STARTUP_SCRIPT STREQUAL "systemd") # Generate Systemd script. message(STATUS "Generating systemd startup script.") configure_file("${SCRIPT_DIR}/centengine.service.in" "${SCRIPT_DIR}/centengine.service") # Startup dir. - if (WITH_STARTUP_DIR) + if(WITH_STARTUP_DIR) set(STARTUP_DIR "${WITH_STARTUP_DIR}") - else () + else() set(STARTUP_DIR "/etc/systemd/system") - endif () + endif() # Script install rule. install(PROGRAMS "${SCRIPT_DIR}/centengine.service" @@ -303,33 +320,35 @@ elseif (WITH_STARTUP_SCRIPT STREQUAL "systemd") # String printed in summary. set(STARTUP_SCRIPT "Systemd script") -else () +else() # Default. 
message(STATUS "Invalid value for option WITH_STARTUP_SCRIPT (must be one of 'auto', 'sysv' or 'upstart').") set(STARTUP_SCRIPT "disabled") -endif () +endif() # logrotate directory. option(WITH_ENGINE_LOGROTATE_SCRIPT "Generate and install logrotate script." OFF) -if (WITH_ENGINE_LOGROTATE_SCRIPT) + +if(WITH_ENGINE_LOGROTATE_SCRIPT) # Generate logrotate file. message(STATUS "Generating logrorate file.") - if (WITH_STARTUP_SCRIPT STREQUAL "upstart") + + if(WITH_STARTUP_SCRIPT STREQUAL "upstart") configure_file( "${SCRIPT_DIR}/logrotate_upstart.conf.in" "${SCRIPT_DIR}/logrotate.conf" @ONLY) - elseif (WITH_STARTUP_SCRIPT STREQUAL "systemd") + elseif(WITH_STARTUP_SCRIPT STREQUAL "systemd") configure_file( "${SCRIPT_DIR}/logrotate_systemd.conf.in" "${SCRIPT_DIR}/logrotate.conf" @ONLY) - else () + else() configure_file( "${SCRIPT_DIR}/logrotate_sysv.conf.in" "${SCRIPT_DIR}/logrotate.conf" @ONLY) - endif () + endif() # logrotate file install directory. set(LOGROTATE_DIR "${CMAKE_INSTALL_FULL_SYSCONFDIR}/logrotate.d") @@ -340,30 +359,33 @@ if (WITH_ENGINE_LOGROTATE_SCRIPT) DESTINATION "${LOGROTATE_DIR}" COMPONENT "runtime" RENAME "centengine" - ) -endif () + ) +endif() option(WITH_SHARED_LIB "Define if the core library is to be build as a shared object or a static library." OFF) -if (WITH_SHARED_LIB) + +if(WITH_SHARED_LIB) set(LIBRARY_TYPE SHARED) -else () +else() set(LIBRARY_TYPE STATIC) -endif () +endif() # Simumod module to simulate cbmod and catch its output option(WITH_SIMU "Add a module only used for tests to see data that cbmod should receive" OFF) -if (WITH_SIMU) + +if(WITH_SIMU) set(CMAKE_BUILD_TYPE "Debug") add_subdirectory(src/simumod) -endif () +endif() # DEBUG_CONFIG enables checks on configuration. Those checks are not free and # may slow down engine reloads. But it provides a way to check bugs in # the configuration system. option(WITH_DEBUG_CONFIG "Enables checks on configuration. This is an option for developers." 
OFF) -if (WITH_DEBUG_CONFIG) + +if(WITH_DEBUG_CONFIG) add_definitions(-DDEBUG_CONFIG) -endif () +endif() # Configure files. configure_file("${INC_DIR}/compatibility/common.h.in" @@ -379,14 +401,13 @@ add_definitions(-DDEFAULT_COMMAND_FILE="${ENGINE_VAR_LIB_DIR}/rw/centengine.cmd" add_definitions(-DDEFAULT_CONFIG_FILE="${PREFIX_ENGINE_CONF}/centengine.cfg") # Add specific linker flags for Mac OS to build correctly shared libraries. -if (APPLE) +if(APPLE) set(CMAKE_SHARED_LINKER_FLAGS "-Wl,-undefined -Wl,dynamic_lookup") -endif () +endif() # # Targets. # - set( FILES @@ -496,9 +517,11 @@ set( # Subdirectories with core features. add_subdirectory(src/broker) add_subdirectory(src/checks) -if (WITH_CONF) + +if(WITH_CONF) add_subdirectory(conf) -endif () +endif() + add_subdirectory(src/downtimes) add_subdirectory(src/configuration) add_subdirectory(src/commands) @@ -519,7 +542,6 @@ add_dependencies(cce_core centreon_clib) target_precompile_headers(cce_core PRIVATE ${PRECOMP_HEADER}) - # Link target with required libraries. target_link_libraries(cce_core CONAN_PKG::nlohmann_json ${MATH_LIBRARIES} @@ -536,7 +558,7 @@ set_property(TARGET "centengine" PROPERTY ENABLE_EXPORTS "1") add_dependencies(centengine centreon_clib) # Link centengine with required libraries. -target_link_libraries(centengine "-export-dynamic" centreon_clib "-Wl,-whole-archive" enginerpc cce_core "-Wl,-no-whole-archive" -L${CONAN_LIB_DIRS_GRPC} "-Wl,--whole-archive" grpc++ "-Wl,--no-whole-archive" CONAN_PKG::grpc ${absl_LIBS} CONAN_PKG::openssl ${c-ares_LIBS} CONAN_PKG::zlib dl) +target_link_libraries(centengine "-export-dynamic" centreon_clib "-Wl,-whole-archive" enginerpc cce_core "-Wl,-no-whole-archive" -L${CONAN_LIB_DIRS_GRPC} "-Wl,--whole-archive" grpc++ "-Wl,--no-whole-archive" CONAN_PKG::grpc ${absl_LIBS} CONAN_PKG::openssl ${c-ares_LIBS} CONAN_PKG::zlib dl) # centenginestats target. 
add_executable("centenginestats" "${SRC_DIR}/centenginestats.cc") @@ -556,8 +578,8 @@ install(TARGETS "centengine" "centenginestats" DESTINATION "${CMAKE_INSTALL_FULL_SBINDIR}" COMPONENT "runtime") -## Create directories. -if (CREATE_FILES) +# # Create directories. +if(CREATE_FILES) install(CODE " function(mkdir_chown user group path) if (APPLE OR (UNIX AND NOT CYGWIN)) @@ -598,7 +620,7 @@ if (CREATE_FILES) touch_chown(\"${USER}\" \"${GROUP}\" \"${ENGINE_VAR_LOG_DIR}/centengine.debug\") touch_chown(\"${USER}\" \"${GROUP}\" \"${ENGINE_VAR_LOG_DIR}/retention.dat\") ") -endif () +endif() # Install header files for development. install(DIRECTORY "${INC_DIR}/" @@ -611,13 +633,11 @@ install(DIRECTORY "${INC_DIR}/" # # Packaging. # - include(cmake/package.cmake) # # Print summary. # - message(STATUS "") message(STATUS "") message(STATUS "Configuration Summary") @@ -635,47 +655,59 @@ message(STATUS "") message(STATUS " Build") message(STATUS " - Compiler ${CMAKE_CXX_COMPILER} (${CMAKE_CXX_COMPILER_ID})") message(STATUS " - Extra compilation flags ${CMAKE_CXX_FLAGS}") -if (WITH_SHARED_LIB) + +if(WITH_SHARED_LIB) message(STATUS " - Build static core library no") -else () +else() message(STATUS " - Build static core library yes") -endif () +endif() + message(STATUS " - External commands module enabled") -if (WITH_TESTING) + +if(WITH_TESTING) message(STATUS " - Unit tests enabled") - if (WITH_COVERAGE) + + if(WITH_COVERAGE) message(STATUS " - Code coverage enabled") - else () + else() message(STATUS " - Code coverage disabled") - endif () -else () + endif() +else() message(STATUS " - Unit tests disabled") -endif () -if (WITH_ENGINE_LOGROTATE_SCRIPT) +endif() + +if(WITH_ENGINE_LOGROTATE_SCRIPT) message(STATUS " - logrotate script enabled") -else () +else() message(STATUS " - logrotate script disabled") -endif () +endif() + message(STATUS " - Startup script ${STARTUP_SCRIPT}") message(STATUS "") message(STATUS " Install") -#message(STATUS " - Prefix ${CMAKE_INSTALL_PREFIX}") + 
+# message(STATUS " - Prefix ${CMAKE_INSTALL_PREFIX}") message(STATUS " - Binary prefix ${CMAKE_INSTALL_FULL_SBINDIR}") message(STATUS " - Configuration prefix ${PREFIX_ENGINE_CONF}") -#message(STATUS " - Library prefix ${PREFIX_LIB}") + +# message(STATUS " - Library prefix ${PREFIX_LIB}") message(STATUS " - Include prefix ${CMAKE_INSTALL_FULL_INCLUDEDIR}/centreon-engine") message(STATUS " - var directory ${VAR_DIR}") message(STATUS " - Log archive directory ${ENGINE_VAR_LOG_ARCHIVE_DIR}") message(STATUS " - RW directory ${ENGINE_VAR_LIB_DIR}/rw") -if (LOCK_FILE) + +if(LOCK_FILE) message(STATUS " - Lock prefix ${LOCK_FILE}") -endif () -if (WITH_ENGINE_LOGROTATE_SCRIPT) +endif() + +if(WITH_ENGINE_LOGROTATE_SCRIPT) message(STATUS " - logrotate directory ${LOGROTATE_DIR}") -endif () -if (STARTUP_DIR) +endif() + +if(STARTUP_DIR) message(STATUS " - Startup directory ${STARTUP_DIR}") -endif () +endif() + message(STATUS " - User ${USER}") message(STATUS " - Group ${GROUP}") message(STATUS " - Package ${PACKAGE_LIST}") diff --git a/engine/enginerpc/engine_impl.cc b/engine/enginerpc/engine_impl.cc index 5a4849cc550..167fb3048ad 100644 --- a/engine/enginerpc/engine_impl.cc +++ b/engine/enginerpc/engine_impl.cc @@ -223,7 +223,7 @@ grpc::Status engine_impl::GetHost(grpc::ServerContext* context return 1; } - host->set_name(selectedhost->get_name()); + host->set_name(selectedhost->name()); host->set_alias(selectedhost->get_alias()); host->set_address(selectedhost->get_address()); host->set_check_period(selectedhost->check_period()); @@ -2671,7 +2671,7 @@ grpc::Status engine_impl::ChangeHostObjectCharVar( break; case ChangeObjectChar_Mode_CHANGE_CHECK_COMMAND: temp_host->set_check_command(request->charval()); - temp_host->set_check_command_ptr(cmd_found->second.get()); + temp_host->set_check_command_ptr(cmd_found->second); attr = MODATTR_CHECK_COMMAND; /* send data to event broker */ broker_adaptive_host_data(NEBTYPE_ADAPTIVEHOST_UPDATE, NEBFLAG_NONE, @@ -2778,7 +2778,7 @@ 
grpc::Status engine_impl::ChangeServiceObjectCharVar( attr = MODATTR_EVENT_HANDLER_COMMAND; } else if (request->mode() == ChangeObjectChar_Mode_CHANGE_CHECK_COMMAND) { temp_service->set_check_command(request->charval()); - temp_service->set_check_command_ptr(cmd_found->second.get()); + temp_service->set_check_command_ptr(cmd_found->second); attr = MODATTR_CHECK_COMMAND; } else if (request->mode() == ChangeObjectChar_Mode_CHANGE_CHECK_TIMEPERIOD) { diff --git a/engine/enginerpc/precomp_inc/precomp.hh b/engine/enginerpc/precomp_inc/precomp.hh index 8d7101df8e1..6cdbd6d9056 100644 --- a/engine/enginerpc/precomp_inc/precomp.hh +++ b/engine/enginerpc/precomp_inc/precomp.hh @@ -28,6 +28,7 @@ #include #include +#include #include #endif // CCE_PRECOMP_HH diff --git a/engine/inc/com/centreon/engine/anomalydetection.hh b/engine/inc/com/centreon/engine/anomalydetection.hh index 9a764811b8f..4295861c858 100644 --- a/engine/inc/com/centreon/engine/anomalydetection.hh +++ b/engine/inc/com/centreon/engine/anomalydetection.hh @@ -81,9 +81,12 @@ class anomalydetection : public service { void set_dependent_service(service* svc); void set_metric_name(std::string const& name); void set_thresholds_file(std::string const& file); - void set_thresholds( - const std::string& filename, - std::map >&& thresholds) noexcept; + + void set_thresholds_lock(const std::string& filename, + const nlohmann::json& thresholds); + void set_thresholds_no_lock(const std::string& filename, + const nlohmann::json& thresholds); + static int update_thresholds(const std::string& filename); virtual int run_async_check(int check_options, double latency, @@ -91,9 +94,11 @@ class anomalydetection : public service { bool reschedule_check, bool* time_is_valid, time_t* preferred_time) noexcept override; - commands::command* get_check_command_ptr() const; - std::tuple - parse_perfdata(std::string const& perfdata, time_t check_time); + int handle_async_check_result( + const check_result& queued_check_result) override; + 
bool parse_perfdata(std::string const& perfdata, + time_t check_time, + check_result& calculated_result); void init_thresholds(); void set_status_change(bool status_change); const std::string& get_metric_name() const; diff --git a/engine/inc/com/centreon/engine/broker.hh b/engine/inc/com/centreon/engine/broker.hh index 03d451409b3..f9f3d247350 100644 --- a/engine/inc/com/centreon/engine/broker.hh +++ b/engine/inc/com/centreon/engine/broker.hh @@ -440,7 +440,6 @@ int broker_contact_notification_method_data( struct timeval end_time, void* data, com::centreon::engine::contact* cntct, - char const* cmd, char const* ack_author, char const* ack_data, int escalated, @@ -468,7 +467,7 @@ void broker_downtime_data(int type, char const* comment_data, time_t start_time, time_t end_time, - int fixed, + bool fixed, unsigned long triggered_by, unsigned long duration, unsigned long downtime_id, @@ -527,7 +526,6 @@ int broker_host_check(int type, int state_type, struct timeval start_time, struct timeval end_time, - char const* cmd, double latency, double exectime, int timeout, @@ -550,12 +548,6 @@ void broker_log_data(int type, unsigned long data_type, time_t entry_time, struct timeval const* timestamp); -void broker_module_data(int type, - int flags, - int attr, - char const* module, - char const* args, - struct timeval const* timestamp); int broker_notification_data(int type, int flags, int attr, @@ -596,7 +588,6 @@ int broker_service_check(int type, int check_type, struct timeval start_time, struct timeval end_time, - char const* cmd, double latency, double exectime, int timeout, diff --git a/engine/inc/com/centreon/engine/check_result.hh b/engine/inc/com/centreon/engine/check_result.hh index 268414e617f..af50fac1c85 100644 --- a/engine/inc/com/centreon/engine/check_result.hh +++ b/engine/inc/com/centreon/engine/check_result.hh @@ -31,11 +31,13 @@ CCE_BEGIN() class notifier; class check_result { public: - check_result() = delete; + using pointer = std::shared_ptr; + + 
check_result(); check_result(enum check_source object_check_type, notifier* notifier, enum checkable::check_type check_type, - int check_options, + unsigned check_options, bool reschedule_check, double latency, struct timeval start_time, @@ -44,41 +46,42 @@ class check_result { bool exited_ok, int return_code, std::string const& output); - check_result(check_result const&) = delete; - check_result(check_result&&) = delete; - check_result& operator=(check_result const&) = delete; - enum check_source get_object_check_type() const; + inline enum check_source get_object_check_type() const { + return _object_check_type; + } void set_object_check_type(enum check_source object_check_type); - notifier* get_notifier(); + inline notifier* get_notifier() { return _notifier; } void set_notifier(notifier* notifier); - struct timeval get_finish_time() const; + inline struct timeval get_finish_time() const { return _finish_time; } void set_finish_time(struct timeval finish_time); - struct timeval get_start_time() const; + inline struct timeval get_start_time() const { return _start_time; } void set_start_time(struct timeval start_time); - int get_return_code() const; + inline int get_return_code() const { return _return_code; } void set_return_code(int return_code); - bool get_early_timeout() const; + inline bool get_early_timeout() const { return _early_timeout; } void set_early_timeout(bool early_timeout); - std::string const& get_output() const; + inline const std::string& get_output() const { return _output; } void set_output(std::string const& output); - bool get_exited_ok() const; + inline bool get_exited_ok() const { return _exited_ok; } void set_exited_ok(bool exited_ok); - bool get_reschedule_check() const; + inline bool get_reschedule_check() const { return _reschedule_check; } void set_reschedule_check(bool reschedule_check); - enum checkable::check_type get_check_type() const; + inline enum checkable::check_type get_check_type() const { + return _check_type; + }; void 
set_check_type(enum checkable::check_type check_type); - double get_latency() const; + inline double get_latency() const { return _latency; }; void set_latency(double latency); - int get_check_options() const; - void set_check_options(int check_options); + inline unsigned get_check_options() const { return _check_options; }; + void set_check_options(unsigned check_options); private: enum check_source _object_check_type; // is this a service or a host check? notifier* _notifier; // was this an active or passive service check? enum checkable::check_type _check_type; - int _check_options; + unsigned _check_options; bool _reschedule_check; // should we reschedule the next check double _latency; struct timeval _start_time; // time the service check was initiated diff --git a/engine/inc/com/centreon/engine/checkable.hh b/engine/inc/com/centreon/engine/checkable.hh index 15a9e88d09b..0dca420243c 100644 --- a/engine/inc/com/centreon/engine/checkable.hh +++ b/engine/inc/com/centreon/engine/checkable.hh @@ -43,7 +43,8 @@ class checkable { enum state_type { soft, hard }; - checkable(std::string const& display_name, + checkable(const std::string& name, + std::string const& display_name, std::string const& check_command, bool checks_enabled, bool accept_passive_checks, @@ -66,10 +67,12 @@ class checkable { bool obsess_over, std::string const& timezone, uint64_t icon_id); - virtual ~checkable() = default; + virtual ~checkable() noexcept = default; std::string const& get_display_name() const; void set_display_name(std::string const& name); + const std::string& name() const; + void set_name(const std::string& name); std::string const& check_command() const; void set_check_command(std::string const& check_command); uint32_t check_interval() const; @@ -155,8 +158,11 @@ class checkable { virtual bool is_in_downtime() const = 0; void set_event_handler_ptr(commands::command* cmd); commands::command* get_event_handler_ptr() const; - void set_check_command_ptr(commands::command* cmd); - 
commands::command* get_check_command_ptr() const; + void set_check_command_ptr(const std::shared_ptr& cmd); + inline const std::shared_ptr& get_check_command_ptr() + const { + return _check_command_ptr; + } bool get_is_executing() const; void set_is_executing(bool is_executing); void set_severity(std::shared_ptr sv); @@ -169,6 +175,7 @@ class checkable { timeperiod* check_period_ptr; private: + std::string _name; std::string _display_name; std::string _check_command; uint32_t _check_interval; @@ -210,7 +217,7 @@ class checkable { enum state_type _state_type; double _percent_state_change; commands::command* _event_handler_ptr; - commands::command* _check_command_ptr; + std::shared_ptr _check_command_ptr; bool _is_executing; std::shared_ptr _severity; uint64_t _icon_id; diff --git a/engine/inc/com/centreon/engine/checks/checker.hh b/engine/inc/com/centreon/engine/checks/checker.hh index b8f074a4712..a6327991b93 100644 --- a/engine/inc/com/centreon/engine/checks/checker.hh +++ b/engine/inc/com/centreon/engine/checks/checker.hh @@ -38,7 +38,7 @@ class checker : public commands::command_listener { public: static checker& instance(); - static void init(); + static void init(bool used_by_test = false); static void deinit(); void clear() noexcept; @@ -48,12 +48,15 @@ class checker : public commands::command_listener { int check_options, int use_cached_result, unsigned long check_timestamp_horizon); - void add_check_result(uint64_t id, check_result* result) noexcept; - void add_check_result_to_reap(check_result* result) noexcept; + void add_check_result(uint64_t id, + const check_result::pointer result) noexcept; + void add_check_result_to_reap(const check_result::pointer result) noexcept; static void forget(notifier* n) noexcept; + void wait_completion(); + private: - checker(); + checker(bool used_by_test); checker(checker const& right); ~checker() noexcept override; checker& operator=(checker const& right); @@ -66,19 +69,26 @@ class checker : public 
commands::command_listener { * Here is the list of prepared check results but with a command being * running. When the command will be finished, each check result is get back * updated and moved to _to_reap_partial list. */ - std::unordered_map _waiting_check_result; + std::unordered_map _waiting_check_result; /* This queue is filled during a cycle. When it is time to reap, its elements * are passed to _to_reap. It can then be filled in parallel during the * _to_reap treatment. */ - std::deque _to_reap_partial; + std::deque _to_reap_partial; /* * The list of check_results to reap: they contain data that have to be * translated to services/hosts. */ - std::deque _to_reap; + std::deque _to_reap; /* Due to reloads of centengine we have the following list with notifiers * that should be forgotten if notifiers are removed. */ std::deque _to_forget; + + /** + * used only for test in order to wait for completion + */ + const bool _used_by_test; + std::condition_variable _finish_cond; + bool _finished; }; } // namespace checks diff --git a/engine/inc/com/centreon/engine/commands/command.hh b/engine/inc/com/centreon/engine/commands/command.hh index f6510370c81..2f4f28ae414 100644 --- a/engine/inc/com/centreon/engine/commands/command.hh +++ b/engine/inc/com/centreon/engine/commands/command.hh @@ -49,11 +49,43 @@ class command { protected: static uint64_t get_uniq_id(); + std::mutex _lock; std::string _command_line; command_listener* _listener; std::string _name; + /** + * @brief the goal of this structure is to ensure that checks shared by + * anomalydetection and service are not called to often + * + */ + struct last_call { + using pointer = std::shared_ptr; + time_t launch_time; + std::shared_ptr res; + + last_call() : launch_time(0) {} + }; + + /** + * @brief this map is used to group service and anomalydetection in order to + * insure that both don't call check too often + * + */ + using caller_to_last_call_map = std::map; + caller_to_last_call_map _result_cache; + + 
using cmdid_to_last_call_map = std::map; + cmdid_to_last_call_map _current; + + bool gest_call_interval(uint64_t command_id, + const check_result::pointer& to_push_to_checker, + const void* caller); + void update_result_cache(uint64_t command_id, const result& res); + public: + using pointer = std::shared_ptr; + command(const std::string& name, const std::string& command_line, command_listener* listener = nullptr); @@ -67,15 +99,41 @@ class command { virtual std::string process_cmd(nagios_macros* macros) const; virtual uint64_t run(const std::string& processed_cmd, nagios_macros& macors, - uint32_t timeout) = 0; + uint32_t timeout, + const check_result::pointer& to_push_to_checker, + const void* caller = nullptr) = 0; virtual void run(const std::string& process_cmd, nagios_macros& macros, uint32_t timeout, result& res) = 0; + + template + void add_caller_group(caller_iterator begin, caller_iterator end); + void remove_caller(void* caller); + virtual void set_command_line(const std::string& command_line); void set_listener(command_listener* listener) noexcept; static command_map commands; }; + +template +void command::add_caller_group(caller_iterator begin, caller_iterator end) { + last_call::pointer obj{std::make_shared()}; + + std::unique_lock l(_lock); + for (; begin != end; ++begin) { + const void* caller = static_cast(*begin); + _result_cache[caller] = obj; + } +} + +std::ostream& operator<<(std::ostream& s, const command& cmd); + +inline std::ostream& operator<<(std::ostream& s, const command::pointer& cmd) { + s << *cmd; + return s; +} + } // namespace commands CCE_END() diff --git a/engine/inc/com/centreon/engine/commands/commands.hh b/engine/inc/com/centreon/engine/commands/commands.hh index 510044140ef..025aaf9e76d 100644 --- a/engine/inc/com/centreon/engine/commands/commands.hh +++ b/engine/inc/com/centreon/engine/commands/commands.hh @@ -143,7 +143,7 @@ void schedule_and_propagate_downtime(com::centreon::engine::host* temp_host, char const* comment_data, 
time_t start_time, time_t end_time, - int fixed, + bool fixed, unsigned long triggered_by, unsigned long duration); // schedules // downtime for diff --git a/engine/inc/com/centreon/engine/commands/connector.hh b/engine/inc/com/centreon/engine/commands/connector.hh index f7a289e3fa3..08be55b374d 100644 --- a/engine/inc/com/centreon/engine/commands/connector.hh +++ b/engine/inc/com/centreon/engine/commands/connector.hh @@ -127,7 +127,9 @@ class connector : public command, public process_listener { connector& operator=(const connector&) = delete; uint64_t run(std::string const& processed_cmd, nagios_macros& macros, - uint32_t timeout) override; + uint32_t timeout, + const check_result::pointer& to_push_to_checker, + const void* caller = nullptr) override; void run(std::string const& processed_cmd, nagios_macros& macros, uint32_t timeout, diff --git a/engine/inc/com/centreon/engine/commands/forward.hh b/engine/inc/com/centreon/engine/commands/forward.hh index 0fb529c3aff..95a24f95d4d 100644 --- a/engine/inc/com/centreon/engine/commands/forward.hh +++ b/engine/inc/com/centreon/engine/commands/forward.hh @@ -45,7 +45,9 @@ class forward : public command { forward& operator=(const forward&) = delete; uint64_t run(const std::string& processed_cmd, nagios_macros& macros, - uint32_t timeout) override; + uint32_t timeout, + const check_result::pointer& to_push_to_checker, + const void* caller = nullptr) override; void run(const std::string& processed_cmd, nagios_macros& macros, uint32_t timeout, diff --git a/engine/inc/com/centreon/engine/commands/raw.hh b/engine/inc/com/centreon/engine/commands/raw.hh index dcb91cbb7ab..15310dc2075 100644 --- a/engine/inc/com/centreon/engine/commands/raw.hh +++ b/engine/inc/com/centreon/engine/commands/raw.hh @@ -36,7 +36,6 @@ class environment; * Raw is a specific implementation of command. 
*/ class raw : public command, public process_listener { - std::mutex _lock; std::unordered_map _processes_busy; std::deque _processes_free; @@ -68,7 +67,9 @@ class raw : public command, public process_listener { raw& operator=(const raw&) = delete; uint64_t run(const std::string& process_cmd, nagios_macros& macros, - uint32_t timeout) override; + uint32_t timeout, + const check_result::pointer& to_push_to_checker, + const void* caller = nullptr) override; void run(const std::string& process_cmd, nagios_macros& macros, uint32_t timeout, diff --git a/engine/inc/com/centreon/engine/commands/result.hh b/engine/inc/com/centreon/engine/commands/result.hh index 194802caf1c..f5ce5deabca 100644 --- a/engine/inc/com/centreon/engine/commands/result.hh +++ b/engine/inc/com/centreon/engine/commands/result.hh @@ -27,7 +27,10 @@ CCE_BEGIN() +class check_result; + namespace commands { + /** * @class result result.hh * @brief Result contain the result of execution process. @@ -41,6 +44,7 @@ class result { public: result(); result(result const& right); + result(const check_result& check_res); ~result() noexcept; result& operator=(result const& right); bool operator==(result const& right) const noexcept; @@ -52,6 +56,9 @@ class result { timestamp start_time; std::string output; }; + +std::ostream& operator<<(std::ostream& s, const result& to_dump); + } // namespace commands CCE_END() diff --git a/engine/inc/com/centreon/engine/common.hh b/engine/inc/com/centreon/engine/common.hh index d5c710b5cc2..1f4116eda3e 100644 --- a/engine/inc/com/centreon/engine/common.hh +++ b/engine/inc/com/centreon/engine/common.hh @@ -223,7 +223,7 @@ /* Host/service check options. */ #define CHECK_OPTION_NONE 0 /* No check options. */ -#define CHECK_OPTION_FORCE_EXECUTION 1 +#define CHECK_OPTION_FORCE_EXECUTION 1u /* Force execution of a check (ignores disabled services/hosts, invalid \ timeperiods). */ #define CHECK_OPTION_FRESHNESS_CHECK 2 /* This is a freshness check. 
*/ diff --git a/engine/inc/com/centreon/engine/configuration/state.hh b/engine/inc/com/centreon/engine/configuration/state.hh index 0dece12fa31..8a053809f9b 100644 --- a/engine/inc/com/centreon/engine/configuration/state.hh +++ b/engine/inc/com/centreon/engine/configuration/state.hh @@ -256,6 +256,8 @@ class state { void log_passive_checks(bool value); bool log_pid() const noexcept; void log_pid(bool value); + inline bool log_file_line() const { return _log_file_line; } + void log_file_line(bool value); bool log_service_retries() const noexcept; void log_service_retries(bool value); float low_host_flap_threshold() const noexcept; @@ -293,11 +295,13 @@ class state { int perfdata_timeout() const noexcept; void perfdata_timeout(int value); std::string const& poller_name() const noexcept; - void poller_name(std::string const& value) noexcept; + void poller_name(std::string const& value); uint32_t poller_id() const noexcept; - void poller_id(uint32_t value) noexcept; + void poller_id(uint32_t value); uint16_t rpc_port() const noexcept; - void rpc_port(uint16_t value) noexcept; + void rpc_port(uint16_t value); + const std::string& rpc_listen_address() const noexcept; + void rpc_listen_address(const std::string& listen_address); bool process_performance_data() const noexcept; void process_performance_data(bool value); std::list const& resource_file() const noexcept; @@ -576,6 +580,7 @@ class state { bool _log_notifications; bool _log_passive_checks; bool _log_pid; + bool _log_file_line; bool _log_service_retries; float _low_host_flap_threshold; float _low_service_flap_threshold; @@ -597,6 +602,7 @@ class state { std::string _poller_name; uint32_t _poller_id; uint16_t _rpc_port; + std::string _rpc_listen_address; bool _process_performance_data; std::list _resource_file; unsigned long _retained_contact_host_attribute_mask; diff --git a/engine/inc/com/centreon/engine/host.hh b/engine/inc/com/centreon/engine/host.hh index f3fec385b03..c1e48aad72c 100644 --- 
a/engine/inc/com/centreon/engine/host.hh +++ b/engine/inc/com/centreon/engine/host.hh @@ -109,7 +109,7 @@ class host : public notifier { void add_child_host(host* child); void add_parent_host(std::string const& host_name); int log_event(); - int handle_async_check_result_3x(check_result* queued_check_result); + int handle_async_check_result_3x(const check_result& queued_check_result); int run_scheduled_check(int check_options, double latency); int run_async_check(int check_options, double latency, @@ -118,7 +118,7 @@ class host : public notifier { bool* time_is_valid, time_t* preferred_time) noexcept; bool schedule_check(time_t check_time, - int options, + uint32_t options, bool no_update_status_now = false) override; void check_for_flapping(bool update, bool actual_check, @@ -176,8 +176,6 @@ class host : public notifier { std::string const& get_current_state_as_string() const override; // setters / getters - std::string const& get_name() const; - void set_name(std::string const& name); std::string const& get_alias() const; void set_alias(std::string const& alias); std::string const& get_address() const; @@ -261,7 +259,6 @@ class host : public notifier { private: uint64_t _id; - std::string _name; std::string _alias; std::string _address; bool _process_performance_data; diff --git a/engine/inc/com/centreon/engine/log_v2.hh b/engine/inc/com/centreon/engine/log_v2.hh index a90157f0649..74b80a94237 100644 --- a/engine/inc/com/centreon/engine/log_v2.hh +++ b/engine/inc/com/centreon/engine/log_v2.hh @@ -19,6 +19,7 @@ #define CCE_LOG_V2_HH #include +#include #include #include "com/centreon/engine/configuration/state.hh" diff --git a/engine/inc/com/centreon/engine/nebstructs.hh b/engine/inc/com/centreon/engine/nebstructs.hh index abfb8387266..9d3f154570b 100644 --- a/engine/inc/com/centreon/engine/nebstructs.hh +++ b/engine/inc/com/centreon/engine/nebstructs.hh @@ -234,15 +234,12 @@ typedef struct nebstruct_contact_notification_method_struct { char* host_name; char* 
service_description; char* contact_name; - char* command_name; - char* command_args; int reason_type; int state; char* output; char* ack_author; char* ack_data; int escalated; - void* object_ptr; void* contact_ptr; } nebstruct_contact_notification_method_data; @@ -306,8 +303,8 @@ typedef struct nebstruct_event_handler_struct { int state_type; int state; int timeout; - char* command_name; - char* command_args; + std::string command_name; + std::string command_args; char* command_line; struct timeval start_time; struct timeval end_time; @@ -385,8 +382,6 @@ typedef struct nebstruct_host_check_struct { int state_type; int state; int timeout; - char* command_name; - char* command_args; const char* command_line; struct timeval start_time; struct timeval end_time; @@ -490,8 +485,8 @@ typedef struct nebstruct_program_status_struct { int obsess_over_services; unsigned long modified_host_attributes; unsigned long modified_service_attributes; - char const* global_host_event_handler; - char const* global_service_event_handler; + std::string global_host_event_handler; + std::string global_service_event_handler; } nebstruct_program_status_data; /* Relation data structure. 
*/ @@ -530,8 +525,6 @@ typedef struct nebstruct_service_check_struct { int state_type; int state; int timeout; - char* command_name; - char* command_args; const char* command_line; struct timeval start_time; struct timeval end_time; diff --git a/engine/inc/com/centreon/engine/notifier.hh b/engine/inc/com/centreon/engine/notifier.hh index 2eadfa3883b..2936865a71f 100644 --- a/engine/inc/com/centreon/engine/notifier.hh +++ b/engine/inc/com/centreon/engine/notifier.hh @@ -97,6 +97,7 @@ class notifier : public checkable { typedef bool (notifier::*is_viable)(reason_type type, notification_option); notifier(notifier_type notification_flag, + const std::string& name, std::string const& display_name, std::string const& check_command, bool checks_enabled, @@ -167,7 +168,7 @@ class notifier : public checkable { void set_last_problem_id(unsigned long last_problem_id) noexcept; virtual bool schedule_check(time_t check_time, - int options, + uint32_t options, bool no_update_status_now) = 0; virtual void update_status() = 0; int notify(reason_type type, diff --git a/engine/inc/com/centreon/engine/service.hh b/engine/inc/com/centreon/engine/service.hh index b2696793e95..dd0bfee05e8 100644 --- a/engine/inc/com/centreon/engine/service.hh +++ b/engine/inc/com/centreon/engine/service.hh @@ -93,7 +93,7 @@ class service : public notifier { bool obsess_over, std::string const& timezone, uint64_t icon_id); - ~service() noexcept = default; + ~service() noexcept; void set_host_id(uint64_t host_id); uint64_t get_host_id() const; void set_service_id(uint64_t service_id); @@ -130,7 +130,8 @@ class service : public notifier { int get_current_state_int() const override; std::string const& get_current_state_as_string() const override; - int handle_async_check_result(check_result* queued_check_result); + virtual int handle_async_check_result( + const check_result& queued_check_result); int log_event(); void check_for_flapping(bool update, bool allow_flapstart_notification); int 
handle_service_event(); @@ -144,7 +145,7 @@ class service : public notifier { bool* time_is_valid, time_t* preferred_time) noexcept; bool schedule_check(time_t check_time, - int options, + uint32_t options, bool no_update_status_now = false) override; void set_flap(double percent_change, double high_threshold, @@ -202,7 +203,6 @@ class service : public notifier { uint64_t _host_id; uint64_t _service_id; std::string _hostname; - std::string _description; std::string _event_handler_args; std::string _check_command_args; diff --git a/engine/inc/com/centreon/engine/utils.hh b/engine/inc/com/centreon/engine/utils.hh index 7fba5a96291..4ef43e8fe0e 100644 --- a/engine/inc/com/centreon/engine/utils.hh +++ b/engine/inc/com/centreon/engine/utils.hh @@ -50,6 +50,7 @@ int get_raw_command_line_r(nagios_macros* mac, std::string const& cmd, std::string& full_command, int macro_options); + // trap signals void setup_sighandler(); // handles signals @@ -80,4 +81,14 @@ void parse_check_output(std::string const& buffer, } #endif // C++ +inline int get_raw_command_line_r( + nagios_macros* mac, + const std::shared_ptr cmd_ptr, + std::string const& cmd, + std::string& full_command, + int macro_options) { + return get_raw_command_line_r(mac, cmd_ptr.get(), cmd, full_command, + macro_options); +} + #endif // !CCE_UTILS_HH diff --git a/engine/modules/external_commands/precomp_inc/precomp.hh b/engine/modules/external_commands/precomp_inc/precomp.hh index c32aca4f17e..baab3f6597c 100644 --- a/engine/modules/external_commands/precomp_inc/precomp.hh +++ b/engine/modules/external_commands/precomp_inc/precomp.hh @@ -34,6 +34,7 @@ #include #include +#include #include #endif // CCE_EXTERNAL_COMMANDS_PRECOMP_HH diff --git a/engine/precomp_inc/precomp.hh b/engine/precomp_inc/precomp.hh index 70b4b2e7500..4fe49cef6d8 100644 --- a/engine/precomp_inc/precomp.hh +++ b/engine/precomp_inc/precomp.hh @@ -62,6 +62,7 @@ #include #include +#include #include #include "com/centreon/engine/namespace.hh" diff 
--git a/engine/scripts/centengine.sh.in b/engine/scripts/centengine.sh.in index 31b453816fa..7d1b870b6a1 100755 --- a/engine/scripts/centengine.sh.in +++ b/engine/scripts/centengine.sh.in @@ -17,10 +17,10 @@ ### END INIT INFO prefix=@CMAKE_INSTALL_PREFIX@ -var_dir=@VAR_DIR@ -rw_dir=@RW_DIR@ -binary=@PREFIX_BIN@/centengine -config_file=@PREFIX_CONF@/centengine.cfg +var_dir=@ENGINE_VAR_LOG_DIR@ +rw_dir=@ENGINE_VAR_LIB_DIR@/rw +binary=@CMAKE_INSTALL_FULL_SBINDIR@/centengine +config_file=@PREFIX_ENGINE_CONF@/centengine.cfg status_file=$var_dir/status.dat retention_file=$var_dir/retention.dat command_file=$rw_dir/centengine.cmd diff --git a/engine/src/anomalydetection.cc b/engine/src/anomalydetection.cc index f5253fc3eb4..ec36cd0f5ab 100644 --- a/engine/src/anomalydetection.cc +++ b/engine/src/anomalydetection.cc @@ -36,6 +36,125 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::logging; +CCE_BEGIN() +namespace commands { +class cancellable_command : public command { + command::pointer _original_command; + + /** + * @brief if _fake_result is set, check isn't executed + * + */ + check_result::pointer _fake_result; + + static const std::string _empty; + + public: + cancellable_command(const command::pointer& original_command) + : command(original_command ? original_command->get_name() + : "cancellable_command", + original_command ? 
original_command->get_command_line() : ""), + _original_command(original_command) {} + + void set_fake_result(const check_result::pointer& res) { _fake_result = res; } + void reset_fake_result() { _fake_result.reset(); } + + void set_original_command(const command::pointer& original_command) { + _original_command = original_command; + } + + const std::string& get_command_line() const noexcept override; + void set_command_line(const std::string& command_line) noexcept override; + + inline const command::pointer& get_original_command() const { + return _original_command; + } + + uint64_t run(const std::string& processed_cmd, + nagios_macros& macors, + uint32_t timeout, + const check_result::pointer& to_push_to_checker, + const void* caller = nullptr) override; + void run(const std::string& process_cmd, + nagios_macros& macros, + uint32_t timeout, + result& res) override; +}; + +const std::string cancellable_command::_empty; + +/** + * Run a command. + * + * @param[in] args The command arguments. + * @param[in] macros The macros data struct. + * @param[in] timeout The command timeout. + * @param[in] to_push_to_checker This check_result will be pushed to checker.
+ * @param[in] caller pointer to the caller + * + * @return The command id or 0 if it uses the perf_data of dependent_service + */ +uint64_t cancellable_command::run( + const std::string& processed_cmd, + nagios_macros& macors, + uint32_t timeout, + const check_result::pointer& to_push_to_checker, + const void* caller) { + if (_fake_result) { + checks::checker::instance().add_check_result_to_reap(_fake_result); + return 0; // no command => no async result + } else { + if (_original_command) { + uint64_t id = _original_command->run(processed_cmd, macors, timeout, + to_push_to_checker, caller); + log_v2::checks()->debug( + "cancellable_command::run command launched id={} cmd {}", id, + _original_command); + return id; + } else { + log_v2::checks()->debug("cancellable_command::run no original command"); + return 0; + } + } +} + +void cancellable_command::run(const std::string& process_cmd, + nagios_macros& macros, + uint32_t timeout, + result& res) { + if (_fake_result) { + res = result(*_fake_result); + _fake_result.reset(); + } else { + if (_original_command) { + _original_command->run(process_cmd, macros, timeout, res); + } + } +} + +const std::string& cancellable_command::get_command_line() const noexcept { + if (_original_command) { + return _original_command->get_command_line(); + } else { + log_v2::commands()->error( + "cancellable_command::get_command_line: original command no set"); + return _empty; + } +} + +void cancellable_command::set_command_line( + const std::string& command_line) noexcept { + if (_original_command) { + _original_command->set_command_line(command_line); + } else { + log_v2::commands()->error( + "cancellable_command::set_command_line: original command no set"); + } +} +} // namespace commands + +CCE_END() + /** * Anomaly detection constructor * @@ -182,7 +301,6 @@ anomalydetection::anomalydetection(uint64_t host_id, obsess_over, timezone, icon_id}, - _dependent_service{dependent_service}, _metric_name{metric_name}, 
_thresholds_file{thresholds_file}, _status_change{status_change}, @@ -190,6 +308,7 @@ anomalydetection::anomalydetection(uint64_t host_id, set_host_id(host_id); set_service_id(service_id); init_thresholds(); + set_dependent_service(dependent_service); } /** @@ -328,14 +447,16 @@ com::centreon::engine::anomalydetection* add_anomalydetection( engine_logger(log_config_error, basic) << "Error: Service comes from a database, therefore its service id " << "must not be null"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Service comes from a database, therefore its service id must " "not be null"); return nullptr; } else if (description.empty()) { engine_logger(log_config_error, basic) << "Error: Service description is not set"; - log_v2::config()->error("Error: Service description is not set"); + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Error: Service description is not set"); return nullptr; } else if (!host_name.empty()) { uint64_t hid = get_host_id(host_name); @@ -346,7 +467,8 @@ com::centreon::engine::anomalydetection* add_anomalydetection( << "' has a conflict between config does not match with the config " "id (" << hid << ")"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: host id ({}) of host ('{}') of anomaly detection service " "'{}' has a conflict between config does not match with the config " "id ({})", @@ -360,7 +482,8 @@ com::centreon::engine::anomalydetection* add_anomalydetection( engine_logger(log_config_error, basic) << "Error: Dependent service " << dependent_service_id << " does not exist (anomaly detection " << service_id << ")"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Dependent service {} does not exist (anomaly detection {})", dependent_service_id, service_id); return nullptr; @@ -372,7 +495,8 @@ com::centreon::engine::anomalydetection* add_anomalydetection( << "Error: metric name must be provided for an anomaly detection " "service (host_id:" << 
host_id << ", service_id:" << service_id << ")"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: metric name must be provided for an anomaly detection " "service (host_id:{}, service_id:{})", host_id, service_id); @@ -384,7 +508,8 @@ com::centreon::engine::anomalydetection* add_anomalydetection( << "Error: thresholds file must be provided for an anomaly detection " "service (host_id:" << host_id << ", service_id:" << service_id << ")"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: thresholds file must be provided for an anomaly detection " "service (host_id:{}, service_id:{})", host_id, service_id); @@ -398,7 +523,8 @@ com::centreon::engine::anomalydetection* add_anomalydetection( << "Error: Invalid max_attempts, check_interval, retry_interval" ", or notification_interval value for service '" << description << "' on host '" << host_name << "'"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Invalid max_attempts, check_interval, retry_interval" ", or notification_interval value for service '{}' on host '{}'", description, host_name); @@ -410,7 +536,8 @@ com::centreon::engine::anomalydetection* add_anomalydetection( engine_logger(log_config_error, basic) << "Error: Service '" << description << "' on host '" << host_name << "' has already been defined"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Service '{}' on host '{}' has already been defined", description, host_name); return nullptr; @@ -512,7 +639,8 @@ int anomalydetection::run_async_check(int check_options, << ", latency=" << latency << ", scheduled_check=" << scheduled_check << ", reschedule_check=" << reschedule_check; - log_v2::functions()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::functions(), "anomalydetection::run_async_check, check_options={}, latency={}, " "scheduled_check={}, reschedule_check={}", check_options, latency, scheduled_check, reschedule_check); @@ -521,7 +649,8 @@ int 
anomalydetection::run_async_check(int check_options, << "** Running async check of anomalydetection '" << get_description() << "' on host '" << get_hostname() << "'..."; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "** Running async check of anomalydetection '{} ' on host '{}'...", get_description(), get_hostname()); @@ -529,176 +658,93 @@ int anomalydetection::run_async_check(int check_options, if (!verify_check_viability(check_options, time_is_valid, preferred_time)) return ERROR; - // Send broker event. - timeval start_time = {0, 0}; - timeval end_time = {0, 0}; - int res = broker_service_check( - NEBTYPE_SERVICECHECK_ASYNC_PRECHECK, NEBFLAG_NONE, NEBATTR_NONE, this, - checkable::check_active, start_time, end_time, check_command().c_str(), - get_latency(), 0.0, 0, false, 0, nullptr, nullptr); - - // Anomalydetection check was cancelled by NEB module. reschedule check later. - if (NEBERROR_CALLBACKCANCEL == res) { - if (preferred_time != nullptr) - *preferred_time += - static_cast(check_interval() * config->interval_length()); - engine_logger(log_runtime_error, basic) - << "Error: Some broker module cancelled check of anomalydetection '" - << get_description() << "' on host '" << get_hostname(); - log_v2::runtime()->error( - "Error: Some broker module cancelled check of anomalydetection '{}' on " - "host '{}'", - get_description(), get_hostname()); - return ERROR; + // need to update original command? + if (!get_check_command_ptr()) { + set_check_command_ptr(std::make_shared( + _dependent_service->get_check_command_ptr())); + service* group[2] = {this, _dependent_service}; + _dependent_service->get_check_command_ptr()->add_caller_group(group, + group + 2); } - // Anomalydetection check was override by NEB module. 
- else if (NEBERROR_CALLBACKOVERRIDE == res) { - engine_logger(dbg_functions, basic) - << "Some broker module overrode check of anomalydetection '" - << get_description() << "' on host '" << get_hostname() - << "' so we'll bail out"; - log_v2::functions()->trace( - "Some broker module overrode check of anomalydetection '{}' on host " - "'{}' so we'll bail out", - get_description(), get_hostname()); - return OK; - } - - // Checking starts. - engine_logger(dbg_checks, basic) - << "Checking anomalydetection '" << get_description() << "' on host '" - << get_hostname() << "'..."; - log_v2::checks()->trace("Checking anomalydetection '{}' on host '{}'...", - get_description(), get_hostname()); - - // Clear check options. - if (scheduled_check) - set_check_options(CHECK_OPTION_NONE); - - // Update latency for event broker and macros. - double old_latency(get_latency()); - set_latency(latency); - - // Get current host and service macros. - nagios_macros* macros(get_global_macros()); - grab_host_macros_r(macros, get_host_ptr()); - grab_service_macros_r(macros, this); - std::string tmp; - get_raw_command_line_r(macros, get_check_command_ptr(), - check_command().c_str(), tmp, 0); - - // Time to start command. - gettimeofday(&start_time, nullptr); - - // Update the number of running service checks. - ++currently_running_service_checks; - engine_logger(dbg_checks, basic) - << "Current running service checks: " << currently_running_service_checks; - - log_v2::checks()->trace("Current running service checks: {}", - currently_running_service_checks); - // Set the execution flag. - set_is_executing(true); - - std::ostringstream oss; - oss << "Anomaly detection on metric '" << _metric_name << "', from service '" - << _dependent_service->get_description() << "' on host '" - << get_hostname() << "'"; - // Send event broker. 
- res = broker_service_check( - NEBTYPE_SERVICECHECK_INITIATE, NEBFLAG_NONE, NEBATTR_NONE, this, - checkable::check_active, start_time, end_time, check_command().c_str(), - get_latency(), 0.0, config->service_check_timeout(), false, 0, - oss.str().c_str(), nullptr); - - // Restore latency. - set_latency(old_latency); - - // Service check was override by neb_module. - if (NEBERROR_CALLBACKOVERRIDE == res) { - clear_volatile_macros_r(macros); - return OK; - } - - // Update statistics. - update_check_stats(scheduled_check ? ACTIVE_SCHEDULED_SERVICE_CHECK_STATS - : ACTIVE_ONDEMAND_SERVICE_CHECK_STATS, - start_time.tv_sec); - - std::string perfdata = string::extract_perfdata( - _dependent_service->get_perf_data(), _metric_name); - - std::string without_thresholds(string::remove_thresholds(perfdata)); - std::tuple pd = - parse_perfdata(without_thresholds, start_time.tv_sec); - size_t pos = without_thresholds.find(';'); - if (pos != std::string::npos) - without_thresholds = without_thresholds.substr(pos); - else - without_thresholds = ""; - - // Init check result info. 
- std::unique_ptr check_result_info( - new check_result(service_check, this, checkable::check_active, - check_options, reschedule_check, latency, start_time, - start_time, false, true, service::state_ok, "")); - - oss.str(""); - oss.setf(std::ios_base::fixed, std::ios_base::floatfield); - oss.precision(2); - check_result_info->set_early_timeout(false); - check_result_info->set_exited_ok(true); - if (std::get<0>(pd) == service::state_ok) - oss << "OK: Regular activity, " << _metric_name << '=' << std::get<1>(pd) - << std::get<2>(pd) << " |"; - else if (std::get<0>(pd) == service::state_unknown && - std::isnan(std::get<1>(pd))) - oss << "UNKNOWN: Unknown activity, " << _metric_name - << " did not return any values| "; - else { - oss << "NON-OK: Unusual activity, the actual value of " << _metric_name - << " is " << std::get<1>(pd) << std::get<2>(pd); - if (!std::isnan(std::get<3>(pd)) && !std::isnan(std::get<4>(pd))) - oss << " which is outside the forecasting range [" << std::get<3>(pd) - << std::get<2>(pd) << " ; " << std::get<4>(pd) << std::get<2>(pd) - << "] |"; - else - oss << " and the forecasting range is unknown |"; + if (std::static_pointer_cast( + get_check_command_ptr()) + ->get_original_command() != + _dependent_service->get_check_command_ptr()) { + std::static_pointer_cast( + get_check_command_ptr()) + ->set_original_command(_dependent_service->get_check_command_ptr()); + service* group[2] = {this, _dependent_service}; + _dependent_service->get_check_command_ptr()->add_caller_group(group, + group + 2); } - check_result_info->set_return_code(std::get<0>(pd)); - oss << perfdata; - if (!std::isnan(std::get<3>(pd))) { - oss << ' ' << _metric_name << "_lower_thresholds=" << std::get<3>(pd) - << std::get<2>(pd) << without_thresholds; - } - if (!std::isnan(std::get<4>(pd))) { - oss << ' ' << _metric_name << "_upper_thresholds=" << std::get<4>(pd) - << std::get<2>(pd) << without_thresholds; + if (get_current_state() == service::service_state::state_ok) { + // if 
state is ok we don't execute command + std::string dependent_perf_data = _dependent_service->get_perf_data(); + struct timeval now; + gettimeofday(&now, nullptr); + check_result::pointer fake_res = std::make_shared( + check_source::service_check, this, checkable::check_active, + check_options, reschedule_check, latency, now, now, true, false, + service_state::state_unknown, + "failed to calc check_result from perf_data"); + if (!parse_perfdata(dependent_perf_data, time(nullptr), *fake_res)) { + SPDLOG_LOGGER_ERROR(log_v2::checks(), + "parse_perfdata failed => unknown state"); + } else { + SPDLOG_LOGGER_TRACE( + log_v2::checks(), + "** Running async check of anomalydetection '{} ' on host '{}'... " + "without check", + get_description(), get_hostname()); + } + std::static_pointer_cast( + get_check_command_ptr()) + ->set_fake_result(fake_res); + } else { + if (!std::static_pointer_cast( + get_check_command_ptr()) + ->get_original_command()) { + SPDLOG_LOGGER_ERROR( + log_v2::checks(), + "anomaly: no original commands for host {} => do nothing", + get_hostname()); + return ERROR; + } + SPDLOG_LOGGER_TRACE( + log_v2::checks(), + "** Running async check of anomalydetection '{} ' on host '{}'... with " + "check", + get_description(), get_hostname()); + std::static_pointer_cast( + get_check_command_ptr()) + ->reset_fake_result(); // execute original commands } - /* We should master this string, so no need to check if it is utf-8 */ - check_result_info->set_output(oss.str()); - timestamp now(timestamp::now()); - - // Update check result. - timeval tv; - gettimeofday(&tv, nullptr); - check_result_info->set_finish_time(tv); - - // Queue check result. - // handle_async_check_result(check_result_info.get()); - checks::checker::instance().add_check_result_to_reap( - check_result_info.release()); - - // Cleanup. 
- clear_volatile_macros_r(macros); - - return OK; + return service::run_async_check(check_options, latency, scheduled_check, + reschedule_check, time_is_valid, + preferred_time); } -commands::command* anomalydetection::get_check_command_ptr() const { - return _dependent_service->get_check_command_ptr(); +int anomalydetection::handle_async_check_result( + const check_result& queued_check_result) { + std::string output{queued_check_result.get_output()}; + std::string plugin_output; + std::string long_plugin_output; + std::string perf_data; + parse_check_output(output, plugin_output, long_plugin_output, perf_data, true, + false); + + perf_data = string::extract_perfdata(perf_data, _metric_name); + + check_result anomaly_check_result(queued_check_result); + // mandatory to avoid service::handle_async_check_result to erase + // parse_perfdata output + anomaly_check_result.set_exited_ok(true); + parse_perfdata(perf_data, queued_check_result.get_start_time().tv_sec, + anomaly_check_result); + + return service::handle_async_check_result(anomaly_check_result); } /** @@ -710,25 +756,51 @@ commands::command* anomalydetection::get_check_command_ptr() const { * @return A tuple containing the status, the value, its unit, the lower bound * and the upper bound */ -std::tuple -anomalydetection::parse_perfdata(std::string const& perfdata, - time_t check_time) { +// std::tuple +bool anomalydetection::parse_perfdata(std::string const& perfdata, + time_t check_time, + check_result& calculated_result) { + std::ostringstream oss; + + if (!_thresholds_file_viable) { + engine_logger(log_info_message, basic) + << "The thresholds file is not viable " + "(not available or not readable)."; + SPDLOG_LOGGER_ERROR(log_v2::checks(), + "The thresholds file is not viable " + "(not available or not readable)."); + oss << "The thresholds file is not viable for metric " << _metric_name + << " | " << perfdata; + calculated_result.set_output(oss.str()); + 
calculated_result.set_return_code(service_state::state_unknown); + return false; + } + + std::string without_thresholds(perfdata); + string::remove_thresholds(without_thresholds); std::lock_guard lock(_thresholds_m); - size_t pos = perfdata.find_last_of("="); + size_t pos = without_thresholds.find_last_of("="); /* If the perfdata is wrong. */ if (pos == std::string::npos) { engine_logger(log_runtime_error, basic) - << "Error: Unable to parse perfdata '" << perfdata << "'"; - log_v2::runtime()->error("Error: Unable to parse perfdata '{}'", perfdata); - return std::make_tuple(service::state_unknown, NAN, "", NAN, NAN); + << "Error: Unable to parse perfdata '" << without_thresholds << "'"; + SPDLOG_LOGGER_ERROR(log_v2::runtime(), + "Error: Unable to parse perfdata '{}'", + without_thresholds); + oss << "UNKNOWN: Unknown activity, " << _metric_name + << " did not return any values" + << " | " << perfdata; + calculated_result.set_output(oss.str()); + calculated_result.set_return_code(service_state::state_unknown); + return false; } /* If the perfdata is good. 
*/ pos++; char* unit; - double value = std::strtod(perfdata.c_str() + pos, &unit); - char const* end = perfdata.c_str() + perfdata.size() - 1; + double value = std::strtod(without_thresholds.c_str() + pos, &unit); + char const* end = without_thresholds.c_str() + without_thresholds.size() - 1; size_t l = 0; /* If there is a unit, it starts at unit char* */ while (unit + l <= end && unit[l] != ' ' && unit[l] != ';') @@ -737,19 +809,6 @@ anomalydetection::parse_perfdata(std::string const& perfdata, service::service_state status; - if (!_thresholds_file_viable) { - status = service::state_ok; - if (_status_change) { - engine_logger(log_info_message, basic) - << "The thresholds file is not viable " - "(not available or not readable)."; - log_v2::checks()->info( - "The thresholds file is not viable " - "(not available or not readable)."); - } - return std::make_tuple(status, value, unit, NAN, NAN); - } - /* The check time is probably between two timestamps stored in _thresholds. * * | d2 + @@ -774,11 +833,16 @@ anomalydetection::parse_perfdata(std::string const& perfdata, << "Error: the thresholds file is too old " "compared to the check timestamp " << check_time; - log_v2::runtime()->error( - "Error: the thresholds file is too old " - "compared to the check timestamp {}", - check_time); - return std::make_tuple(service::state_unknown, value, uom, NAN, NAN); + SPDLOG_LOGGER_ERROR(log_v2::runtime(), + "Error: the thresholds file is too old " + "compared to the check timestamp {}", + check_time); + oss << "The thresholds file is too old " + "compared to the check timestamp " + << check_time << " for metric " << _metric_name << " | " << perfdata; + calculated_result.set_output(oss.str()); + calculated_result.set_return_code(service_state::state_unknown); + return false; } if (it1 != _thresholds.begin()) --it1; @@ -786,10 +850,17 @@ anomalydetection::parse_perfdata(std::string const& perfdata, engine_logger(log_runtime_error, basic) << "Error: timestamp " << check_time << " 
too old compared with the thresholds file"; - log_v2::runtime()->error( + SPDLOG_LOGGER_ERROR( + log_v2::runtime(), "Error: timestamp {} too old compared with the thresholds file", check_time); - return std::make_tuple(service::state_unknown, value, uom, NAN, NAN); + oss << "timestamp " << check_time + << " is too old compared with the thresholds file for metric " + << _metric_name << " | " << perfdata << without_thresholds; + ; + calculated_result.set_output(oss.str()); + calculated_result.set_return_code(service_state::state_unknown); + return false; } /* Now it1.first <= check_time < it2.first */ @@ -811,7 +882,54 @@ anomalydetection::parse_perfdata(std::string const& perfdata, status = service::state_critical; } - return std::make_tuple(status, value, uom, lower, upper); + oss.setf(std::ios_base::fixed, std::ios_base::floatfield); + oss.precision(2); + calculated_result.set_early_timeout(false); + calculated_result.set_exited_ok(true); + if (status == service::state_ok) + oss << "OK: Regular activity, " << _metric_name << '=' << value << uom + << " |"; + else if (status == service::state_unknown && std::isnan(value)) + oss << "UNKNOWN: Unknown activity, " << _metric_name + << " did not return any values| "; + else { + oss << "NON-OK: Unusual activity, the actual value of " << _metric_name + << " is " << value << uom; + if (!std::isnan(lower) && !std::isnan(upper)) + oss << " which is outside the forecasting range [" << lower << uom + << " ; " << upper << uom << "] |"; + else + oss << " and the forecasting range is unknown |"; + } + + calculated_result.set_return_code(status); + + oss << without_thresholds; + + std::string without_thresholds_nor_value; + pos = without_thresholds.find(';'); + if (pos != std::string::npos) + without_thresholds_nor_value = without_thresholds.substr(pos); + + if (!std::isnan(lower)) { + oss << ' ' << _metric_name << "_lower_thresholds=" << lower << uom + << without_thresholds_nor_value; + } + if (!std::isnan(upper)) { + oss << ' ' 
<< _metric_name << "_upper_thresholds=" << upper << uom + << without_thresholds_nor_value; + } + /* We should master this string, so no need to check if it is utf-8 */ + calculated_result.set_output(oss.str()); + + timestamp now(timestamp::now()); + + // Update check result. + timeval tv; + gettimeofday(&tv, nullptr); + calculated_result.set_finish_time(tv); + + return true; } void anomalydetection::init_thresholds() { @@ -819,11 +937,23 @@ void anomalydetection::init_thresholds() { engine_logger(dbg_config, most) << "Trying to read thresholds file '" << _thresholds_file << "'"; - log_v2::config()->debug("Trying to read thresholds file '{}'", - _thresholds_file); - std::ifstream t(_thresholds_file); - if (!t) + SPDLOG_LOGGER_DEBUG(log_v2::config(), "Trying to read thresholds file '{}'", + _thresholds_file); + std::ifstream t; + t.exceptions(t.exceptions() | std::ios::failbit); + try { + t.open(_thresholds_file); + } catch (const std::system_error& e) { + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Fail to read thresholds file '{}' : {}", + _thresholds_file, e.code().message()); return; + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Fail to read thresholds file '{}' : {}", + _thresholds_file, e.what()); + return; + } std::stringstream buffer; buffer << t.rdbuf(); @@ -835,22 +965,24 @@ void anomalydetection::init_thresholds() { engine_logger(log_config_error, basic) << "Error: the file '" << _thresholds_file << "' contains errors: " << e.what(); - log_v2::config()->error("Error: the file '{}' contains errors: {}", - _thresholds_file, e.what()); + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Error: the file '{}' contains errors: {}", + _thresholds_file, e.what()); return; } if (!json.is_array()) { engine_logger(log_config_error, basic) << "Error: the file '" << _thresholds_file << "' is not a thresholds file. 
Its global structure is not an array."; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: the file '{}' is not a thresholds file. Its global structure " "is not an array.", _thresholds_file); return; } - int count = 0; + bool found = false; for (auto it = json.begin(); it != json.end(); ++it) { uint64_t host_id, service_id; auto item = it.value(); @@ -862,43 +994,30 @@ void anomalydetection::init_thresholds() { << "Error: host_id and service_id must " "be strings containing integers: " << e.what(); - log_v2::config()->error( - "Error: host_id and service_id must " - "be strings containing integers: {}", - e.what()); + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Error: host_id and service_id must " + "be strings containing integers: {}", + e.what()); return; } if (host_id == get_host_id() && service_id == get_service_id() && item["metric_name"].get() == _metric_name) { - engine_logger(dbg_config, most) - << "Filling thresholds in anomaly detection (host_id: " - << get_host_id() << ", service_id: " << get_service_id() - << ", metric: " << _metric_name << ")"; - log_v2::config()->debug( - "Filling thresholds in anomaly detection (host_id: {}, service_id: " - "{}, metric: {})", - get_host_id(), get_service_id(), _metric_name); - auto predict = item["predict"]; - _thresholds.clear(); - for (auto& i : predict) { - time_t timestamp = static_cast(i["timestamp"].get()); - double upper = i["upper"].get(); - double lower = i["lower"].get(); - _thresholds.emplace_hint( - _thresholds.end(), - std::make_pair(timestamp, std::make_pair(lower, upper))); - count++; + set_thresholds_no_lock(_thresholds_file, item["predict"]); + if (!_thresholds_file_viable) { + SPDLOG_LOGGER_ERROR(log_v2::config(), + "{} don't contain at least 2 thresholds datas for " + "host_id {} and service_id {}", + _thresholds_file, get_host_id(), get_service_id()); } + found = true; break; } } - if (count > 1) { - engine_logger(dbg_config, most) << "Number of rows in memory: " << count; 
- log_v2::config()->debug("Number of rows in memory: {}", count); - _thresholds_file_viable = true; - } else { - engine_logger(dbg_config, most) << "Nothing in memory"; - log_v2::config()->debug("Nothing in memory"); + if (!found) { + SPDLOG_LOGGER_ERROR( + log_v2::config(), + "{} don't contain datas for host_id {} and service_id {}", + _thresholds_file, get_host_id(), get_service_id()); } } @@ -911,13 +1030,22 @@ void anomalydetection::init_thresholds() { int anomalydetection::update_thresholds(const std::string& filename) { engine_logger(log_info_message, most) << "Reading thresholds file '" << filename << "'."; - log_v2::checks()->info("Reading thresholds file '{}'.", filename); - std::ifstream t(filename); - if (!t) { - engine_logger(log_config_error, basic) - << "Error: Unable to read the thresholds file '" << filename << "'."; - log_v2::config()->error("Error: Unable to read the thresholds file '{}'.", - filename); + SPDLOG_LOGGER_INFO(log_v2::checks(), "Reading thresholds file '{}'.", + filename); + + std::ifstream t; + t.exceptions(t.exceptions() | std::ios::failbit); + try { + t.open(filename); + } catch (const std::system_error& e) { + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Fail to read thresholds file '{}' : {}", filename, + e.code().message()); + return -1; + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Fail to read thresholds file '{}' : {}", filename, + e.what()); return -1; } @@ -930,7 +1058,8 @@ int anomalydetection::update_thresholds(const std::string& filename) { engine_logger(log_config_error, basic) << "Error: The thresholds file '" << filename << "' should be a json file: " << e.what(); - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: The thresholds file '{}' should be a json file: {}", filename, e.what()); return -2; @@ -940,7 +1069,8 @@ int anomalydetection::update_thresholds(const std::string& filename) { engine_logger(log_config_error, basic) << "Error: the file '" << filename 
<< "' is not a thresholds file. Its global structure is not an array."; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: the file '{}' is not a thresholds file. Its global structure " "is not an array.", filename); @@ -958,10 +1088,10 @@ int anomalydetection::update_thresholds(const std::string& filename) { << "Error: host_id and service_id must " "be strings containing integers: " << e.what(); - log_v2::config()->error( - "Error: host_id and service_id must " - "be strings containing integers: {}", - e.what()); + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Error: host_id and service_id must " + "be strings containing integers: {}", + e.what()); continue; } auto found = service::services_by_id.find({host_id, svc_id}); @@ -970,14 +1100,22 @@ int anomalydetection::update_thresholds(const std::string& filename) { << "Error: The thresholds file contains thresholds for the anomaly " "detection service (host_id: " << host_id << ", service_id: " << svc_id << ") that does not exist"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: The thresholds file contains thresholds for the anomaly " "detection service (host_id: {}, service_id: {}) that does not exist", host_id, svc_id); continue; } std::shared_ptr ad = - std::static_pointer_cast(found->second); + std::dynamic_pointer_cast(found->second); + if (!ad) { + SPDLOG_LOGGER_ERROR( + log_v2::config(), + "host_id: {}, service_id: {} is not an anomaly detection service", + host_id, svc_id); + continue; + } const std::string& metric_name(item["metric_name"].get()); if (ad->get_metric_name() != metric_name) { engine_logger(log_config_error, basic) @@ -987,7 +1125,8 @@ int anomalydetection::update_thresholds(const std::string& filename) { << ") with metric_name='" << metric_name << "' whereas the configured metric name is '" << ad->get_metric_name() << "'"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: The thresholds file contains thresholds for 
the anomaly " "detection service (host_id: {}, service_id: {}) with " "metric_name='{}' whereas the configured metric name is '{}'", @@ -999,32 +1138,56 @@ int anomalydetection::update_thresholds(const std::string& filename) { << "Filling thresholds in anomaly detection (host_id: " << ad->get_host_id() << ", service_id: " << ad->get_service_id() << ", metric: " << ad->get_metric_name() << ")"; - log_v2::checks()->info( + SPDLOG_LOGGER_INFO( + log_v2::checks(), "Filling thresholds in anomaly detection (host_id: {}, service_id: {}, " "metric: {})", ad->get_host_id(), ad->get_service_id(), ad->get_metric_name()); - auto predict = item["predict"]; - std::map > thresholds; - for (auto& i : predict) { - time_t timestamp = static_cast(i["timestamp"].get()); - double upper = i["upper"].get(); - double lower = i["lower"].get(); - thresholds.emplace_hint( - thresholds.end(), - std::make_pair(timestamp, std::make_pair(lower, upper))); - } - ad->set_thresholds(filename, std::move(thresholds)); + ad->set_thresholds_lock(filename, item["predict"]); } return 0; } -void anomalydetection::set_thresholds( - const std::string& filename, - std::map >&& thresholds) noexcept { +void anomalydetection::set_thresholds_lock(const std::string& filename, + const nlohmann::json& thresholds) { std::lock_guard _lock(_thresholds_m); - _thresholds_file = filename, _thresholds = thresholds; - _thresholds_file_viable = _thresholds.size() > 0; + set_thresholds_no_lock(filename, thresholds); +} + +void anomalydetection::set_thresholds_no_lock( + const std::string& filename, + const nlohmann::json& thresholds) { + if (_thresholds_file != filename) { + _thresholds_file = filename; + } + _thresholds.clear(); + for (auto& threshold_obj : thresholds) { + time_t timestamp = + static_cast(threshold_obj["timestamp"].get()); + double upper = threshold_obj["upper"].get(); + double lower = threshold_obj["lower"].get(); + _thresholds.emplace_hint( + _thresholds.end(), + std::make_pair(timestamp, 
std::make_pair(lower, upper))); + } + if (_thresholds.size() > 1) { + engine_logger(dbg_config, most) + << "host_id=" << get_host_id() << " serv_id=" << get_service_id() + << " Number of rows in memory: " << _thresholds.size(); + SPDLOG_LOGGER_DEBUG(log_v2::config(), + "host_id={} serv_id={} Number of rows in memory: {}", + get_host_id(), get_service_id(), _thresholds.size()); + _thresholds_file_viable = true; + } else { + engine_logger(dbg_config, most) + << "Nothing in memory " << _thresholds.size() + << " for host_id=" << get_host_id() << " serv_id=" << get_service_id(); + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Nothing in memory {} for host_id={} servid={}", + _thresholds.size(), get_host_id(), get_service_id()); + _thresholds_file_viable = false; + } } void anomalydetection::set_status_change(bool status_change) { diff --git a/engine/src/broker.cc b/engine/src/broker.cc index 2e67cb1e9fb..48c99ce75ff 100644 --- a/engine/src/broker.cc +++ b/engine/src/broker.cc @@ -20,6 +20,7 @@ */ #include "com/centreon/engine/broker.hh" +#include #include #include "com/centreon/engine/flapping.hh" #include "com/centreon/engine/globals.hh" @@ -591,7 +592,7 @@ int broker_contact_notification_data(int type, ds.output = const_cast(temp_service->get_plugin_output().c_str()); } else { temp_host = (host*)data; - ds.host_name = const_cast(temp_host->get_name().c_str()); + ds.host_name = const_cast(temp_host->name().c_str()); ds.service_description = NULL; ds.state = temp_host->get_current_state(); ds.output = const_cast(temp_host->get_plugin_output().c_str()); @@ -635,7 +636,6 @@ int broker_contact_notification_method_data(int type, struct timeval end_time, void* data, contact* cntct, - char const* cmd, char const* ack_author, char const* ack_data, int escalated, @@ -644,16 +644,6 @@ int broker_contact_notification_method_data(int type, if (!(config->event_broker_options() & BROKER_NOTIFICATIONS)) return OK; - // Get command name/args. 
- char* command_buf(NULL); - char* command_name(NULL); - char* command_args(NULL); - if (cmd) { - command_buf = string::dup(cmd); - command_name = strtok(command_buf, "!"); - command_args = strtok(NULL, "\x0"); - } - // Fill struct with relevant data. nebstruct_contact_notification_method_data ds; host* temp_host(NULL); @@ -667,8 +657,6 @@ int broker_contact_notification_method_data(int type, ds.end_time = end_time; ds.reason_type = reason_type; ds.contact_name = const_cast(cntct->get_name().c_str()); - ds.command_name = command_name; - ds.command_args = command_args; if (notification_type == notifier::service_notification) { temp_service = static_cast(data); ds.host_name = const_cast(temp_service->get_hostname().c_str()); @@ -678,7 +666,7 @@ int broker_contact_notification_method_data(int type, ds.output = const_cast(temp_service->get_plugin_output().c_str()); } else { temp_host = (host*)data; - ds.host_name = const_cast(temp_host->get_name().c_str()); + ds.host_name = const_cast(temp_host->name().c_str()); ds.service_description = NULL; ds.state = temp_host->get_current_state(); ds.output = const_cast(temp_host->get_plugin_output().c_str()); @@ -694,9 +682,6 @@ int broker_contact_notification_method_data(int type, return_code = neb_make_callbacks(NEBCALLBACK_CONTACT_NOTIFICATION_METHOD_DATA, &ds); - // Free memory. - delete[] command_buf; - return (return_code); } @@ -797,7 +782,7 @@ void broker_downtime_data(int type, char const* comment_data, time_t start_time, time_t end_time, - int fixed, + bool fixed, unsigned long triggered_by, unsigned long duration, unsigned long downtime_id, @@ -877,14 +862,12 @@ int broker_event_handler(int type, return ERROR; // Get command name/args. 
- char* command_buf(NULL); - char* command_name(NULL); - char* command_args(NULL); - if (cmd) { - command_buf = string::dup(cmd); - command_name = strtok(command_buf, "!"); - command_args = strtok(NULL, "\x0"); - } + std::vector cmd_split = + absl::StrSplit(cmd, absl::MaxSplits('!', 1)); + if (cmd_split.size() < 2) + return ERROR; + std::string& command_name = cmd_split[0]; + std::string& command_args = cmd_split[1]; // Fill struct with relevant data. nebstruct_event_handler_data ds; @@ -903,7 +886,7 @@ int broker_event_handler(int type, const_cast(temp_service->get_description().c_str()); } else { temp_host = (host*)data; - ds.host_name = const_cast(temp_host->get_name().c_str()); + ds.host_name = const_cast(temp_host->name().c_str()); ds.service_description = NULL; } ds.object_ptr = data; @@ -912,8 +895,8 @@ int broker_event_handler(int type, ds.start_time = start_time; ds.end_time = end_time; ds.timeout = timeout; - ds.command_name = command_name; - ds.command_args = command_args; + ds.command_name = std::move(command_name); + ds.command_args = std::move(command_args); ds.command_line = cmdline; ds.early_timeout = early_timeout; ds.execution_time = exectime; @@ -924,8 +907,6 @@ int broker_event_handler(int type, int return_code; return_code = neb_make_callbacks(NEBCALLBACK_EVENT_HANDLER_DATA, &ds); - // Free memory. - delete[] command_buf; return (return_code); } @@ -1123,7 +1104,6 @@ int broker_host_check(int type, int state_type, struct timeval start_time, struct timeval end_time, - char const* cmd, double latency, double exectime, int timeout, @@ -1140,23 +1120,13 @@ int broker_host_check(int type, if (!hst) return ERROR; - // Get command name/args. - char* command_buf(NULL); - char* command_name(NULL); - char* command_args(NULL); - if (cmd) { - command_buf = string::dup(cmd); - command_name = strtok(command_buf, "!"); - command_args = strtok(NULL, "\x0"); - } - // Fill struct with relevant data. 
nebstruct_host_check_data ds; ds.type = type; ds.flags = flags; ds.attr = attr; ds.timestamp = get_broker_timestamp(timestamp); - ds.host_name = const_cast(hst->get_name().c_str()); + ds.host_name = const_cast(hst->name().c_str()); ds.object_ptr = hst; ds.check_type = check_type; ds.current_attempt = hst->get_current_attempt(); @@ -1164,8 +1134,6 @@ int broker_host_check(int type, ds.state = state; ds.state_type = state_type; ds.timeout = timeout; - ds.command_name = command_name; - ds.command_args = command_args; ds.command_line = cmdline; ds.start_time = start_time; ds.end_time = end_time; @@ -1182,7 +1150,6 @@ int broker_host_check(int type, return_code = neb_make_callbacks(NEBCALLBACK_HOST_CHECK_DATA, &ds); // Free data. - delete[] command_buf; return return_code; } @@ -1253,43 +1220,6 @@ void broker_log_data(int type, neb_make_callbacks(NEBCALLBACK_LOG_DATA, &ds); } -/** - * Send module data to broker. - * - * @param[in] type Type. - * @param[in] flags Flags. - * @param[in] attr Attributes. - * @param[in] module Module. - * @param[in] args Module arguments. - * @param[in] timestamp Timestamp. - */ -void broker_module_data(int type, - int flags, - int attr, - char const* module, - char const* args, - struct timeval const* timestamp) { - // Config check. - if (!(config->event_broker_options() & BROKER_MODULE_DATA)) - return; - - // Fill struct with relevant data. - nebstruct_module_data ds; - ds.type = type; - ds.flags = flags; - ds.attr = attr; - ds.timestamp = get_broker_timestamp(timestamp); - ds.module = string::dup(module); - ds.args = string::dup(args); - - // Make callbacks. - neb_make_callbacks(NEBCALLBACK_MODULE_DATA, &ds); - - // Free memory. - delete[] ds.module; - delete[] ds.args; -} - /** * Send notification data to broker. 
* @@ -1347,7 +1277,7 @@ int broker_notification_data(int type, ds.output = const_cast(temp_service->get_plugin_output().c_str()); } else { temp_host = (host*)data; - ds.host_name = const_cast(temp_host->get_name().c_str()); + ds.host_name = const_cast(temp_host->name().c_str()); ds.service_description = NULL; ds.state = temp_host->get_current_state(); ds.output = const_cast(temp_host->get_plugin_output().c_str()); @@ -1429,17 +1359,11 @@ void broker_program_status(int type, ds.obsess_over_services = config->obsess_over_services(); ds.modified_host_attributes = modified_host_process_attributes; ds.modified_service_attributes = modified_service_process_attributes; - ds.global_host_event_handler = - string::dup(config->global_host_event_handler()); - ds.global_service_event_handler = - string::dup(config->global_service_event_handler()); + ds.global_host_event_handler = config->global_host_event_handler(); + ds.global_service_event_handler = config->global_service_event_handler(); // Make callbacks. neb_make_callbacks(NEBCALLBACK_PROGRAM_STATUS_DATA, &ds); - - // Free memory. - delete[] ds.global_host_event_handler; - delete[] ds.global_service_event_handler; } /** @@ -1538,7 +1462,6 @@ int broker_service_check(int type, int check_type, struct timeval start_time, struct timeval end_time, - char const* cmd, double latency, double exectime, int timeout, @@ -1552,16 +1475,6 @@ int broker_service_check(int type, if (!svc) return ERROR; - // Get command name/args. - char* command_buf(NULL); - char* command_name(NULL); - char* command_args(NULL); - if (cmd) { - command_buf = string::dup(cmd); - command_name = strtok(command_buf, "!"); - command_args = strtok(NULL, "\x0"); - } - // Fill struct with relevant data. 
nebstruct_service_check_data ds; ds.type = type; @@ -1577,8 +1490,6 @@ int broker_service_check(int type, ds.state = svc->get_current_state(); ds.state_type = svc->get_state_type(); ds.timeout = timeout; - ds.command_name = command_name; - ds.command_args = command_args; ds.command_line = cmdline; ds.start_time = start_time; ds.end_time = end_time; @@ -1594,8 +1505,6 @@ int broker_service_check(int type, int return_code; return_code = neb_make_callbacks(NEBCALLBACK_SERVICE_CHECK_DATA, &ds); - // Free data. - delete[] command_buf; return return_code; } @@ -1674,7 +1583,7 @@ void broker_statechange_data(int type, ds.output = const_cast(temp_service->get_plugin_output().c_str()); } else { temp_host = (host*)data; - ds.host_name = const_cast(temp_host->get_name().c_str()); + ds.host_name = const_cast(temp_host->name().c_str()); ds.service_description = NULL; ds.output = const_cast(temp_host->get_plugin_output().c_str()); } diff --git a/engine/src/broker/compatibility.cc b/engine/src/broker/compatibility.cc index 94ed1a52463..56fd85781c1 100644 --- a/engine/src/broker/compatibility.cc +++ b/engine/src/broker/compatibility.cc @@ -72,7 +72,7 @@ void compatibility::copyright_module(broker::handle* mod) { void compatibility::create_module(broker::handle* mod) { if (mod) { // Allocate memory. - std::unique_ptr new_module(new nebmodule); + auto new_module = std::make_unique(); // Module parameters. 
new_module->filename = string::dup(mod->get_filename()); diff --git a/engine/src/broker/handle.cc b/engine/src/broker/handle.cc index b51293545ba..aecba921917 100644 --- a/engine/src/broker/handle.cc +++ b/engine/src/broker/handle.cc @@ -196,7 +196,7 @@ void handle::open() { return; try { - _handle = std::shared_ptr(new library(_filename)); + _handle = std::make_shared(_filename); _handle->load(); int api_version(*static_cast(_handle->resolve("__neb_api_version"))); diff --git a/engine/src/broker/loader.cc b/engine/src/broker/loader.cc index 5e35d662929..3a0efb136d4 100644 --- a/engine/src/broker/loader.cc +++ b/engine/src/broker/loader.cc @@ -46,7 +46,7 @@ using namespace com::centreon::engine::logging; */ std::shared_ptr loader::add_module(std::string const& filename, std::string const& args) { - std::shared_ptr module(new handle(filename, args)); + auto module = std::make_shared(filename, args); _modules.push_back(module); return module; } diff --git a/engine/src/check_result.cc b/engine/src/check_result.cc index 8ad507c7716..61c804b650e 100644 --- a/engine/src/check_result.cc +++ b/engine/src/check_result.cc @@ -25,10 +25,23 @@ using namespace com::centreon::engine; +check_result::check_result() + : _object_check_type{check_source::service_check}, + _notifier{nullptr}, + _check_type(checkable::check_type::check_passive), + _check_options{0}, + _reschedule_check{false}, + _latency{0}, + _start_time{0, 0}, + _finish_time{0, 0}, + _early_timeout{false}, + _exited_ok{false}, + _return_code{0} {} + check_result::check_result(enum check_source object_check_type, notifier* notifier, enum checkable::check_type check_type, - int check_options, + unsigned check_options, bool reschedule_check, double latency, struct timeval start_time, @@ -50,58 +63,30 @@ check_result::check_result(enum check_source object_check_type, _return_code{return_code}, _output{output} {} -enum check_source check_result::get_object_check_type() const { - return _object_check_type; -} - void 
check_result::set_object_check_type(enum check_source object_check_type) { _object_check_type = object_check_type; } -notifier* check_result::get_notifier() { - return _notifier; -} - void check_result::set_notifier(notifier* notifier) { _notifier = notifier; } -struct timeval check_result::get_finish_time() const { - return _finish_time; -} - void check_result::set_finish_time(struct timeval finish_time) { _finish_time = finish_time; } -struct timeval check_result::get_start_time() const { - return _start_time; -} - void check_result::set_start_time(struct timeval start_time) { _start_time = start_time; } -int check_result::get_return_code() const { - return _return_code; -} - void check_result::set_return_code(int return_code) { _return_code = return_code; } -bool check_result::get_early_timeout() const { - return _early_timeout; -} - void check_result::set_early_timeout(bool early_timeout) { _early_timeout = early_timeout; } -std::string const& check_result::get_output() const { - return _output; -} - /** * @brief Set the check output to the check_result. A boolean is also here * to check or not if the string is legal UTF-8. 
If it may be non UTF-8, @@ -114,42 +99,22 @@ void check_result::set_output(std::string const& output) { _output = output; } -bool check_result::get_exited_ok() const { - return _exited_ok; -} - void check_result::set_exited_ok(bool exited_ok) { _exited_ok = exited_ok; } -bool check_result::get_reschedule_check() const { - return _reschedule_check; -} - void check_result::set_reschedule_check(bool reschedule_check) { _reschedule_check = reschedule_check; } -enum checkable::check_type check_result::get_check_type() const { - return _check_type; -} - void check_result::set_check_type(enum checkable::check_type check_type) { _check_type = check_type; } -double check_result::get_latency() const { - return _latency; -} - void check_result::set_latency(double latency) { _latency = latency; } -int check_result::get_check_options() const { - return _check_options; -} - -void check_result::set_check_options(int check_options) { +void check_result::set_check_options(unsigned check_options) { _check_options = check_options; } diff --git a/engine/src/checkable.cc b/engine/src/checkable.cc index 55334fb962c..d9280a9c2aa 100644 --- a/engine/src/checkable.cc +++ b/engine/src/checkable.cc @@ -24,7 +24,8 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::logging; -checkable::checkable(const std::string& display_name, +checkable::checkable(const std::string& name, + const std::string& display_name, const std::string& check_command, bool checks_enabled, bool accept_passive_checks, @@ -48,7 +49,8 @@ checkable::checkable(const std::string& display_name, const std::string& timezone, uint64_t icon_id) : check_period_ptr{nullptr}, - _display_name{display_name}, + _name{name}, + _display_name{display_name.empty() ? 
name : display_name}, _check_command{check_command}, _check_interval{check_interval}, _retry_interval{retry_interval}, @@ -120,7 +122,7 @@ const std::string& checkable::get_display_name() const { } void checkable::set_display_name(const std::string& display_name) { - _display_name = display_name; + _display_name = display_name.empty() ? _name : display_name; } const std::string& checkable::check_command() const { @@ -456,11 +458,8 @@ void checkable::set_event_handler_ptr(commands::command* cmd) { _event_handler_ptr = cmd; } -commands::command* checkable::get_check_command_ptr() const { - return _check_command_ptr; -} - -void checkable::set_check_command_ptr(commands::command* cmd) { +void checkable::set_check_command_ptr( + const std::shared_ptr& cmd) { _check_command_ptr = cmd; } @@ -525,3 +524,11 @@ std::forward_list>& checkable::mut_tags() { const std::forward_list>& checkable::tags() const { return _tags; } + +const std::string& checkable::name() const { + return _name; +} + +void checkable::set_name(const std::string& name) { + _name = name; +} diff --git a/engine/src/checks/checker.cc b/engine/src/checks/checker.cc index 55d19040840..223a1bb9878 100644 --- a/engine/src/checks/checker.cc +++ b/engine/src/checks/checker.cc @@ -56,9 +56,9 @@ checker& checker::instance() { return *_instance; } -void checker::init() { +void checker::init(bool used_by_test) { if (!_instance) - _instance = new checker(); + _instance = new checker(used_by_test); } void checker::deinit() { @@ -71,21 +71,9 @@ void checker::deinit() { void checker::clear() noexcept { try { std::lock_guard lock(_mut_reap); - while (!_to_reap_partial.empty()) { - check_result* result = _to_reap_partial.front(); - _to_reap_partial.pop_front(); - delete result; - } - while (!_to_reap.empty()) { - check_result* result = _to_reap.front(); - _to_reap.pop_front(); - delete result; - } - auto it = _waiting_check_result.begin(); - while (it != _waiting_check_result.end()) { - delete it->second; - it = 
_waiting_check_result.erase(it); - } + _to_reap_partial.clear(); + _to_reap.clear(); + _waiting_check_result.clear(); _to_forget.clear(); } catch (...) { } @@ -96,10 +84,10 @@ void checker::clear() noexcept { */ void checker::reap() { engine_logger(dbg_functions, basic) << "checker::reap"; - log_v2::functions()->trace("checker::reap()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "checker::reap()"); engine_logger(dbg_checks, basic) << "Starting to reap check results."; - log_v2::checks()->trace("Starting to reap check results."); + SPDLOG_LOGGER_TRACE(log_v2::checks(), "Starting to reap check results."); // Time to start reaping. time_t reaper_start_time; @@ -115,7 +103,6 @@ void checker::reap() { for (auto it = _waiting_check_result.begin(); it != _waiting_check_result.end();) { if (it->second->get_notifier() == n) { - delete it->second; it = _waiting_check_result.erase(it); } else ++it; @@ -123,7 +110,6 @@ void checker::reap() { for (auto it = _to_reap_partial.begin(); it != _to_reap_partial.end();) { if ((*it)->get_notifier() == n) { - delete *it; it = _to_reap_partial.erase(it); } else ++it; @@ -140,9 +126,10 @@ void checker::reap() { ++reaped_checks; engine_logger(dbg_checks, basic) << "Found a check result (#" << reaped_checks << ") to handle..."; - log_v2::checks()->trace("Found a check result (#{}) to handle...", - reaped_checks); - check_result* result = _to_reap.front(); + SPDLOG_LOGGER_TRACE(log_v2::checks(), + "Found a check result (#{}) to handle...", + reaped_checks); + check_result::pointer result = _to_reap.front(); _to_reap.pop_front(); // Service check result-> @@ -153,16 +140,18 @@ void checker::reap() { engine_logger(dbg_checks, more) << "Handling check result for service " << svc->get_host_id() << "/" << svc->get_service_id() << "..."; - log_v2::checks()->debug("Handling check result for service {}/{}...", - svc->get_host_id(), svc->get_service_id()); - svc->handle_async_check_result(result); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Handling 
check result for service {}/{}...", + svc->get_host_id(), svc->get_service_id()); + svc->handle_async_check_result(*result); } catch (std::exception const& e) { engine_logger(log_runtime_warning, basic) << "Check result queue errors for service " << svc->get_host_id() << "/" << svc->get_service_id() << " : " << e.what(); - log_v2::runtime()->warn( - "Check result queue errors for service {}/{} : {}", - svc->get_host_id(), svc->get_service_id(), e.what()); + SPDLOG_LOGGER_WARN(log_v2::runtime(), + "Check result queue errors for service {}/{} : {}", + svc->get_host_id(), svc->get_service_id(), + e.what()); } } // Host check result-> @@ -172,9 +161,10 @@ void checker::reap() { // Process the check result-> engine_logger(dbg_checks, more) << "Handling check result for host " << hst->get_host_id() << "..."; - log_v2::checks()->debug("Handling check result for host {}...", - hst->get_host_id()); - hst->handle_async_check_result_3x(result); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Handling check result for host {}...", + hst->get_host_id()); + hst->handle_async_check_result_3x(*result); } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) << "Check result queue errors for " @@ -184,8 +174,6 @@ void checker::reap() { } } - delete result; - // Check if reaping has timed out. 
time_t current_time; time(¤t_time); @@ -194,7 +182,8 @@ void checker::reap() { engine_logger(dbg_checks, basic) << "Breaking out of check result reaper: " << "max reaper time exceeded"; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Breaking out of check result reaper: max reaper time exceeded"); break; } @@ -203,7 +192,8 @@ void checker::reap() { if (sigshutdown) { engine_logger(dbg_checks, basic) << "Breaking out of check result reaper: signal encountered"; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Breaking out of check result reaper: signal encountered"); break; } @@ -213,7 +203,8 @@ void checker::reap() { // Reaping finished. engine_logger(dbg_checks, basic) << "Finished reaping " << reaped_checks << " check results"; - log_v2::checks()->trace("Finished reaping {} check results", reaped_checks); + SPDLOG_LOGGER_TRACE(log_v2::checks(), "Finished reaping {} check results", + reaped_checks); } /** @@ -234,29 +225,32 @@ void checker::run_sync(host* hst, << "checker::run: hst=" << hst << ", check_options=" << check_options << ", use_cached_result=" << use_cached_result << ", check_timestamp_horizon=" << check_timestamp_horizon; - log_v2::functions()->trace( - "checker::run: hst={:p}, check_options={}" - ", use_cached_result={}" - ", check_timestamp_horizon={}", - (void*)hst, check_options, use_cached_result, check_timestamp_horizon); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "checker::run: hst={:p}, check_options={}" + ", use_cached_result={}" + ", check_timestamp_horizon={}", + (void*)hst, check_options, use_cached_result, + check_timestamp_horizon); // Preamble. 
if (!hst) throw engine_error() << "Attempt to run synchronous check on invalid host"; if (!hst->get_check_command_ptr()) throw engine_error() << "Attempt to run synchronous active check on host '" - << hst->get_name() << "' with no check command"; + << hst->name() << "' with no check command"; engine_logger(dbg_checks, basic) - << "** Run sync check of host '" << hst->get_name() << "'..."; - log_v2::checks()->trace("** Run sync check of host '{}'...", hst->get_name()); + << "** Run sync check of host '" << hst->name() << "'..."; + SPDLOG_LOGGER_TRACE(log_v2::checks(), "** Run sync check of host '{}'...", + hst->name()); // Check if the host is viable now. if (!hst->verify_check_viability(check_options, nullptr, nullptr)) { if (check_result_code) *check_result_code = hst->get_current_state(); engine_logger(dbg_checks, basic) << "Host check is not viable at this time"; - log_v2::checks()->trace("Host check is not viable at this time"); + SPDLOG_LOGGER_TRACE(log_v2::checks(), + "Host check is not viable at this time"); return; } @@ -275,8 +269,8 @@ void checker::run_sync(host* hst, *check_result_code = hst->get_current_state(); engine_logger(dbg_checks, more) << "* Using cached host state: " << hst->get_current_state(); - log_v2::checks()->debug("* Using cached host state: {}", - hst->get_current_state()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "* Using cached host state: {}", + hst->get_current_state()); // Update statistics. update_check_stats(ACTIVE_ONDEMAND_HOST_CHECK_STATS, start_time.tv_sec); @@ -288,8 +282,9 @@ void checker::run_sync(host* hst, // Checking starts. engine_logger(dbg_checks, more) << "* Running actual host check: old state=" << hst->get_current_state(); - log_v2::checks()->debug("* Running actual host check: old state={}", - hst->get_current_state()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "* Running actual host check: old state={}", + hst->get_current_state()); // Update statistics. 
update_check_stats(ACTIVE_ONDEMAND_HOST_CHECK_STATS, start_time.tv_sec); @@ -327,9 +322,8 @@ void checker::run_sync(host* hst, broker_host_check(NEBTYPE_HOSTCHECK_INITIATE, NEBFLAG_NONE, NEBATTR_NONE, hst, checkable::check_active, hst->get_current_state(), hst->get_state_type(), start_time, end_time, - hst->check_command().c_str(), hst->get_latency(), 0.0, - config->host_check_timeout(), false, 0, nullptr, nullptr, - nullptr, nullptr, nullptr); + hst->get_latency(), 0.0, config->host_check_timeout(), + false, 0, nullptr, nullptr, nullptr, nullptr, nullptr); // Execute command synchronously. host::host_state host_result(_execute_sync(hst)); @@ -344,22 +338,21 @@ void checker::run_sync(host* hst, // Synchronous check is done. engine_logger(dbg_checks, more) << "* Sync host check done: new state=" << hst->get_current_state(); - log_v2::checks()->debug("* Sync host check done: new state={}", - hst->get_current_state()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "* Sync host check done: new state={}", + hst->get_current_state()); // Get the end time of command. gettimeofday(&end_time, nullptr); // Send event broker. 
- broker_host_check(NEBTYPE_HOSTCHECK_PROCESSED, NEBFLAG_NONE, NEBATTR_NONE, - hst, checkable::check_active, hst->get_current_state(), - hst->get_state_type(), start_time, end_time, - hst->check_command().c_str(), hst->get_latency(), - hst->get_execution_time(), config->host_check_timeout(), - false, hst->get_current_state(), nullptr, - const_cast(hst->get_plugin_output().c_str()), - const_cast(hst->get_long_plugin_output().c_str()), - const_cast(hst->get_perf_data().c_str()), nullptr); + broker_host_check( + NEBTYPE_HOSTCHECK_PROCESSED, NEBFLAG_NONE, NEBATTR_NONE, hst, + checkable::check_active, hst->get_current_state(), hst->get_state_type(), + start_time, end_time, hst->get_latency(), hst->get_execution_time(), + config->host_check_timeout(), false, hst->get_current_state(), nullptr, + const_cast(hst->get_plugin_output().c_str()), + const_cast(hst->get_long_plugin_output().c_str()), + const_cast(hst->get_perf_data().c_str()), nullptr); } /************************************** @@ -371,7 +364,10 @@ void checker::run_sync(host* hst, /** * Default constructor. */ -checker::checker() : commands::command_listener() {} +checker::checker(bool used_by_test) + : commands::command_listener(), + _used_by_test(used_by_test), + _finished(false) {} /** * Default destructor. @@ -388,19 +384,21 @@ checker::~checker() noexcept { void checker::finished(commands::result const& res) noexcept { // Debug message. 
engine_logger(dbg_functions, basic) << "checker::finished: res=" << &res; - log_v2::functions()->trace("checker::finished: res={:p}", (void*)&res); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "checker::finished: res={:p}", + (void*)&res); std::unique_lock lock(_mut_reap); auto it_id = _waiting_check_result.find(res.command_id); if (it_id == _waiting_check_result.end()) { engine_logger(log_runtime_warning, basic) << "command ID '" << res.command_id << "' not found"; - log_v2::runtime()->warn("command ID '{}' not found", res.command_id); + SPDLOG_LOGGER_WARN(log_v2::runtime(), "command ID '{}' not found", + res.command_id); return; } // Find check result. - check_result* result = it_id->second; + check_result::pointer result = it_id->second; _waiting_check_result.erase(it_id); lock.unlock(); @@ -418,6 +416,20 @@ void checker::finished(commands::result const& res) noexcept { // Queue check result. lock.lock(); _to_reap_partial.push_back(result); + if (_used_by_test) { + _finished = true; + lock.unlock(); + _finish_cond.notify_one(); + } +} + +void checker::wait_completion() { + if (!_used_by_test) { + throw std::invalid_argument("checker not in test usage"); + } + std::unique_lock lock(_mut_reap); + _finished = false; + _finish_cond.wait(lock, [this]() { return _finished; }); } /** @@ -430,31 +442,32 @@ void checker::finished(commands::result const& res) noexcept { */ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { engine_logger(dbg_functions, basic) << "checker::_execute_sync: hst=" << hst; - log_v2::functions()->trace("checker::_execute_sync: hst={:p}", (void*)hst); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "checker::_execute_sync: hst={:p}", + (void*)hst); // Preamble. 
if (!hst) throw engine_error() << "Attempt to run synchronous check on invalid host"; if (!hst->get_check_command_ptr()) throw engine_error() << "Attempt to run synchronous active check on host '" - << hst->get_name() << "' with no check command"; + << hst->name() << "' with no check command"; engine_logger(dbg_checks, basic) - << "** Executing sync check of host '" << hst->get_name() << "'..."; - log_v2::checks()->trace("** Executing sync check of host '{}'...", - hst->get_name()); + << "** Executing sync check of host '" << hst->name() << "'..."; + SPDLOG_LOGGER_TRACE(log_v2::checks(), + "** Executing sync check of host '{}'...", hst->name()); // Send broker event. timeval start_time; timeval end_time; memset(&start_time, 0, sizeof(start_time)); memset(&end_time, 0, sizeof(end_time)); - int ret(broker_host_check( - NEBTYPE_HOSTCHECK_SYNC_PRECHECK, NEBFLAG_NONE, NEBATTR_NONE, hst, - checkable::check_active, hst->get_current_state(), hst->get_state_type(), - start_time, end_time, hst->check_command().c_str(), hst->get_latency(), - 0.0, config->host_check_timeout(), false, 0, nullptr, nullptr, nullptr, - nullptr, nullptr)); + int ret(broker_host_check(NEBTYPE_HOSTCHECK_SYNC_PRECHECK, NEBFLAG_NONE, + NEBATTR_NONE, hst, checkable::check_active, + hst->get_current_state(), hst->get_state_type(), + start_time, end_time, hst->get_latency(), 0.0, + config->host_check_timeout(), false, 0, nullptr, + nullptr, nullptr, nullptr, nullptr)); // Host sync check was cancelled or overriden by NEB module. if ((NEBERROR_CALLBACKCANCEL == ret) || (NEBERROR_CALLBACKOVERRIDE == ret)) @@ -474,15 +487,14 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { hst->set_last_check(start_time.tv_sec); // Get command object. 
- commands::command* cmd = hst->get_check_command_ptr(); + commands::command::pointer cmd = hst->get_check_command_ptr(); std::string processed_cmd(cmd->process_cmd(macros)); const char* tmp_processed_cmd = processed_cmd.c_str(); // Send broker event. broker_host_check(NEBTYPE_HOSTCHECK_RAW_START, NEBFLAG_NONE, NEBATTR_NONE, hst, checkable::check_active, host::state_up, - hst->get_state_type(), start_time, end_time, - hst->check_command().c_str(), 0.0, 0.0, + hst->get_state_type(), start_time, end_time, 0.0, 0.0, config->host_check_timeout(), false, service::state_ok, processed_cmd.c_str(), const_cast(hst->get_plugin_output().c_str()), @@ -493,12 +505,13 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { engine_logger(dbg_commands, more) << "Raw host check command: " << hst->get_check_command_ptr()->get_command_line(); - log_v2::commands()->trace("Raw host check command: {}", - hst->get_check_command_ptr()->get_command_line()); + SPDLOG_LOGGER_TRACE(log_v2::commands(), "Raw host check command: {}", + hst->get_check_command_ptr()->get_command_line()); engine_logger(dbg_commands, more) << "Processed host check ommand: " << processed_cmd; - log_v2::commands()->trace("Processed host check ommand: {}", processed_cmd); + SPDLOG_LOGGER_TRACE(log_v2::commands(), "Processed host check ommand: {}", + processed_cmd); // Cleanup. 
hst->set_plugin_output(""); @@ -531,7 +544,8 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { engine_logger(log_runtime_warning, basic) << "Error: Synchronous host check command execution failed: " << e.what(); - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Error: Synchronous host check command execution failed: {}", e.what()); } @@ -568,12 +582,13 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { res.output = oss.str(); engine_logger(log_runtime_warning, basic) << "Warning: Host check command '" << processed_cmd << "' for host '" - << hst->get_name() << "' timed out after " - << config->host_check_timeout() << " seconds"; - log_v2::runtime()->warn( + << hst->name() << "' timed out after " << config->host_check_timeout() + << " seconds"; + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: Host check command '{}' for host '{}' timed out after {} " "seconds", - processed_cmd, hst->get_name(), config->host_check_timeout()); + processed_cmd, hst->name(), config->host_check_timeout()); } // Update values. @@ -622,9 +637,8 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { broker_host_check( NEBTYPE_HOSTCHECK_RAW_END, NEBFLAG_NONE, NEBATTR_NONE, hst, checkable::check_active, return_result, hst->get_state_type(), start_time, - end_time, hst->check_command().c_str(), 0.0, execution_time, - config->host_check_timeout(), res.exit_status == process::timeout, - res.exit_code, tmp_processed_cmd, + end_time, 0.0, execution_time, config->host_check_timeout(), + res.exit_status == process::timeout, res.exit_code, tmp_processed_cmd, const_cast(hst->get_plugin_output().c_str()), const_cast(hst->get_long_plugin_output().c_str()), const_cast(hst->get_perf_data().c_str()), nullptr); @@ -632,7 +646,8 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { // Termination. 
engine_logger(dbg_checks, basic) << "** Sync host check done: state=" << return_result; - log_v2::checks()->trace("** Sync host check done: state={}", return_result); + SPDLOG_LOGGER_TRACE(log_v2::checks(), "** Sync host check done: state={}", + return_result); return return_result; } @@ -645,8 +660,9 @@ com::centreon::engine::host::host_state checker::_execute_sync(host* hst) { * @param id A command id * @param check_result A check_result coming from a service or a host. */ -void checker::add_check_result(uint64_t id, - check_result* check_result) noexcept { +void checker::add_check_result( + uint64_t id, + const check_result::pointer check_result) noexcept { std::lock_guard lock(_mut_reap); _waiting_check_result[id] = check_result; } @@ -657,7 +673,8 @@ void checker::add_check_result(uint64_t id, * * @param check_result The check_result already finished. */ -void checker::add_check_result_to_reap(check_result* check_result) noexcept { +void checker::add_check_result_to_reap( + const check_result::pointer check_result) noexcept { std::lock_guard lock(_mut_reap); _to_reap_partial.push_back(check_result); } diff --git a/engine/src/command_manager.cc b/engine/src/command_manager.cc index b2b4b9beddb..d49bea73f6e 100644 --- a/engine/src/command_manager.cc +++ b/engine/src/command_manager.cc @@ -143,12 +143,12 @@ int command_manager::process_passive_service_check( timeval set_tv = {.tv_sec = check_time, .tv_usec = 0}; - check_result* result = - new check_result(service_check, found->second.get(), - checkable::check_passive, CHECK_OPTION_NONE, false, - static_cast(tv.tv_sec - check_time) + - static_cast(tv.tv_usec) / 1000000.0, - set_tv, set_tv, false, true, return_code, output); + check_result::pointer result = std::make_shared( + service_check, found->second.get(), checkable::check_passive, + CHECK_OPTION_NONE, false, + static_cast(tv.tv_sec - check_time) + + static_cast(tv.tv_usec) / 1000000.0, + set_tv, set_tv, false, true, return_code, output); /* make sure the 
return code is within bounds */ if (result->get_return_code() < 0 || result->get_return_code() > 3) @@ -213,12 +213,12 @@ int command_manager::process_passive_host_check(time_t check_time, tv_start.tv_sec = check_time; tv_start.tv_usec = 0; - check_result* result = - new check_result(host_check, it->second.get(), checkable::check_passive, - CHECK_OPTION_NONE, false, - static_cast(tv.tv_sec - check_time) + - static_cast(tv.tv_usec) / 1000000.0, - tv_start, tv_start, false, true, return_code, output); + check_result::pointer result = std::make_shared( + host_check, it->second.get(), checkable::check_passive, CHECK_OPTION_NONE, + false, + static_cast(tv.tv_sec - check_time) + + static_cast(tv.tv_usec) / 1000000.0, + tv_start, tv_start, false, true, return_code, output); /* make sure the return code is within bounds */ if (result->get_return_code() < 0 || result->get_return_code() > 3) diff --git a/engine/src/commands/command.cc b/engine/src/commands/command.cc index c1a90f71d50..a604923cf5c 100644 --- a/engine/src/commands/command.cc +++ b/engine/src/commands/command.cc @@ -18,8 +18,11 @@ */ #include "com/centreon/engine/commands/command.hh" +#include "com/centreon/engine/checks/checker.hh" #include "com/centreon/engine/configuration/applier/state.hh" #include "com/centreon/engine/exceptions/error.hh" +#include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/log_v2.hh" #include "com/centreon/engine/logging/logger.hh" #include "com/centreon/engine/macros/grab.hh" @@ -132,7 +135,7 @@ void commands::command::set_listener( */ std::string commands::command::process_cmd(nagios_macros* macros) const { std::string command_line; - process_macros_r(macros, _command_line, command_line, 0); + process_macros_r(macros, this->get_command_line(), command_line, 0); return command_line; } @@ -144,3 +147,77 @@ std::string commands::command::process_cmd(nagios_macros* macros) const { uint64_t commands::command::get_uniq_id() { return ++_id; } + +void 
commands::command::remove_caller(void* caller) { + std::unique_lock l(_lock); + _result_cache.erase(caller); +} + +/** + * @brief ensure that checks isn't used to often + * + * @param command_id + * @param to_push_to_checker check_result to push to checks::checker + * @param caller pointer of the caller object as a service or anomalydetection + * @return true check can ben done + * @return false check musn't be done, the previous result is pushed to + * checks::checker + */ +bool commands::command::gest_call_interval( + uint64_t command_id, + const check_result::pointer& to_push_to_checker, + const void* caller) { + std::shared_ptr result_to_reuse; + + { + std::lock_guard lock(_lock); + // are we allowed to execute command + caller_to_last_call_map::iterator group_search = _result_cache.find(caller); + if (group_search != _result_cache.end()) { + time_t now = time(nullptr); + if (group_search->second->launch_time + config->interval_length() >= + now && + group_search->second->res) { // old check is too recent + result_to_reuse = std::make_shared(*group_search->second->res); + result_to_reuse->command_id = command_id; + result_to_reuse->start_time = timestamp::now(); + result_to_reuse->end_time = timestamp::now(); + } else { + // old check is old enough => we do the check + group_search->second->launch_time = now; + _current[command_id] = group_search->second; + } + } + } + + checks::checker::instance().add_check_result(command_id, to_push_to_checker); + if (_listener && result_to_reuse) { + _listener->finished(*result_to_reuse); + SPDLOG_LOGGER_TRACE(log_v2::commands(), + "command::run: id={} , reuse result", command_id); + return false; + } + return true; +} + +void commands::command::update_result_cache(uint64_t command_id, + const result& res) { + std::lock_guard lock(_lock); + cmdid_to_last_call_map::iterator to_update = _current.find(command_id); + if (to_update != _current.end()) { + to_update->second->res = std::make_shared(res); + _current.erase(to_update); 
+ } +} + +CCE_BEGIN() +namespace commands { + +std::ostream& operator<<(std::ostream& s, const commands::command& cmd) { + s << "cmd_name:" << cmd.get_name() << " cmd_line:" << cmd.get_command_line(); + return s; +} + +} // namespace commands + +CCE_END() diff --git a/engine/src/commands/commands.cc b/engine/src/commands/commands.cc index 1e284ee31b7..4f86ab97a92 100644 --- a/engine/src/commands/commands.cc +++ b/engine/src/commands/commands.cc @@ -163,8 +163,9 @@ int cmd_add_comment(int cmd, time_t entry_time, char* args) { char* svc_description(nullptr); char* user(nullptr); char* comment_data(nullptr); - int persistent(0); + bool persistent{false}; uint64_t service_id = 0; + const char* command_name; /* get the host name */ if ((host_name = my_strtok(args, ";")) == nullptr) @@ -172,6 +173,8 @@ int cmd_add_comment(int cmd, time_t entry_time, char* args) { /* if we're adding a service comment... */ if (cmd == CMD_ADD_SVC_COMMENT) { + command_name = "ADD_SVC_COMMENT"; + /* get the service description */ if ((svc_description = my_strtok(nullptr, ";")) == nullptr) return ERROR; @@ -182,6 +185,8 @@ int cmd_add_comment(int cmd, time_t entry_time, char* args) { if (found == service::services.end() || !found->second) return ERROR; service_id = found->second->get_service_id(); + } else { + command_name = "ADD_HOST_COMMENT"; } /* else verify that the host is valid */ @@ -195,11 +200,13 @@ int cmd_add_comment(int cmd, time_t entry_time, char* args) { /* get the persistent flag */ if ((temp_ptr = my_strtok(nullptr, ";")) == nullptr) return ERROR; - persistent = atoi(temp_ptr); - if (persistent > 1) - persistent = 1; - else if (persistent < 0) - persistent = 0; + + if (!absl::SimpleAtob(temp_ptr, &persistent)) { + log_v2::external_command()->error( + "Error: could not {} : persistent '{}' must be 1 or 0", command_name, + temp_ptr); + return ERROR; + } /* get the name of the user who entered the comment */ if ((user = my_strtok(nullptr, ";")) == nullptr) @@ -210,26 +217,31 @@ 
int cmd_add_comment(int cmd, time_t entry_time, char* args) { return ERROR; /* add the comment */ - std::shared_ptr com{new comment( + auto com = std::make_shared( (cmd == CMD_ADD_HOST_COMMENT) ? comment::host : comment::service, comment::user, temp_host->get_host_id(), service_id, entry_time, user, - comment_data, persistent, comment::external, false, (time_t)0)}; - comment::comments.insert({com->get_comment_id(), com}); - + comment_data, persistent, comment::external, false, (time_t)0); + uint64_t comment_id = com->get_comment_id(); + comment::comments.insert({comment_id, com}); + log_v2::external_command()->trace("{}, comment_id: {}, data: {}", + command_name, comment_id, + com->get_comment_data()); return OK; } /* removes a host or service comment from the status log */ -int cmd_delete_comment(int cmd[[maybe_unused]], char* args) { +int cmd_delete_comment(int cmd [[maybe_unused]], char* args) { uint64_t comment_id{0}; - /* get the comment id we should delete */ - if ((comment_id = strtoul(args, nullptr, 10)) == 0) + if (!absl::SimpleAtoi(args, &comment_id)) { + log_v2::external_command()->error( + "Error: could not delete comment : comment_id '{}' must be an " + "integer >= 0", + args); return ERROR; - + } /* delete the specified comment */ comment::delete_comment(comment_id); - return OK; } @@ -312,7 +324,13 @@ int cmd_delay_notification(int cmd, char* args) { /* get the time that we should delay until... */ if ((temp_ptr = my_strtok(nullptr, "\n")) == nullptr) return ERROR; - delay_time = strtoul(temp_ptr, nullptr, 10); + if (!absl::SimpleAtoi(temp_ptr, &delay_time)) { + log_v2::external_command()->error( + "Error: could not delay notification : delay_time '{}' must be " + "an integer", + temp_ptr); + return ERROR; + } /* delay the next notification... 
*/ if (cmd == CMD_DELAY_HOST_NOTIFICATION) @@ -361,7 +379,13 @@ int cmd_schedule_check(int cmd, char* args) { /* get the next check time */ if ((temp_ptr = my_strtok(nullptr, "\n")) == nullptr) return ERROR; - delay_time = strtoul(temp_ptr, nullptr, 10); + if (!absl::SimpleAtoi(temp_ptr, &delay_time)) { + log_v2::external_command()->error( + "Error: could not schedule check : delay_time '{}' must be " + "an integer", + temp_ptr); + return ERROR; + } /* schedule the host check */ if (cmd == CMD_SCHEDULE_HOST_CHECK || cmd == CMD_SCHEDULE_FORCED_HOST_CHECK) @@ -416,7 +440,13 @@ int cmd_schedule_host_service_checks(int cmd, char* args, int force) { /* get the next check time */ if ((temp_ptr = my_strtok(nullptr, "\n")) == nullptr) return ERROR; - delay_time = strtoul(temp_ptr, nullptr, 10); + if (!absl::SimpleAtoi(temp_ptr, &delay_time)) { + log_v2::external_command()->error( + "Error: could not schedule host service checks : delay_time '{}' " + "must be an integer", + temp_ptr); + return ERROR; + } /* reschedule all services on the specified host */ for (service_map_unsafe::iterator it(temp_host->services.begin()), @@ -439,8 +469,13 @@ void cmd_signal_process(int cmd, char* args) { /* get the time to schedule the event */ if ((temp_ptr = my_strtok(args, "\n")) == nullptr) scheduled_time = 0L; - else - scheduled_time = strtoul(temp_ptr, nullptr, 10); + else if (!absl::SimpleAtoi(temp_ptr, &scheduled_time)) { + log_v2::external_command()->error( + "Error: could not signal process : scheduled_time '{}' " + "must be an integer", + temp_ptr); + return; + } /* add a scheduled program shutdown or restart to the event list */ timed_event* evt = new timed_event( @@ -570,12 +605,12 @@ int process_passive_service_check(time_t check_time, timeval set_tv = {.tv_sec = check_time, .tv_usec = 0}; - check_result* result = - new check_result(service_check, found->second.get(), - checkable::check_passive, CHECK_OPTION_NONE, false, - static_cast(tv.tv_sec - check_time) + - 
static_cast(tv.tv_usec / 1000000.0), - set_tv, set_tv, false, true, return_code, output); + check_result::pointer result = std::make_shared( + service_check, found->second.get(), checkable::check_passive, + CHECK_OPTION_NONE, false, + static_cast(tv.tv_sec - check_time) + + static_cast(tv.tv_usec / 1000000.0), + set_tv, set_tv, false, true, return_code, output); /* make sure the return code is within bounds */ if (result->get_return_code() < 0 || result->get_return_code() > 3) { @@ -684,12 +719,12 @@ int process_passive_host_check(time_t check_time, gettimeofday(&tv, nullptr); timeval tv_start = {.tv_sec = check_time, .tv_usec = 0}; - check_result* result = - new check_result(host_check, it->second.get(), checkable::check_passive, - CHECK_OPTION_NONE, false, - static_cast(tv.tv_sec - check_time) + - static_cast(tv.tv_usec / 1000000.0), - tv_start, tv_start, false, true, return_code, output); + check_result::pointer result = std::make_shared( + host_check, it->second.get(), checkable::check_passive, CHECK_OPTION_NONE, + false, + static_cast(tv.tv_sec - check_time) + + static_cast(tv.tv_usec / 1000000.0), + tv_start, tv_start, false, true, return_code, output); /* make sure the return code is within bounds */ if (result->get_return_code() < 0 || result->get_return_code() > 3) @@ -732,7 +767,7 @@ int cmd_acknowledge_problem(int cmd, char* args) { return ERROR; /* verify that the service is valid */ - found = service::services.find({it->second->get_name(), svc_description}); + found = service::services.find({it->second->name(), svc_description}); if (found == service::services.end() || !found->second) return ERROR; @@ -799,7 +834,7 @@ int cmd_remove_acknowledgement(int cmd, char* args) { return ERROR; /* verify that the service is valid */ - found = service::services.find({it->second->get_name(), svc_description}); + found = service::services.find({it->second->name(), svc_description}); if (found == service::services.end() || !found->second) return ERROR; @@ -828,7 
+863,7 @@ int cmd_schedule_downtime(int cmd, time_t entry_time, char* args) { char* temp_ptr{nullptr}; time_t start_time{0}; time_t end_time{0}; - int fixed{0}; + bool fixed{false}; uint64_t triggered_by{0}; unsigned long duration{0}; char* author{nullptr}; @@ -877,7 +912,7 @@ int cmd_schedule_downtime(int cmd, time_t entry_time, char* args) { /* verify that the service is valid */ service_map::const_iterator found( - service::services.find({temp_host->get_name(), svc_description})); + service::services.find({temp_host->name(), svc_description})); if (found == service::services.end() || !found->second) return ERROR; @@ -887,27 +922,56 @@ int cmd_schedule_downtime(int cmd, time_t entry_time, char* args) { /* get the start time */ if ((temp_ptr = my_strtok(nullptr, ";")) == nullptr) return ERROR; - start_time = (time_t)strtoul(temp_ptr, nullptr, 10); + if (!absl::SimpleAtoi(temp_ptr, &start_time)) { + log_v2::external_command()->error( + "Error: could not schedule downtime : start_time '{}' must be " + "an integer", + temp_ptr); + return ERROR; + } /* get the end time */ if ((temp_ptr = my_strtok(nullptr, ";")) == nullptr) return ERROR; - end_time = (time_t)strtoul(temp_ptr, nullptr, 10); + if (!absl::SimpleAtoi(temp_ptr, &end_time)) { + log_v2::external_command()->error( + "Error: could not schedule downtime : end_time '{}' must be " + "an integer", + temp_ptr); + return ERROR; + } /* get the fixed flag */ if ((temp_ptr = my_strtok(nullptr, ";")) == nullptr) return ERROR; - fixed = atoi(temp_ptr); + if (!absl::SimpleAtob(temp_ptr, &fixed)) { + log_v2::external_command()->error( + "Error: could not schedule downtime : fixed '{}' must be 1 or 0", + temp_ptr); + return ERROR; + } /* get the trigger id */ if ((temp_ptr = my_strtok(nullptr, ";")) == nullptr) return ERROR; - triggered_by = strtoul(temp_ptr, nullptr, 10); + if (!absl::SimpleAtoi(temp_ptr, &triggered_by)) { + log_v2::external_command()->error( + "Error: could not schedule downtime : triggered_by '{}' must 
be an " + "integer >= 0", + temp_ptr); + return ERROR; + } /* get the duration */ if ((temp_ptr = my_strtok(nullptr, ";")) == nullptr) return ERROR; - duration = strtoul(temp_ptr, nullptr, 10); + if (!absl::SimpleAtoi(temp_ptr, &duration)) { + log_v2::external_command()->error( + "Error: could not schedule downtime : duration '{}' must be an integer " + ">= 0", + temp_ptr); + return ERROR; + } /* get the author */ if ((author = my_strtok(nullptr, ";")) == nullptr) @@ -927,7 +991,7 @@ int cmd_schedule_downtime(int cmd, time_t entry_time, char* args) { return ERROR; /* duration should be auto-calculated, not user-specified */ - if (fixed > 0) + if (fixed) duration = (unsigned long)(end_time - start_time); /* schedule downtime */ @@ -1060,7 +1124,13 @@ int cmd_delete_downtime(int cmd, char* args) { if (nullptr == (temp_ptr = my_strtok(args, "\n"))) return ERROR; - downtime_id = strtoul(temp_ptr, nullptr, 10); + if (!absl::SimpleAtoi(temp_ptr, &downtime_id)) { + log_v2::external_command()->error( + "Error: could not delete downtime : downtime_id '{}' must be an " + "integer >= 0", + temp_ptr); + return ERROR; + } if (CMD_DEL_HOST_DOWNTIME == cmd || CMD_DEL_SVC_DOWNTIME == cmd) downtime_manager::instance().unschedule_downtime(downtime_id); @@ -1790,7 +1860,7 @@ int cmd_change_object_char_var(int cmd, char* args) { case CMD_CHANGE_HOST_CHECK_COMMAND: temp_host->set_check_command(temp_ptr); - temp_host->set_check_command_ptr(cmd_found->second.get()); + temp_host->set_check_command_ptr(cmd_found->second); attr = MODATTR_CHECK_COMMAND; break; @@ -1814,7 +1884,7 @@ int cmd_change_object_char_var(int cmd, char* args) { case CMD_CHANGE_SVC_CHECK_COMMAND: found_svc->second->set_check_command(temp_ptr); - found_svc->second->set_check_command_ptr(cmd_found->second.get()); + found_svc->second->set_check_command_ptr(cmd_found->second); attr = MODATTR_CHECK_COMMAND; break; @@ -2429,7 +2499,7 @@ void schedule_and_propagate_downtime(host* temp_host, char const* comment_data, time_t 
start_time, time_t end_time, - int fixed, + bool fixed, unsigned long triggered_by, unsigned long duration) { /* check all child hosts... */ diff --git a/engine/src/commands/connector.cc b/engine/src/commands/connector.cc index 7ed2c2fb9f2..dff97411836 100644 --- a/engine/src/commands/connector.cc +++ b/engine/src/commands/connector.cc @@ -118,10 +118,10 @@ connector::~connector() noexcept { * @return The command id. */ uint64_t connector::run(const std::string& processed_cmd, - nagios_macros& macros, - uint32_t timeout) { - (void)macros; - + nagios_macros&, + uint32_t timeout, + const check_result::pointer& to_push_to_checker, + const void* caller) { engine_logger(dbg_commands, basic) << "connector::run: connector='" << _name << "', cmd='" << processed_cmd << "', timeout=" << timeout; @@ -131,7 +131,12 @@ uint64_t connector::run(const std::string& processed_cmd, // Set query informations. uint64_t command_id(get_uniq_id()); - std::shared_ptr info(new query_info); + + if (!gest_call_interval(command_id, to_push_to_checker, caller)) { + return command_id; + } + + auto info = std::make_shared(); info->processed_cmd = processed_cmd; info->start_time = timestamp::now(); info->timeout = timeout; @@ -197,7 +202,7 @@ void connector::run(const std::string& processed_cmd, // Set query informations. 
uint64_t command_id(get_uniq_id()); - std::shared_ptr info(new query_info); + auto info = std::make_shared(); info->processed_cmd = processed_cmd; info->start_time = timestamp::now(); info->timeout = timeout; @@ -371,6 +376,7 @@ void connector::finished(process& p) noexcept { try { engine_logger(dbg_commands, basic) << "connector::finished: process=" << &p; log_v2::commands()->trace("connector::finished: process={}", (void*)&p); + UNIQUE_LOCK(lock, _lock); _is_running = false; _data_available.clear(); @@ -661,14 +667,10 @@ void connector::_recv_query_execute(char const* data) { << res.output << "'"; log_v2::commands()->trace( "connector::_recv_query_execute: " - "id={}, " - "start_time={}, " - "end_time={}, " - "exit_code={}, " - "exit_status={}, " - "output='{}'", - command_id, res.start_time.to_mseconds(), res.end_time.to_mseconds(), - res.exit_code, res.exit_status, res.output); + "id={}, {}", + command_id, res); + + update_result_cache(command_id, res); if (!info->waiting_result) { // Forward result to the listener. diff --git a/engine/src/commands/forward.cc b/engine/src/commands/forward.cc index b2a8b91b014..589cb7a0fa5 100644 --- a/engine/src/commands/forward.cc +++ b/engine/src/commands/forward.cc @@ -52,13 +52,18 @@ forward::forward(std::string const& command_name, * @param[in] args The command arguments. * @param[in] macros The macros data struct. * @param[in] timeout The command timeout. + * @param[in] to_push_to_checker This check_result will be pushed to checher. + * @param[in] caller pointer to the caller * * @return The command id. 
*/ uint64_t forward::run(std::string const& processed_cmd, nagios_macros& macros, - uint32_t timeout) { - return _command->run(processed_cmd, macros, timeout); + uint32_t timeout, + const check_result::pointer& to_push_to_checker, + const void* caller) { + return _command->run(processed_cmd, macros, timeout, to_push_to_checker, + caller); } /** diff --git a/engine/src/commands/processing.cc b/engine/src/commands/processing.cc index 3550b8e12ff..d9ab5f842d9 100644 --- a/engine/src/commands/processing.cc +++ b/engine/src/commands/processing.cc @@ -734,16 +734,38 @@ void processing::_wrapper_enable_host_svc_checks(host* hst) { } void processing::_wrapper_set_host_notification_number(host* hst, char* args) { - if (hst && args) - hst->set_notification_number(atoi(args)); + if (hst && args) { + int notification_number; + if (!absl::SimpleAtoi(args, ¬ification_number)) { + log_v2::runtime()->error( + "Error: could not set host notification number: '{}' must be a " + "positive integer", + args); + return; + } + hst->set_notification_number(notification_number); + } } void processing::_wrapper_send_custom_host_notification(host* hst, char* args) { char* buf[3] = {NULL, NULL, NULL}; + int option; if ((buf[0] = my_strtok(args, ";")) && (buf[1] = my_strtok(NULL, ";")) && (buf[2] = my_strtok(NULL, ";"))) { - hst->notify(notifier::reason_custom, buf[1], buf[2], - static_cast(atoi(buf[0]))); + if (!absl::SimpleAtoi(buf[0], &option)) { + log_v2::runtime()->error( + "Error: could not send custom host notification: '{}' must be an " + "integer between 0 and 7", + buf[0]); + } else if (option >= 0 && option <= 7) { + hst->notify(notifier::reason_custom, buf[1], buf[2], + static_cast(option)); + } else { + log_v2::runtime()->error( + "Error: could not send custom host notification: '{}' must be an " + "integer between 0 and 7", + buf[0]); + } } } @@ -798,16 +820,39 @@ void processing::_wrapper_disable_passive_service_checks(host* hst) { void 
processing::_wrapper_set_service_notification_number(service* svc, char* args) { char* str(my_strtok(args, ";")); - if (svc && str) - svc->set_notification_number(atoi(str)); + int notification_number; + if (svc && str) { + if (!absl::SimpleAtoi(str, ¬ification_number)) { + log_v2::runtime()->error( + "Error: could not set service notification number: '{}' must be a " + "positive integer", + str); + return; + } + svc->set_notification_number(notification_number); + } } void processing::_wrapper_send_custom_service_notification(service* svc, char* args) { char* buf[3] = {NULL, NULL, NULL}; + int notification_number; if ((buf[0] = my_strtok(args, ";")) && (buf[1] = my_strtok(NULL, ";")) && (buf[2] = my_strtok(NULL, ";"))) { - svc->notify(notifier::reason_custom, buf[1], buf[2], - static_cast(atoi(buf[0]))); + if (!absl::SimpleAtoi(buf[0], ¬ification_number)) { + log_v2::runtime()->error( + "Error: could not send custom service notification: '{}' must be an " + "integer between 0 and 7", + buf[0]); + } else if (notification_number >= 0 && notification_number <= 7) { + svc->notify( + notifier::reason_custom, buf[1], buf[2], + static_cast(notification_number)); + } else { + log_v2::runtime()->error( + "Error: could not send custom service notification: '{}' must be an " + "integer between 0 and 7", + buf[0]); + } } } diff --git a/engine/src/commands/raw.cc b/engine/src/commands/raw.cc index 1ec33928a68..490348da692 100644 --- a/engine/src/commands/raw.cc +++ b/engine/src/commands/raw.cc @@ -64,8 +64,8 @@ raw::~raw() noexcept { } catch (std::exception const& e) { engine_logger(log_runtime_error, basic) << "Error: Raw command destructor failed: " << e.what(); - log_v2::runtime()->error("Error: Raw command destructor failed: {}", - e.what()); + SPDLOG_LOGGER_ERROR(log_v2::runtime(), + "Error: Raw command destructor failed: {}", e.what()); } } @@ -75,20 +75,28 @@ raw::~raw() noexcept { * @param[in] args The command arguments. * @param[in] macros The macros data struct. 
* @param[in] timeout The command timeout. + * @param[in] to_push_to_checker This check_result will be pushed to checher. + * @param[in] caller pointer to the caller * * @return The command id. */ uint64_t raw::run(std::string const& processed_cmd, nagios_macros& macros, - uint32_t timeout) { + uint32_t timeout, + const check_result::pointer& to_push_to_checker, + const void* caller) { engine_logger(dbg_commands, basic) << "raw::run: cmd='" << processed_cmd << "', timeout=" << timeout; - log_v2::commands()->trace("raw::run: cmd='{}', timeout={}", processed_cmd, - timeout); + SPDLOG_LOGGER_TRACE(log_v2::commands(), "raw::run: cmd='{}', timeout={}", + processed_cmd, timeout); // Get process and put into the busy list. process* p; uint64_t command_id(get_uniq_id()); + + if (!gest_call_interval(command_id, to_push_to_checker, caller)) { + return command_id; + } { std::lock_guard lock(_lock); p = _get_free_process(); @@ -97,8 +105,8 @@ uint64_t raw::run(std::string const& processed_cmd, engine_logger(dbg_commands, basic) << "raw::run: id=" << command_id << ", process=" << p; - log_v2::commands()->trace("raw::run: id={} , process={}", command_id, - (void*)p); + SPDLOG_LOGGER_TRACE(log_v2::commands(), "raw::run: id={} , process={}", + command_id, (void*)p); // Setup environnement macros if is necessary. environment env; @@ -109,13 +117,13 @@ uint64_t raw::run(std::string const& processed_cmd, p->exec(processed_cmd.c_str(), env.data(), timeout); engine_logger(dbg_commands, basic) << "raw::run: start process success: id=" << command_id; - log_v2::commands()->trace("raw::run: start process success: id={}", - command_id); + SPDLOG_LOGGER_TRACE(log_v2::commands(), + "raw::run: start process success: id={}", command_id); } catch (...) 
{ engine_logger(dbg_commands, basic) << "raw::run: start process failed: id=" << command_id; - log_v2::commands()->trace("raw::run: start process failed: id={}", - command_id); + SPDLOG_LOGGER_TRACE(log_v2::commands(), + "raw::run: start process failed: id={}", command_id); std::lock_guard lock(_lock); _processes_busy.erase(p); @@ -139,8 +147,8 @@ void raw::run(std::string const& processed_cmd, result& res) { engine_logger(dbg_commands, basic) << "raw::run: cmd='" << processed_cmd << "', timeout=" << timeout; - log_v2::commands()->trace("raw::run: cmd='{}', timeout={}", processed_cmd, - timeout); + SPDLOG_LOGGER_TRACE(log_v2::commands(), "raw::run: cmd='{}', timeout={}", + processed_cmd, timeout); // Get process. process p; @@ -148,8 +156,8 @@ void raw::run(std::string const& processed_cmd, engine_logger(dbg_commands, basic) << "raw::run: id=" << command_id << ", process=" << &p; - log_v2::commands()->trace("raw::run: id={}, process={}", command_id, - (void*)&p); + SPDLOG_LOGGER_TRACE(log_v2::commands(), "raw::run: id={}, process={}", + command_id, (void*)&p); // Setup environement macros if is necessary. environment env; @@ -160,13 +168,13 @@ void raw::run(std::string const& processed_cmd, p.exec(processed_cmd.c_str(), env.data(), timeout); engine_logger(dbg_commands, basic) << "raw::run: start process success: id=" << command_id; - log_v2::commands()->trace("raw::run: start process success: id={}", - command_id); + SPDLOG_LOGGER_TRACE(log_v2::commands(), + "raw::run: start process success: id={}", command_id); } catch (...) 
{ engine_logger(dbg_commands, basic) << "raw::run: start process failed: id=" << command_id; - log_v2::commands()->trace("raw::run: start process failed: id={}", - command_id); + SPDLOG_LOGGER_TRACE(log_v2::commands(), + "raw::run: start process failed: id={}", command_id); throw; } @@ -208,16 +216,10 @@ void raw::run(std::string const& processed_cmd, << ", " "output='" << res.output << "'"; - log_v2::commands()->trace( - "raw::run: end process: " - "id={}, " - "start_time={}, " - "end_time={}, " - "exit_code={}, " - "exit_status={}, " - "output='{}'", - command_id, res.start_time.to_mseconds(), res.end_time.to_mseconds(), - res.exit_code, res.exit_status, res.output); + SPDLOG_LOGGER_TRACE(log_v2::commands(), + "raw::run: end process: " + "id={}, {}", + command_id, res); } /************************************** @@ -253,7 +255,8 @@ void raw::data_is_available_err(process& p) noexcept { void raw::finished(process& p) noexcept { try { engine_logger(dbg_commands, basic) << "raw::finished: process=" << &p; - log_v2::commands()->trace("raw::finished: process={}", (void*)&p); + SPDLOG_LOGGER_TRACE(log_v2::commands(), "raw::finished: process={}", + (void*)&p); uint64_t command_id(0); { @@ -269,9 +272,9 @@ void raw::finished(process& p) noexcept { engine_logger(log_runtime_warning, basic) << "Warning: Invalid process pointer: " "process not found into process busy list"; - log_v2::runtime()->warn( - "Warning: Invalid process pointer: " - "process not found into process busy list"); + SPDLOG_LOGGER_WARN(log_v2::runtime(), + "Warning: Invalid process pointer: " + "process not found into process busy list"); return; } // Get command_id and remove the process from the busy list. 
@@ -280,7 +283,7 @@ void raw::finished(process& p) noexcept { } engine_logger(dbg_commands, basic) << "raw::finished: id=" << command_id; - log_v2::commands()->trace("raw::finished: id={}", command_id); + SPDLOG_LOGGER_TRACE(log_v2::commands(), "raw::finished: id={}", command_id); // Build check result. result res; @@ -315,11 +318,10 @@ void raw::finished(process& p) noexcept { << ", exit_code=" << res.exit_code << ", exit_status=" << res.exit_status << ", output='" << res.output << "'"; - log_v2::commands()->trace( - "raw::finished: id={}, start_time={}, end_time={}, exit_code={}, " - "exit_status={}, output='{}'", - command_id, res.start_time.to_mseconds(), res.end_time.to_mseconds(), - res.exit_code, res.exit_status, res.output); + SPDLOG_LOGGER_TRACE(log_v2::commands(), "raw::finished: id={}, {}", + command_id, res); + + update_result_cache(command_id, res); // Forward result to the listener. if (_listener) @@ -327,8 +329,9 @@ void raw::finished(process& p) noexcept { } catch (std::exception const& e) { engine_logger(log_runtime_warning, basic) << "Warning: Raw process termination routine failed: " << e.what(); - log_v2::runtime()->warn( - "Warning: Raw process termination routine failed: {}", e.what()); + SPDLOG_LOGGER_WARN(log_v2::runtime(), + "Warning: Raw process termination routine failed: {}", + e.what()); // Release process, put into the free list. 
std::lock_guard lock(_lock); diff --git a/engine/src/commands/result.cc b/engine/src/commands/result.cc index 3c0c090b6c5..c079c30ab06 100644 --- a/engine/src/commands/result.cc +++ b/engine/src/commands/result.cc @@ -17,6 +17,7 @@ * */ #include "com/centreon/engine/commands/result.hh" +#include "com/centreon/engine/check_result.hh" #include "com/centreon/timestamp.hh" @@ -43,6 +44,14 @@ result::result(result const& right) { _internal_copy(right); } +result::result(const check_result& check_res) + : command_id(0), + end_time(check_res.get_finish_time()), + exit_code(check_res.get_return_code()), + start_time(check_res.get_start_time()), + exit_status(check_res.get_exited_ok() ? process::normal : process::crash), + output(check_res.get_output()) {} + /** * Destructor. */ @@ -104,3 +113,17 @@ void result::_internal_copy(result const& right) { start_time = right.start_time; output = right.output; } + +CCE_BEGIN() +namespace commands { +std::ostream& operator<<(std::ostream& s, const result& to_dump) { + s << "start_time=" << to_dump.start_time << ", end_time=" << to_dump.end_time + << ", exit_code=" << to_dump.exit_code + << ", exit_status=" << to_dump.exit_status << ", output='" << to_dump.output + << '\''; + return s; +} + +} // namespace commands + +CCE_END() diff --git a/engine/src/compatibility/logging.cc b/engine/src/compatibility/logging.cc index d97bb506495..f7d1505e30f 100644 --- a/engine/src/compatibility/logging.cc +++ b/engine/src/compatibility/logging.cc @@ -49,12 +49,12 @@ void log_host_state(unsigned int type, com::centreon::engine::host* hst) { state = host::tab_host_states[hst->get_current_state()].second.c_str(); std::string const& state_type{host::tab_state_type[hst->get_state_type()]}; engine_logger(log_info_message, basic) - << type_str << " HOST STATE: " << hst->get_name() << ";" << state << ";" + << type_str << " HOST STATE: " << hst->name() << ";" << state << ";" << state_type << ";" << hst->get_current_attempt() << ";" << 
hst->get_plugin_output(); - log_v2::events()->info("{} HOST STATE: {};{};{};{};{}", type_str, - hst->get_name(), state, state_type, - hst->get_current_attempt(), hst->get_plugin_output()); + log_v2::events()->info("{} HOST STATE: {};{};{};{};{}", type_str, hst->name(), + state, state_type, hst->get_current_attempt(), + hst->get_plugin_output()); } /** diff --git a/engine/src/configuration/anomalydetection.cc b/engine/src/configuration/anomalydetection.cc index 9f2ca802fd5..0199740d76b 100644 --- a/engine/src/configuration/anomalydetection.cc +++ b/engine/src/configuration/anomalydetection.cc @@ -2247,7 +2247,7 @@ bool anomalydetection::_set_category_tags(const std::string& value) { for (auto& tag : tags) { int64_t id; bool parse_ok; - parse_ok = SimpleAtoi(tag, &id); + parse_ok = absl::SimpleAtoi(tag, &id); if (parse_ok) { _tags.emplace(id, tag::servicecategory); } else { @@ -2282,7 +2282,7 @@ bool anomalydetection::_set_group_tags(const std::string& value) { for (auto& tag : tags) { int64_t id; bool parse_ok; - parse_ok = SimpleAtoi(tag, &id); + parse_ok = absl::SimpleAtoi(tag, &id); if (parse_ok) { _tags.emplace(id, tag::servicegroup); } else { diff --git a/engine/src/configuration/applier/anomalydetection.cc b/engine/src/configuration/applier/anomalydetection.cc index 14920726211..8f474020305 100644 --- a/engine/src/configuration/applier/anomalydetection.cc +++ b/engine/src/configuration/applier/anomalydetection.cc @@ -100,8 +100,9 @@ void applier::anomalydetection::add_object( engine_logger(logging::dbg_config, logging::more) << "Creating new anomalydetection '" << obj.service_description() << "' of host '" << obj.host_name() << "'."; - log_v2::config()->debug("Creating new anomalydetection '{}' of host '{}'.", - obj.service_description(), obj.host_name()); + SPDLOG_LOGGER_DEBUG(log_v2::config(), + "Creating new anomalydetection '{}' of host '{}'.", + obj.service_description(), obj.host_name()); // Add anomalydetection to the global configuration set. 
config->anomalydetections().insert(obj); @@ -238,8 +239,9 @@ void applier::anomalydetection::modify_object( engine_logger(logging::dbg_config, logging::more) << "Modifying new anomalydetection '" << service_description << "' of host '" << host_name << "'."; - log_v2::config()->debug("Modifying new anomalydetection '{}' of host '{}'.", - service_description, host_name); + SPDLOG_LOGGER_DEBUG(log_v2::config(), + "Modifying new anomalydetection '{}' of host '{}'.", + service_description, host_name); // Find the configuration object. set_anomalydetection::iterator it_cfg( @@ -447,8 +449,9 @@ void applier::anomalydetection::remove_object( engine_logger(logging::dbg_config, logging::more) << "Removing anomalydetection '" << service_description << "' of host '" << host_name << "'."; - log_v2::config()->debug("Removing anomalydetection '{}' of host '{}'.", - service_description, host_name); + SPDLOG_LOGGER_DEBUG(log_v2::config(), + "Removing anomalydetection '{}' of host '{}'.", + service_description, host_name); // Find anomalydetection. service_id_map::iterator it(engine::service::services_by_id.find(obj.key())); @@ -498,8 +501,9 @@ void applier::anomalydetection::resolve_object( engine_logger(logging::dbg_config, logging::more) << "Resolving anomalydetection '" << obj.service_description() << "' of host '" << obj.host_name() << "'."; - log_v2::config()->debug("Resolving anomalydetection '{}' of host '{}'.", - obj.service_description(), obj.host_name()); + SPDLOG_LOGGER_DEBUG(log_v2::config(), + "Resolving anomalydetection '{}' of host '{}'.", + obj.service_description(), obj.host_name()); // Find anomalydetection. 
service_id_map::iterator it( diff --git a/engine/src/configuration/applier/command.cc b/engine/src/configuration/applier/command.cc index 6020e345b41..749f79a642b 100644 --- a/engine/src/configuration/applier/command.cc +++ b/engine/src/configuration/applier/command.cc @@ -126,8 +126,8 @@ void applier::command::modify_object(configuration::command const& obj) { // not referenced anywhere, only ::command objects are. commands::command::commands.erase(obj.command_name()); if (obj.connector().empty()) { - std::shared_ptr raw{new commands::raw( - obj.command_name(), obj.command_line(), &checks::checker::instance())}; + auto raw = std::make_shared( + obj.command_name(), obj.command_line(), &checks::checker::instance()); commands::command::commands[raw->get_name()] = raw; } else { connector_map::iterator found_con{ diff --git a/engine/src/configuration/applier/connector.cc b/engine/src/configuration/applier/connector.cc index 3a2b22e3c26..08561a60c18 100644 --- a/engine/src/configuration/applier/connector.cc +++ b/engine/src/configuration/applier/connector.cc @@ -60,8 +60,8 @@ void applier::connector::add_object(configuration::connector const& obj) { config->connectors().insert(obj); // Create connector. - std::shared_ptr cmd(new commands::connector( - obj.connector_name(), processed_cmd, &checks::checker::instance())); + auto cmd = std::make_shared( + obj.connector_name(), processed_cmd, &checks::checker::instance()); commands::connector::connectors[obj.connector_name()] = cmd; } diff --git a/engine/src/configuration/applier/contactgroup.cc b/engine/src/configuration/applier/contactgroup.cc index dc7d3021e43..86a82f74507 100644 --- a/engine/src/configuration/applier/contactgroup.cc +++ b/engine/src/configuration/applier/contactgroup.cc @@ -84,8 +84,7 @@ void applier::contactgroup::add_object(configuration::contactgroup const& obj) { config->contactgroups().insert(obj); // Create contact group. 
- std::shared_ptr cg{new engine::contactgroup(obj)}; - + auto cg = std::make_shared(obj); for (set_string::const_iterator it(obj.members().begin()), end(obj.members().end()); it != end; ++it) { diff --git a/engine/src/configuration/applier/host.cc b/engine/src/configuration/applier/host.cc index c7a19e2964c..aad3189d4c5 100644 --- a/engine/src/configuration/applier/host.cc +++ b/engine/src/configuration/applier/host.cc @@ -59,7 +59,7 @@ void applier::host::add_object(configuration::host const& obj) { config->hosts().insert(obj); // Create host. - std::shared_ptr h{new engine::host( + auto h = std::make_shared( obj.host_id(), obj.host_name(), obj.display_name(), obj.alias(), obj.address(), obj.check_period(), static_cast(obj.initial_state()), @@ -95,9 +95,9 @@ void applier::host::add_object(configuration::host const& obj) { obj.have_coords_3d(), true, // should_be_drawn, enabled by Nagios obj.retain_status_information(), obj.retain_nonstatus_information(), - obj.obsess_over_host(), obj.timezone(), obj.icon_id())}; + obj.obsess_over_host(), obj.timezone(), obj.icon_id()); - engine::host::hosts.insert({h->get_name(), h}); + engine::host::hosts.insert({h->name(), h}); engine::host::hosts_by_id.insert({obj.host_id(), h}); h->set_initial_notif_time(0); @@ -247,8 +247,8 @@ void applier::host::modify_object(configuration::host const& obj) { config->hosts().insert(obj); // Modify properties. - if (it_obj->second->get_name() != obj.host_name()) { - engine::host::hosts.erase(it_obj->second->get_name()); + if (it_obj->second->name() != obj.host_name()) { + engine::host::hosts.erase(it_obj->second->name()); engine::host::hosts.insert({obj.host_name(), it_obj->second}); } @@ -487,7 +487,7 @@ void applier::host::remove_object(configuration::host const& obj) { // remove host from hostgroup->members for (auto& it_h : it->second->get_parent_groups()) - it_h->members.erase(it->second->get_name()); + it_h->members.erase(it->second->name()); // Notify event broker. 
timeval tv(get_broker_timestamp(nullptr)); @@ -503,7 +503,7 @@ void applier::host::remove_object(configuration::host const& obj) { MODATTR_ALL, &tv); // Erase host object (will effectively delete the object). - engine::host::hosts.erase(it->second->get_name()); + engine::host::hosts.erase(it->second->name()); engine::host::hosts_by_id.erase(it); } diff --git a/engine/src/configuration/applier/hostescalation.cc b/engine/src/configuration/applier/hostescalation.cc index 564b692541f..8ed4aef8c7d 100644 --- a/engine/src/configuration/applier/hostescalation.cc +++ b/engine/src/configuration/applier/hostescalation.cc @@ -60,7 +60,7 @@ void applier::hostescalation::add_object( config->hostescalations().insert(obj); // Create host escalation. - std::shared_ptr he{new engine::hostescalation( + auto he = std::make_shared( *obj.hosts().begin(), obj.first_notification(), obj.last_notification(), obj.notification_interval(), obj.escalation_period(), ((obj.escalation_options() & configuration::hostescalation::down) @@ -73,7 +73,7 @@ void applier::hostescalation::add_object( ((obj.escalation_options() & configuration::hostescalation::recovery) ? notifier::up : notifier::none), - obj.uuid())}; + obj.uuid()); // Add new items to the configuration state. engine::hostescalation::hostescalations.insert({he->get_hostname(), he}); diff --git a/engine/src/configuration/applier/hostgroup.cc b/engine/src/configuration/applier/hostgroup.cc index d425a02ced0..3bede9295c8 100644 --- a/engine/src/configuration/applier/hostgroup.cc +++ b/engine/src/configuration/applier/hostgroup.cc @@ -64,9 +64,9 @@ void applier::hostgroup::add_object(configuration::hostgroup const& obj) { config->hostgroups().insert(obj); // Create host group. - std::shared_ptr hg{new engine::hostgroup( + auto hg = std::make_shared( obj.hostgroup_id(), obj.hostgroup_name(), obj.alias(), obj.notes(), - obj.notes_url(), obj.action_url())}; + obj.notes_url(), obj.action_url()); // Add new items to the configuration state. 
engine::hostgroup::hostgroups.insert({hg->get_group_name(), hg}); diff --git a/engine/src/configuration/applier/scheduler.cc b/engine/src/configuration/applier/scheduler.cc index be2ca9b27b4..fb6d59d6b4d 100644 --- a/engine/src/configuration/applier/scheduler.cc +++ b/engine/src/configuration/applier/scheduler.cc @@ -573,9 +573,8 @@ void applier::scheduler::_calculate_host_scheduling_params() { } else { hst.set_should_be_scheduled(false); engine_logger(dbg_events, more) - << "Host " << hst.get_name() << " should not be scheduled."; - log_v2::events()->debug("Host {} should not be scheduled.", - hst.get_name()); + << "Host " << hst.name() << " should not be scheduled."; + log_v2::events()->debug("Host {} should not be scheduled.", hst.name()); } ++scheduling_info.total_hosts; @@ -909,8 +908,8 @@ void applier::scheduler::_schedule_host_events( for (unsigned int i(0); i < end; ++i) { com::centreon::engine::host& hst(*hosts[i]); - engine_logger(dbg_events, most) << "Host '" << hst.get_name() << "'"; - log_v2::events()->debug("Host '{}'", hst.get_name()); + engine_logger(dbg_events, most) << "Host '" << hst.name() << "'"; + log_v2::events()->debug("Host '{}'", hst.name()); // skip hosts that shouldn't be scheduled. if (!hst.get_should_be_scheduled()) { @@ -1041,8 +1040,9 @@ void applier::scheduler::_schedule_service_events( ++interleave_block_index * total_interleave_blocks); // set the preferred next check time for the service. - s->set_next_check((time_t)( - now + mult_factor * scheduling_info.service_inter_check_delay)); + s->set_next_check( + (time_t)(now + + mult_factor * scheduling_info.service_inter_check_delay)); // Make sure the service can actually be scheduled when we want. 
{ diff --git a/engine/src/configuration/applier/serviceescalation.cc b/engine/src/configuration/applier/serviceescalation.cc index 25b2ee61de5..e7eb32325cc 100644 --- a/engine/src/configuration/applier/serviceescalation.cc +++ b/engine/src/configuration/applier/serviceescalation.cc @@ -64,7 +64,7 @@ void applier::serviceescalation::add_object( config->serviceescalations().insert(obj); // Create service escalation. - std::shared_ptr se{new engine::serviceescalation( + auto se = std::make_shared( obj.hosts().front(), obj.service_description().front(), obj.first_notification(), obj.last_notification(), obj.notification_interval(), obj.escalation_period(), @@ -83,7 +83,7 @@ void applier::serviceescalation::add_object( configuration::serviceescalation::recovery) ? notifier::ok : notifier::none), - obj.uuid())}; + obj.uuid()); // Add new items to the global list. engine::serviceescalation::serviceescalations.insert( diff --git a/engine/src/configuration/applier/servicegroup.cc b/engine/src/configuration/applier/servicegroup.cc index d2ea1d162d5..c594a4505e2 100644 --- a/engine/src/configuration/applier/servicegroup.cc +++ b/engine/src/configuration/applier/servicegroup.cc @@ -77,9 +77,9 @@ void applier::servicegroup::add_object(configuration::servicegroup const& obj) { config->servicegroups().insert(obj); // Create servicegroup. - std::shared_ptr sg{new engine::servicegroup( + auto sg = std::make_shared( obj.servicegroup_id(), obj.servicegroup_name(), obj.alias(), obj.notes(), - obj.notes_url(), obj.action_url())}; + obj.notes_url(), obj.action_url()); // Add new items to the list. 
engine::servicegroup::servicegroups.insert({sg->get_group_name(), sg}); diff --git a/engine/src/configuration/applier/timeperiod.cc b/engine/src/configuration/applier/timeperiod.cc index 5e222b12e6b..3a4c55076cd 100644 --- a/engine/src/configuration/applier/timeperiod.cc +++ b/engine/src/configuration/applier/timeperiod.cc @@ -74,8 +74,8 @@ void applier::timeperiod::add_object(configuration::timeperiod const& obj) { config->timeperiods().insert(obj); // Create time period. - std::shared_ptr tp{ - new engine::timeperiod(obj.timeperiod_name(), obj.alias())}; + auto tp = + std::make_shared(obj.timeperiod_name(), obj.alias()); engine::timeperiod::timeperiods.insert({obj.timeperiod_name(), tp}); diff --git a/engine/src/configuration/host.cc b/engine/src/configuration/host.cc index f21f2d4d039..88b44b4541d 100644 --- a/engine/src/configuration/host.cc +++ b/engine/src/configuration/host.cc @@ -1759,7 +1759,7 @@ bool host::_set_category_tags(const std::string& value) { for (auto& tag : tags) { int64_t id; bool parse_ok; - parse_ok = SimpleAtoi(tag, &id); + parse_ok = absl::SimpleAtoi(tag, &id); if (parse_ok) { _tags.emplace(id, tag::hostcategory); } else { @@ -1793,7 +1793,7 @@ bool host::_set_group_tags(const std::string& value) { for (auto& tag : tags) { int64_t id; bool parse_ok; - parse_ok = SimpleAtoi(tag, &id); + parse_ok = absl::SimpleAtoi(tag, &id); if (parse_ok) { _tags.emplace(id, tag::hostgroup); } else { diff --git a/engine/src/configuration/object.cc b/engine/src/configuration/object.cc index 1933523d331..76fd084e27f 100644 --- a/engine/src/configuration/object.cc +++ b/engine/src/configuration/object.cc @@ -125,41 +125,41 @@ bool object::operator!=(object const& right) const noexcept { object_ptr object::create(std::string const& type_name) { object_ptr obj; if (type_name == "service") - obj = object_ptr(new configuration::service()); + obj = std::make_shared(); else if (type_name == "host") - obj = object_ptr(new configuration::host()); + obj = 
std::make_shared(); else if (type_name == "contact") - obj = object_ptr(new configuration::contact()); + obj = std::make_shared(); else if (type_name == "contactgroup") - obj = object_ptr(new configuration::contactgroup()); + obj = std::make_shared(); else if (type_name == "servicegroup") - obj = object_ptr(new configuration::servicegroup()); + obj = std::make_shared(); else if (type_name == "hostgroup") - obj = object_ptr(new configuration::hostgroup()); + obj = std::make_shared(); else if (type_name == "servicedependency") - obj = object_ptr(new configuration::servicedependency()); + obj = std::make_shared(); else if (type_name == "serviceescalation") - obj = object_ptr(new configuration::serviceescalation()); + obj = std::make_shared(); else if (type_name == "hostdependency") - obj = object_ptr(new configuration::hostdependency()); + obj = std::make_shared(); else if (type_name == "hostescalation") - obj = object_ptr(new configuration::hostescalation()); + obj = std::make_shared(); else if (type_name == "command") - obj = object_ptr(new configuration::command()); + obj = std::make_shared(); else if (type_name == "timeperiod") - obj = object_ptr(new configuration::timeperiod()); + obj = std::make_shared(); else if (type_name == "connector") - obj = object_ptr(new configuration::connector()); + obj = std::make_shared(); else if (type_name == "serviceextinfo") - obj = object_ptr(new configuration::serviceextinfo()); + obj = std::make_shared(); else if (type_name == "hostextinfo") - obj = object_ptr(new configuration::hostextinfo()); + obj = std::make_shared(); else if (type_name == "anomalydetection") - obj = object_ptr(new configuration::anomalydetection()); + obj = std::make_shared(); else if (type_name == "severity") - obj = object_ptr(new configuration::severity()); + obj = std::make_shared(); else if (type_name == "tag") - obj = object_ptr(new configuration::tag()); + obj = std::make_shared(); return obj; } diff --git a/engine/src/configuration/service.cc 
b/engine/src/configuration/service.cc index 0e96d869aa4..c68148c59ec 100644 --- a/engine/src/configuration/service.cc +++ b/engine/src/configuration/service.cc @@ -2172,7 +2172,7 @@ bool service::_set_category_tags(const std::string& value) { for (auto& tag : tags) { int64_t id; bool parse_ok; - parse_ok = SimpleAtoi(tag, &id); + parse_ok = absl::SimpleAtoi(tag, &id); if (parse_ok) { _tags.emplace(id, tag::servicecategory); } else { @@ -2207,7 +2207,7 @@ bool service::_set_group_tags(const std::string& value) { for (auto& tag : tags) { int64_t id; bool parse_ok; - parse_ok = SimpleAtoi(tag, &id); + parse_ok = absl::SimpleAtoi(tag, &id); if (parse_ok) { _tags.emplace(id, tag::servicegroup); } else { diff --git a/engine/src/configuration/state.cc b/engine/src/configuration/state.cc index b4806c4a9a2..ec63624daab 100644 --- a/engine/src/configuration/state.cc +++ b/engine/src/configuration/state.cc @@ -140,6 +140,7 @@ std::unordered_map const state::_setters{ {"log_notifications", SETTER(bool, log_notifications)}, {"log_passive_checks", SETTER(bool, log_passive_checks)}, {"log_pid", SETTER(bool, log_pid)}, + {"log_file_line", SETTER(bool, log_file_line)}, {"log_rotation_method", SETTER(std::string const&, _set_log_rotation_method)}, {"log_service_retries", SETTER(bool, log_service_retries)}, @@ -169,6 +170,7 @@ std::unordered_map const state::_setters{ {"poller_name", SETTER(std::string const&, poller_name)}, {"poller_id", SETTER(uint32_t, poller_id)}, {"rpc_port", SETTER(uint16_t, rpc_port)}, + {"rpc_listen_address", SETTER(const std::string&, rpc_listen_address)}, {"precached_object_file", SETTER(std::string const&, _set_precached_object_file)}, {"process_performance_data", SETTER(bool, process_performance_data)}, @@ -390,6 +392,7 @@ static std::string const default_log_level_process("info"); static std::string const default_log_level_runtime("error"); static std::string const default_use_timezone(""); static bool const default_use_true_regexp_matching(false); 
+static const std::string default_rpc_listen_address("localhost"); /** * Default constructor. @@ -455,6 +458,7 @@ state::state() _log_notifications(default_log_notifications), _log_passive_checks(default_log_passive_checks), _log_pid(default_log_pid), + _log_file_line(false), _log_service_retries(default_log_service_retries), _low_host_flap_threshold(default_low_host_flap_threshold), _low_service_flap_threshold(default_low_service_flap_threshold), @@ -475,6 +479,7 @@ state::state() _poller_name{"unknown"}, _poller_id{0}, _rpc_port{0}, + _rpc_listen_address{default_rpc_listen_address}, _process_performance_data(default_process_performance_data), _retained_contact_host_attribute_mask( default_retained_contact_host_attribute_mask), @@ -626,6 +631,7 @@ state& state::operator=(state const& right) { _log_notifications = right._log_notifications; _log_passive_checks = right._log_passive_checks; _log_pid = right._log_pid; + _log_file_line = right._log_file_line; _log_service_retries = right._log_service_retries; _low_host_flap_threshold = right._low_host_flap_threshold; _low_service_flap_threshold = right._low_service_flap_threshold; @@ -646,6 +652,7 @@ state& state::operator=(state const& right) { _poller_name = right._poller_name; _poller_id = right._poller_id; _rpc_port = right._rpc_port; + _rpc_listen_address = right._rpc_listen_address; _process_performance_data = right._process_performance_data; _retained_contact_host_attribute_mask = right._retained_contact_host_attribute_mask; @@ -788,7 +795,7 @@ bool state::operator==(state const& right) const noexcept { _log_host_retries == right._log_host_retries && _log_notifications == right._log_notifications && _log_passive_checks == right._log_passive_checks && - _log_pid == right._log_pid && + _log_pid == right._log_pid && _log_file_line == right._log_file_line && _log_service_retries == right._log_service_retries && _low_host_flap_threshold == right._low_host_flap_threshold && _low_service_flap_threshold == 
right._low_service_flap_threshold && @@ -808,6 +815,7 @@ bool state::operator==(state const& right) const noexcept { _perfdata_timeout == right._perfdata_timeout && _poller_name == right._poller_name && _poller_id == right._poller_id && _rpc_port == right._rpc_port && + _rpc_listen_address == right._rpc_listen_address && _process_performance_data == right._process_performance_data && _retained_contact_host_attribute_mask == right._retained_contact_host_attribute_mask && @@ -2457,6 +2465,15 @@ void state::log_pid(bool value) { _log_pid = value; } +/** + * Set the log file line value. + * + * @param[in] value The new log file line value. + */ +void state::log_file_line(bool value) { + _log_file_line = value; +} + /** * Get log_service_retries value. * @@ -2793,7 +2810,7 @@ std::string const& state::poller_name() const noexcept { * * @param[in] value The new poller_name value. */ -void state::poller_name(std::string const& value) noexcept { +void state::poller_name(std::string const& value) { _poller_name = value; } @@ -2811,7 +2828,7 @@ uint32_t state::poller_id() const noexcept { * * @param[in] value The new poller_id value. */ -void state::poller_id(uint32_t value) noexcept { +void state::poller_id(uint32_t value) { _poller_id = value; } @@ -2825,14 +2842,32 @@ uint16_t state::rpc_port() const noexcept { } /** - * Set poller_id value. + * Set the rpc_port value. * * @param[in] value The new poller_id value. */ -void state::rpc_port(uint16_t value) noexcept { +void state::rpc_port(uint16_t value) { _rpc_port = value; } +/** + * Get rpc_listen_address value. + * + * @return The grpc api listen address value. + */ +const std::string& state::rpc_listen_address() const noexcept { + return _rpc_listen_address; +} + +/** + * Set grpc api listen_address value. + * + * @param[in] value The new grpc api listen address. 
+ */ +void state::rpc_listen_address(const std::string& listen_address) { + _rpc_listen_address = listen_address; +} + /** * Get process_performance_data value. * diff --git a/engine/src/contact.cc b/engine/src/contact.cc index 4ee1f6810ee..5d28319a00b 100644 --- a/engine/src/contact.cc +++ b/engine/src/contact.cc @@ -548,7 +548,7 @@ std::shared_ptr add_contact( } // Allocate memory for a new contact. - std::shared_ptr obj(new contact); + auto obj = std::make_shared(); try { // Duplicate vars. diff --git a/engine/src/downtimes/downtime_manager.cc b/engine/src/downtimes/downtime_manager.cc index 65c0b055156..e742cccb4ee 100644 --- a/engine/src/downtimes/downtime_manager.cc +++ b/engine/src/downtimes/downtime_manager.cc @@ -153,10 +153,10 @@ int downtime_manager::check_pending_flex_host_downtime(host* hst) { current_time <= it->second->get_end_time()) { engine_logger(dbg_downtime, basic) << "Flexible downtime (id=" << it->second->get_downtime_id() - << ") for host '" << hst->get_name() << "' starting now..."; + << ") for host '" << hst->name() << "' starting now..."; log_v2::downtimes()->trace( "Flexible downtime (id={}) for host '{}' starting now...", - it->second->get_downtime_id(), hst->get_name()); + it->second->get_downtime_id(), hst->name()); it->second->start_flex_downtime(); it->second->handle(); diff --git a/engine/src/downtimes/host_downtime.cc b/engine/src/downtimes/host_downtime.cc index dfdb8c4f5cc..61023356246 100644 --- a/engine/src/downtimes/host_downtime.cc +++ b/engine/src/downtimes/host_downtime.cc @@ -180,14 +180,14 @@ int host_downtime::unschedule() { /* log a notice - this is parsed by the history CGI */ if (it->second->get_scheduled_downtime_depth() == 0) { engine_logger(log_info_message, basic) - << "HOST DOWNTIME ALERT: " << it->second->get_name() + << "HOST DOWNTIME ALERT: " << it->second->name() << ";CANCELLED; Scheduled downtime for host has been " "cancelled."; log_v2::events()->info( "HOST DOWNTIME ALERT: {};CANCELLED; Scheduled downtime 
for host has " "been " "cancelled.", - it->second->get_name()); + it->second->name()); /* send a notification */ it->second->notify(notifier::reason_downtimecancelled, "", "", @@ -241,9 +241,9 @@ int host_downtime::subscribe() { log_v2::downtimes()->trace("Scheduled Downtime Details:"); engine_logger(dbg_downtime, basic) << " Type: Host Downtime\n" " Host: " - << hst->get_name(); + << hst->name(); log_v2::downtimes()->trace(" Type: Host Downtime "); - log_v2::downtimes()->trace(" Host: {}", hst->get_name()); + log_v2::downtimes()->trace(" Host: {}", hst->name()); engine_logger(dbg_downtime, basic) << " Fixed/Flex: " << (is_fixed() ? "Fixed\n" : "Flexible\n") << " Start: " << start_time_string @@ -267,10 +267,10 @@ int host_downtime::subscribe() { /* add a non-persistent comment to the host or service regarding the scheduled * outage */ - std::shared_ptr com{ - new comment(comment::host, comment::downtime, hst->get_host_id(), 0, - time(NULL), "(Centreon Engine Process)", oss.str(), false, - comment::internal, false, (time_t)0)}; + auto com = std::make_shared( + comment::host, comment::downtime, hst->get_host_id(), 0, time(NULL), + "(Centreon Engine Process)", oss.str(), false, comment::internal, false, + (time_t)0); comment::comments.insert({com->get_comment_id(), com}); _comment_id = com->get_comment_id(); @@ -358,23 +358,23 @@ int host_downtime::handle() { if (it_hst->second->get_scheduled_downtime_depth() == 0) { engine_logger(dbg_downtime, basic) - << "Host '" << it_hst->second->get_name() + << "Host '" << it_hst->second->name() << "' has exited from a period of scheduled downtime (id=" << get_downtime_id() << ")."; log_v2::downtimes()->trace( "Host '{}' has exited from a period of scheduled downtime (id={}).", - it_hst->second->get_name(), get_downtime_id()); + it_hst->second->name(), get_downtime_id()); /* log a notice - this one is parsed by the history CGI */ engine_logger(log_info_message, basic) - << "HOST DOWNTIME ALERT: " << it_hst->second->get_name() + 
<< "HOST DOWNTIME ALERT: " << it_hst->second->name() << ";STOPPED; Host has exited from a period of scheduled " "downtime"; log_v2::events()->info( "HOST DOWNTIME ALERT: {};STOPPED; Host has exited from a period of " "scheduled " "downtime", - it_hst->second->get_name()); + it_hst->second->name()); /* send a notification */ it_hst->second->notify(notifier::reason_downtimeend, get_author(), @@ -434,21 +434,21 @@ int host_downtime::handle() { if (it_hst->second->get_scheduled_downtime_depth() == 0) { engine_logger(dbg_downtime, basic) - << "Host '" << it_hst->second->get_name() + << "Host '" << it_hst->second->name() << "' has entered a period of scheduled downtime (id=" << get_downtime_id() << ")."; log_v2::downtimes()->trace( "Host '{}' has entered a period of scheduled downtime (id={}).", - it_hst->second->get_name(), get_downtime_id()); + it_hst->second->name(), get_downtime_id()); /* log a notice - this one is parsed by the history CGI */ engine_logger(log_info_message, basic) - << "HOST DOWNTIME ALERT: " << it_hst->second->get_name() + << "HOST DOWNTIME ALERT: " << it_hst->second->name() << ";STARTED; Host has entered a period of scheduled downtime"; log_v2::events()->info( "HOST DOWNTIME ALERT: {};STARTED; Host has entered a period of " "scheduled downtime", - it_hst->second->get_name()); + it_hst->second->name()); /* send a notification */ it_hst->second->notify(notifier::reason_downtimestart, get_author(), diff --git a/engine/src/events/loop.cc b/engine/src/events/loop.cc index 22c28728193..014a9e108f3 100644 --- a/engine/src/events/loop.cc +++ b/engine/src/events/loop.cc @@ -332,9 +332,10 @@ void loop::_dispatching() { else { if (notifier::soft == temp_service->get_state_type() && temp_service->get_current_state() != service::state_ok) - temp_service->set_next_check((time_t)( - temp_service->get_next_check() + - temp_service->retry_interval() * config->interval_length())); + temp_service->set_next_check( + (time_t)(temp_service->get_next_check() + + 
temp_service->retry_interval() * + config->interval_length())); else temp_service->set_next_check( (time_t)(temp_service->get_next_check() + diff --git a/engine/src/events/timed_event.cc b/engine/src/events/timed_event.cc index 717582289eb..888de3cfa69 100644 --- a/engine/src/events/timed_event.cc +++ b/engine/src/events/timed_event.cc @@ -292,11 +292,11 @@ void timed_event::_exec_event_host_check() { (double)(tv.tv_usec / 1000) / 1000.0); engine_logger(dbg_events, basic) - << "** Host Check Event ==> Host: '" << hst->get_name() + << "** Host Check Event ==> Host: '" << hst->name() << "', Options: " << event_options << ", Latency: " << latency << " sec"; log_v2::events()->trace( "** Host Check Event ==> Host: '{}', Options: {}, Latency: {} sec", - hst->get_name(), event_options, latency); + hst->name(), event_options, latency); // run the host check. hst->run_scheduled_check(event_options, latency); diff --git a/engine/src/host.cc b/engine/src/host.cc index a932f7047b9..3e53b6d1e50 100644 --- a/engine/src/host.cc +++ b/engine/src/host.cc @@ -136,11 +136,11 @@ host_id_map host::hosts_by_id; * @param[in] timezone The timezone to apply to the host */ host::host(uint64_t host_id, - std::string const& name, - std::string const& display_name, - std::string const& alias, - std::string const& address, - std::string const& check_period, + const std::string& name, + const std::string& display_name, + const std::string& alias, + const std::string& address, + const std::string& check_period, enum host::host_state initial_state, uint32_t check_interval, uint32_t retry_interval, @@ -153,12 +153,12 @@ host::host(uint64_t host_id, uint32_t notification_interval, uint32_t first_notification_delay, uint32_t recovery_notification_delay, - std::string const& notification_period, + const std::string& notification_period, bool notifications_enabled, - std::string const& check_command, + const std::string& check_command, bool checks_enabled, bool accept_passive_checks, - std::string 
const& event_handler, + const std::string& event_handler, bool event_handler_enabled, bool flap_detection_enabled, double low_flap_threshold, @@ -172,13 +172,13 @@ host::host(uint64_t host_id, bool process_perfdata, bool check_freshness, int freshness_threshold, - std::string const& notes, - std::string const& notes_url, - std::string const& action_url, - std::string const& icon_image, - std::string const& icon_image_alt, - std::string const& vrml_image, - std::string const& statusmap_image, + const std::string& notes, + const std::string& notes_url, + const std::string& action_url, + const std::string& icon_image, + const std::string& icon_image_alt, + const std::string& vrml_image, + const std::string& statusmap_image, double x_2d, double y_2d, bool have_2d_coords, @@ -190,10 +190,11 @@ host::host(uint64_t host_id, bool retain_status_information, bool retain_nonstatus_information, bool obsess_over_host, - std::string const& timezone, + const std::string& timezone, uint64_t icon_id) : notifier{host_notification, - !display_name.empty() ? display_name : name, + name, + display_name, check_command, checks_enabled, accept_passive_checks, @@ -234,7 +235,6 @@ host::host(uint64_t host_id, false, icon_id}, _id{host_id}, - _name{name}, _address{address}, _process_performance_data{process_perfdata}, _vrml_image{vrml_image}, @@ -311,7 +311,7 @@ void host::add_child_host(host* child) { if (!child) throw engine_error() << "add child link called with nullptr ptr"; - child_hosts.insert({child->get_name(), child}); + child_hosts.insert({child->name(), child}); // Notify event broker. timeval tv(get_broker_timestamp(nullptr)); @@ -319,7 +319,7 @@ void host::add_child_host(host* child) { nullptr, child, nullptr, &tv); } -void host::add_parent_host(std::string const& host_name) { +void host::add_parent_host(const std::string& host_name) { // Make sure we have the data we need. 
if (host_name.empty()) { engine_logger(log_config_error, basic) @@ -329,38 +329,30 @@ void host::add_parent_host(std::string const& host_name) { } // A host cannot be a parent/child of itself. - if (_name == host_name) { + if (name() == host_name) { engine_logger(log_config_error, basic) - << "Error: Host '" << _name << "' cannot be a child/parent of itself"; + << "Error: Host '" << name() << "' cannot be a child/parent of itself"; log_v2::config()->error( - "Error: Host '{}' cannot be a child/parent of itself", _name); + "Error: Host '{}' cannot be a child/parent of itself", name()); throw engine_error() << "host is child/parent itself"; } parent_hosts.insert({host_name, nullptr}); } -std::string const& host::get_name() const { - return _name; -} - -void host::set_name(std::string const& name) { - _name = name; -} - -std::string const& host::get_alias() const { +const std::string& host::get_alias() const { return _alias; } -void host::set_alias(std::string const& alias) { +void host::set_alias(const std::string& alias) { _alias = alias; } -std::string const& host::get_address() const { +const std::string& host::get_address() const { return _address; } -void host::set_address(std::string const& address) { +void host::set_address(const std::string& address) { _address = address; } @@ -372,19 +364,19 @@ void host::set_process_performance_data(bool process_performance_data) { _process_performance_data = process_performance_data; } -std::string const& host::get_vrml_image() const { +const std::string& host::get_vrml_image() const { return _vrml_image; } -void host::set_vrml_image(std::string const& image) { +void host::set_vrml_image(const std::string& image) { _vrml_image = image; } -std::string const& host::get_statusmap_image() const { +const std::string& host::get_statusmap_image() const { return _statusmap_image; } -void host::set_statusmap_image(std::string const& image) { +void host::set_statusmap_image(const std::string& image) { _statusmap_image = image; } @@ 
-650,7 +642,7 @@ std::ostream& operator<<(std::ostream& os, const host& obj) { os << "host {\n" " name: " - << obj.get_name() + << obj.name() << "\n" " display_name: " << obj.get_display_name() @@ -1021,7 +1013,7 @@ int is_host_immediate_child_of_host(com::centreon::engine::host* parent_host, // Mid-level/bottom hosts. else { host_map_unsafe::const_iterator it{ - child_host->parent_hosts.find(parent_host->get_name())}; + child_host->parent_hosts.find(parent_host->name())}; return it != child_host->parent_hosts.end(); } @@ -1150,7 +1142,7 @@ bool engine::is_host_exist(uint64_t host_id) throw() { * * @return The host id or 0. */ -uint64_t engine::get_host_id(std::string const& name) { +uint64_t engine::get_host_id(const std::string& name) { host_map::const_iterator found{host::hosts.find(name)}; return found != host::hosts.end() ? found->second->get_host_id() : 0u; } @@ -1185,20 +1177,21 @@ int host::log_event() { log_options = tab_host_states[get_current_state()].first; state = tab_host_states[get_current_state()].second.c_str(); } - std::string const& state_type(tab_state_type[get_state_type()]); + const std::string& state_type(tab_state_type[get_state_type()]); engine_logger(log_options, basic) - << "HOST ALERT: " << get_name() << ";" << state << ";" << state_type - << ";" << get_current_attempt() << ";" << get_plugin_output(); - log_v2::events()->info("HOST ALERT: {};{};{};{};{}", get_name(), state, - state_type, get_current_attempt(), - get_plugin_output()); + << "HOST ALERT: " << name() << ";" << state << ";" << state_type << ";" + << get_current_attempt() << ";" << get_plugin_output(); + SPDLOG_LOGGER_INFO(log_v2::events(), "HOST ALERT: {};{};{};{};{}", name(), + state, state_type, get_current_attempt(), + get_plugin_output()); return OK; } /* process results of an asynchronous host check */ -int host::handle_async_check_result_3x(check_result* queued_check_result) { +int host::handle_async_check_result_3x( + const check_result& queued_check_result) { enum 
service::service_state svc_res{service::state_ok}; enum host::host_state hst_res{host::state_up}; int reschedule_check{false}; @@ -1207,83 +1200,83 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { struct timeval end_time_hires; engine_logger(dbg_functions, basic) << "handle_async_host_check_result_3x()"; - log_v2::functions()->trace("handle_async_host_check_result_3x()"); - - /* make sure we have what we need */ - if (!queued_check_result) - return ERROR; + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "handle_async_host_check_result_3x()"); /* get the current time */ time_t current_time = std::time(nullptr); double execution_time = - static_cast(queued_check_result->get_finish_time().tv_sec - - queued_check_result->get_start_time().tv_sec) + - static_cast(queued_check_result->get_finish_time().tv_usec - - queued_check_result->get_start_time().tv_usec) / + static_cast(queued_check_result.get_finish_time().tv_sec - + queued_check_result.get_start_time().tv_sec) + + static_cast(queued_check_result.get_finish_time().tv_usec - + queued_check_result.get_start_time().tv_usec) / 1000000.0; if (execution_time < 0.0) execution_time = 0.0; engine_logger(dbg_checks, more) - << "** Handling async check result for host '" << get_name() << "'..."; - log_v2::checks()->debug("** Handling async check result for host '{}'...", - get_name()); + << "** Handling async check result for host '" << name() << "'..."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "** Handling async check result for host '{}'...", + name()); engine_logger(dbg_checks, most) << "\tCheck Type: " - << (queued_check_result->get_check_type() == check_active ? "Active" - : "Passive") + << (queued_check_result.get_check_type() == check_active ? 
"Active" + : "Passive") << "\n" - << "\tCheck Options: " << queued_check_result->get_check_options() + << "\tCheck Options: " << queued_check_result.get_check_options() << "\n" << "\tReschedule Check?: " - << (queued_check_result->get_reschedule_check() ? "Yes" : "No") << "\n" + << (queued_check_result.get_reschedule_check() ? "Yes" : "No") << "\n" << "\tShould Reschedule Current Host Check?:" << get_should_reschedule_current_check() << "\tExited OK?: " - << (queued_check_result->get_exited_ok() ? "Yes" : "No") << "\n" + << (queued_check_result.get_exited_ok() ? "Yes" : "No") << "\n" << com::centreon::logging::setprecision(3) << "\tExec Time: " << execution_time << "\n" - << "\tLatency: " << queued_check_result->get_latency() << "\n" - << "\treturn Status: " << queued_check_result->get_return_code() + << "\tLatency: " << queued_check_result.get_latency() << "\n" + << "\treturn Status: " << queued_check_result.get_return_code() << "\n" - << "\tOutput: " << queued_check_result->get_output(); - - log_v2::checks()->debug("Check Type: {}", - queued_check_result->get_check_type() == check_active - ? "Active" - : "Passive"); - log_v2::checks()->debug("Check Options: {}", - queued_check_result->get_check_options()); - log_v2::checks()->debug( - "Reschedule Check?: {}", - queued_check_result->get_reschedule_check() ? "Yes" : "No"); - log_v2::checks()->debug( - "Should Reschedule Current Host Check?: {}", - queued_check_result->get_reschedule_check() ? "Yes" : "No"); - log_v2::checks()->debug("Exited OK?: {}", - queued_check_result->get_exited_ok() ? 
"Yes" : "No"); - log_v2::checks()->debug("Exec Time: {:.3f}", execution_time); - log_v2::checks()->debug("Latency: {}", - queued_check_result->get_latency()); - log_v2::checks()->debug("return Status: {}", - queued_check_result->get_return_code()); - log_v2::checks()->debug("Output: {}", - queued_check_result->get_output()); + << "\tOutput: " << queued_check_result.get_output(); + + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Check Type: {}", + queued_check_result.get_check_type() == check_active + ? "Active" + : "Passive"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Check Options: {}", + queued_check_result.get_check_options()); + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Reschedule Check?: {}", + queued_check_result.get_reschedule_check() ? "Yes" : "No"); + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Should Reschedule Current Host Check?: {}", + queued_check_result.get_reschedule_check() ? "Yes" : "No"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Exited OK?: {}", + queued_check_result.get_exited_ok() ? "Yes" : "No"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Exec Time: {:.3f}", + execution_time); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Latency: {}", + queued_check_result.get_latency()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "return Status: {}", + queued_check_result.get_return_code()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Output: {}", + queued_check_result.get_output()); /* decrement the number of host checks still out there... 
*/ - if (queued_check_result->get_check_type() == check_active && + if (queued_check_result.get_check_type() == check_active && currently_running_host_checks > 0) currently_running_host_checks--; /* * skip this host check results if its passive and we aren't accepting passive * check results */ - if (queued_check_result->get_check_type() == check_passive) { + if (queued_check_result.get_check_type() == check_passive) { if (!config->accept_passive_host_checks()) { engine_logger(dbg_checks, basic) << "Discarding passive host check result because passive host " "checks are disabled globally."; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Discarding passive host check result because passive host " "checks are disabled globally."); @@ -1293,7 +1286,8 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { engine_logger(dbg_checks, basic) << "Discarding passive host check result because passive checks " "are disabled for this host."; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Discarding passive host check result because passive checks " "are disabled for this host."); return ERROR; @@ -1303,7 +1297,7 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { /* * clear the freshening flag (it would have been set if this host was * determined to be stale) */ - if (queued_check_result->get_check_options() & CHECK_OPTION_FRESHNESS_CHECK) + if (queued_check_result.get_check_options() & CHECK_OPTION_FRESHNESS_CHECK) set_is_being_freshened(false); /* DISCARD INVALID FRESHNESS CHECK RESULTS */ @@ -1314,13 +1308,14 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { ** make the host fresh again, so we do a quick check to make sure the ** host is still stale before we accept the check result. 
*/ - if ((queued_check_result->get_check_options() & + if ((queued_check_result.get_check_options() & CHECK_OPTION_FRESHNESS_CHECK) && is_result_fresh(current_time, false)) { engine_logger(dbg_checks, basic) << "Discarding host freshness check result because the host is " "currently fresh (race condition avoided)."; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Discarding host freshness check result because the host is " "currently fresh (race condition avoided)."); return OK; @@ -1333,18 +1328,18 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { set_last_hard_state_change(get_last_check()); /* was this check passive or active? */ - set_check_type((queued_check_result->get_check_type() == check_active) + set_check_type((queued_check_result.get_check_type() == check_active) ? check_active : check_passive); /* update check statistics for passive results */ - if (queued_check_result->get_check_type() == check_passive) + if (queued_check_result.get_check_type() == check_passive) update_check_stats(PASSIVE_HOST_CHECK_STATS, - queued_check_result->get_start_time().tv_sec); + queued_check_result.get_start_time().tv_sec); /* should we reschedule the next check of the host? NOTE: this might be * overridden later... */ - reschedule_check = queued_check_result->get_reschedule_check(); + reschedule_check = queued_check_result.get_reschedule_check(); // Inherit the should reschedule flag from the host. It is used when // rescheduled checks were discarded because only one check can be executed @@ -1352,14 +1347,14 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { // and this check should be rescheduled regardless of what it was meant // to initially. if (get_should_reschedule_current_check() && - !queued_check_result->get_reschedule_check()) + !queued_check_result.get_reschedule_check()) reschedule_check = true; // Clear the should reschedule flag. 
set_should_reschedule_current_check(false); /* check latency is passed to us for both active and passive checks */ - set_latency(queued_check_result->get_latency()); + set_latency(queued_check_result.get_latency()); /* update the execution time for this check (millisecond resolution) */ set_execution_time(execution_time); @@ -1368,14 +1363,14 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { set_has_been_checked(true); /* clear the execution flag if this was an active check */ - if (queued_check_result->get_check_type() == check_active) + if (queued_check_result.get_check_type() == check_active) set_is_executing(false); /* get the last check time */ - set_last_check(queued_check_result->get_start_time().tv_sec); + set_last_check(queued_check_result.get_start_time().tv_sec); /* was this check passive or active? */ - set_check_type((queued_check_result->get_check_type() == check_active) + set_check_type((queued_check_result.get_check_type() == check_active) ? check_active : check_passive); @@ -1391,7 +1386,7 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { /* parse check output to get: (1) short output, (2) long output, (3) perf data */ - std::string output{queued_check_result->get_output()}; + std::string output{queued_check_result.get_output()}; std::string plugin_output; std::string long_plugin_output; std::string perf_data; @@ -1421,7 +1416,8 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { << "\n" << "Perf Data:\n" << (get_perf_data().empty() ? "NULL" : get_perf_data()); - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Parsing check output... Short Output: {} Long Output: {} " "Perf Data: {}", get_plugin_output().empty() ? 
"NULL" : get_plugin_output(), @@ -1430,20 +1426,21 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { /* get the unprocessed return code */ /* NOTE: for passive checks, this is the final/processed state */ svc_res = static_cast( - queued_check_result->get_return_code()); - hst_res = static_cast( - queued_check_result->get_return_code()); + queued_check_result.get_return_code()); + hst_res = + static_cast(queued_check_result.get_return_code()); /* adjust return code (active checks only) */ - if (queued_check_result->get_check_type() == check_active) { + if (queued_check_result.get_check_type() == check_active) { /* if there was some error running the command, just skip it (this shouldn't * be happening) */ - if (!queued_check_result->get_exited_ok()) { + if (!queued_check_result.get_exited_ok()) { engine_logger(log_runtime_warning, basic) - << "Warning: Check of host '" << get_name() + << "Warning: Check of host '" << name() << "' did not exit properly!"; - log_v2::runtime()->warn( - "Warning: Check of host '{}' did not exit properly!", get_name()); + SPDLOG_LOGGER_WARN(log_v2::runtime(), + "Warning: Check of host '{}' did not exit properly!", + name()); set_plugin_output("(Host check did not exit properly)"); set_long_plugin_output(""); @@ -1453,31 +1450,32 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { } /* make sure the return code is within bounds */ - else if (queued_check_result->get_return_code() < 0 || - queued_check_result->get_return_code() > 3) { + else if (queued_check_result.get_return_code() < 0 || + queued_check_result.get_return_code() > 3) { engine_logger(log_runtime_warning, basic) << "Warning: return (code of " - << queued_check_result->get_return_code() << " for check of host '" - << get_name() << "' was out of bounds." 
- << ((queued_check_result->get_return_code() == 126 || - queued_check_result->get_return_code() == 127) + << queued_check_result.get_return_code() << " for check of host '" + << name() << "' was out of bounds." + << ((queued_check_result.get_return_code() == 126 || + queued_check_result.get_return_code() == 127) ? " Make sure the plugin you're trying to run actually " "exists." : ""); - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: return (code of {} for check of host '{}' was out of " "bounds.", - queued_check_result->get_return_code(), get_name(), - (queued_check_result->get_return_code() == 126 || - queued_check_result->get_return_code() == 127) + queued_check_result.get_return_code(), name(), + (queued_check_result.get_return_code() == 126 || + queued_check_result.get_return_code() == 127) ? " Make sure the plugin you're trying to run actually exists." : ""); std::ostringstream oss; - oss << "(Return code of " << queued_check_result->get_return_code() + oss << "(Return code of " << queued_check_result.get_return_code() << " is out of bounds" - << ((queued_check_result->get_return_code() == 126 || - queued_check_result->get_return_code() == 127) + << ((queued_check_result.get_return_code() == 126 || + queued_check_result.get_return_code() == 127) ? 
" - plugin may be missing" : "") << ")"; @@ -1500,7 +1498,7 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { * determination is made later */ /* NOTE: only do this for active checks - passive check results already have * the final state */ - if (queued_check_result->get_check_type() == check_active) { + if (queued_check_result.get_check_type() == check_active) { /* Let WARNING states indicate the host is up * (fake the result to be state_ok) */ if (svc_res == service::state_warning) @@ -1523,29 +1521,29 @@ int host::handle_async_check_result_3x(check_result* queued_check_result) { config->cached_host_check_horizon()); engine_logger(dbg_checks, more) - << "** Async check result for host '" << get_name() + << "** Async check result for host '" << name() << "' handled: new state=" << get_current_state(); - log_v2::checks()->debug( - "** Async check result for host '{}' handled: new state={}", get_name(), + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), + "** Async check result for host '{}' handled: new state={}", name(), get_current_state()); /* high resolution start time for event broker */ - start_time_hires = queued_check_result->get_start_time(); + start_time_hires = queued_check_result.get_start_time(); /* high resolution end time for event broker */ gettimeofday(&end_time_hires, nullptr); /* send data to event broker */ - broker_host_check(NEBTYPE_HOSTCHECK_PROCESSED, NEBFLAG_NONE, NEBATTR_NONE, - this, get_check_type(), get_current_state(), - get_state_type(), start_time_hires, end_time_hires, - check_command().c_str(), get_latency(), - get_execution_time(), config->host_check_timeout(), - queued_check_result->get_early_timeout(), - queued_check_result->get_return_code(), nullptr, - const_cast(get_plugin_output().c_str()), - const_cast(get_long_plugin_output().c_str()), - const_cast(get_perf_data().c_str()), nullptr); + broker_host_check( + NEBTYPE_HOSTCHECK_PROCESSED, NEBFLAG_NONE, NEBATTR_NONE, this, + get_check_type(), 
get_current_state(), get_state_type(), start_time_hires, + end_time_hires, get_latency(), get_execution_time(), + config->host_check_timeout(), queued_check_result.get_early_timeout(), + queued_check_result.get_return_code(), nullptr, + const_cast(get_plugin_output().c_str()), + const_cast(get_long_plugin_output().c_str()), + const_cast(get_perf_data().c_str()), nullptr); return OK; } @@ -1558,15 +1556,16 @@ int host::run_scheduled_check(int check_options, double latency) { bool time_is_valid = true; engine_logger(dbg_functions, basic) << "run_scheduled_host_check_3x()"; - log_v2::functions()->trace("run_scheduled_host_check_3x()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "run_scheduled_host_check_3x()"); engine_logger(dbg_checks, basic) - << "Attempting to run scheduled check of host '" << get_name() + << "Attempting to run scheduled check of host '" << name() << "': check options=" << check_options << ", latency=" << latency; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Attempting to run scheduled check of host '{}': check options={}, " "latency={}", - get_name(), check_options, latency); + name(), check_options, latency); /* attempt to run the check */ result = run_async_check(check_options, latency, true, true, &time_is_valid, @@ -1576,7 +1575,8 @@ int host::run_scheduled_check(int check_options, double latency) { if (result == ERROR) { engine_logger(dbg_checks, more) << "Unable to run scheduled host check at this time"; - log_v2::checks()->debug("Unable to run scheduled host check at this time"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Unable to run scheduled host check at this time"); /* only attempt to (re)schedule checks that should get checked... 
*/ if (get_should_be_scheduled()) { @@ -1607,19 +1607,21 @@ int host::run_scheduled_check(int check_options, double latency) { set_next_check((time_t)(next_valid_time + 60 * 60 * 24 * 7)); engine_logger(log_runtime_warning, basic) - << "Warning: Check of host '" << get_name() + << "Warning: Check of host '" << name() << "' could not be " "rescheduled properly. Scheduling check for next week... " << " next_check " << get_next_check(); - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: Check of host '{}' could not be rescheduled properly. " "Scheduling check for next week... next_check {}", - get_name(), get_next_check()); + name(), get_next_check()); engine_logger(dbg_checks, more) << "Unable to find any valid times to reschedule the next" " host check!"; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Unable to find any valid times to reschedule the next host " "check!"); } @@ -1630,8 +1632,9 @@ int host::run_scheduled_check(int check_options, double latency) { engine_logger(dbg_checks, more) << "Rescheduled next host check for " << my_ctime(&next_valid_time); - log_v2::checks()->debug("Rescheduled next host check for {}", - my_ctime(&next_valid_time)); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Rescheduled next host check for {}", + my_ctime(&next_valid_time)); } } @@ -1662,25 +1665,27 @@ int host::run_async_check(int check_options, << "host::run_async_check, check_options=" << check_options << ", latency=" << latency << ", scheduled_check=" << scheduled_check << ", reschedule_check=" << reschedule_check; - log_v2::functions()->trace( - "host::run_async_check, check_options={}, latency={}, " - "scheduled_check={}, reschedule_check={}", - check_options, latency, scheduled_check, reschedule_check); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "host::run_async_check, check_options={}, latency={}, " + "scheduled_check={}, reschedule_check={}", + check_options, latency, scheduled_check, + reschedule_check); // 
Preamble. if (!get_check_command_ptr()) { engine_logger(log_runtime_error, basic) - << "Error: Attempt to run active check on host '" << get_name() + << "Error: Attempt to run active check on host '" << name() << "' with no check command"; log_v2::runtime()->error( "Error: Attempt to run active check on host '{}' with no check command", - get_name()); + name()); return ERROR; } engine_logger(dbg_checks, basic) - << "** Running async check of host '" << get_name() << "'..."; - log_v2::checks()->trace("** Running async check of host '{}'...", get_name()); + << "** Running async check of host '" << name() << "'..."; + SPDLOG_LOGGER_TRACE(log_v2::checks(), + "** Running async check of host '{}'...", name()); // Check if the host is viable now. if (!verify_check_viability(check_options, time_is_valid, preferred_time)) @@ -1696,12 +1701,13 @@ int host::run_async_check(int check_options, // Don't execute a new host check if one is already running. if (get_is_executing() && !(check_options & CHECK_OPTION_FORCE_EXECUTION)) { engine_logger(dbg_checks, basic) - << "A check of this host (" << get_name() + << "A check of this host (" << name() << ") is already being executed, so we'll pass for the moment..."; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "A check of this host ({}) is already being executed, so we'll pass " "for the moment...", - get_name()); + name()); return OK; } @@ -1710,37 +1716,36 @@ int host::run_async_check(int check_options, timeval end_time; memset(&start_time, 0, sizeof(start_time)); memset(&end_time, 0, sizeof(end_time)); - int res = broker_host_check(NEBTYPE_HOSTCHECK_ASYNC_PRECHECK, NEBFLAG_NONE, - NEBATTR_NONE, this, checkable::check_active, - get_current_state(), get_state_type(), start_time, - end_time, check_command().c_str(), get_latency(), - 0.0, config->host_check_timeout(), false, 0, - nullptr, nullptr, nullptr, nullptr, nullptr); + int res = broker_host_check( + NEBTYPE_HOSTCHECK_ASYNC_PRECHECK, NEBFLAG_NONE, 
NEBATTR_NONE, this, + checkable::check_active, get_current_state(), get_state_type(), + start_time, end_time, get_latency(), 0.0, config->host_check_timeout(), + false, 0, nullptr, nullptr, nullptr, nullptr, nullptr); // Host check was cancel by NEB module. Reschedule check later. if (NEBERROR_CALLBACKCANCEL == res) { engine_logger(log_runtime_error, basic) - << "Error: Some broker module cancelled check of host '" << get_name() + << "Error: Some broker module cancelled check of host '" << name() << "'"; log_v2::runtime()->error( - "Error: Some broker module cancelled check of host '{}'", get_name()); + "Error: Some broker module cancelled check of host '{}'", name()); return ERROR; } // Host check was overriden by NEB module. else if (NEBERROR_CALLBACKOVERRIDE == res) { engine_logger(dbg_functions, basic) - << "Some broker module overrode check of host '" << get_name() + << "Some broker module overrode check of host '" << name() << "' so we'll bail out"; - log_v2::functions()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::functions(), "Some broker module overrode check of host '{}' so we'll bail out", - get_name()); + name()); return OK; } // Checking starts. - engine_logger(dbg_functions, basic) - << "Checking host '" << get_name() << "'..."; - log_v2::functions()->trace("Checking host '{}'...", get_name()); + engine_logger(dbg_functions, basic) << "Checking host '" << name() << "'..."; + SPDLOG_LOGGER_TRACE(log_v2::functions(), "Checking host '{}'...", name()); // Clear check options. if (scheduled_check) @@ -1775,14 +1780,13 @@ int host::run_async_check(int check_options, set_is_executing(true); // Get command object. - commands::command* cmd = get_check_command_ptr(); + commands::command* cmd = get_check_command_ptr().get(); std::string processed_cmd(cmd->process_cmd(macros)); // Send event broker. 
broker_host_check(NEBTYPE_HOSTCHECK_INITIATE, NEBFLAG_NONE, NEBATTR_NONE, this, checkable::check_active, get_current_state(), - get_state_type(), start_time, end_time, - check_command().c_str(), get_latency(), 0.0, + get_state_type(), start_time, end_time, get_latency(), 0.0, config->host_check_timeout(), false, 0, processed_cmd.c_str(), nullptr, nullptr, nullptr, nullptr); @@ -1797,22 +1801,19 @@ int host::run_async_check(int check_options, // Run command. bool retry; - std::unique_ptr check_result_info; + check_result::pointer check_result_info; do { // Init check result info. - check_result_info.reset( - new check_result(host_check, this, checkable::check_active, - check_options, reschedule_check, latency, start_time, - start_time, false, true, service::state_ok, "")); + check_result_info = std::make_shared( + host_check, this, checkable::check_active, check_options, + reschedule_check, latency, start_time, start_time, false, true, + service::state_ok, ""); retry = false; try { // Run command. - uint64_t id = - cmd->run(processed_cmd, *macros, config->host_check_timeout()); - if (id != 0) - checks::checker::instance().add_check_result( - id, check_result_info.release()); + uint64_t id = cmd->run(processed_cmd, *macros, + config->host_check_timeout(), check_result_info); } catch (com::centreon::exceptions::interruption const& e) { retry = true; } catch (std::exception const& e) { @@ -1826,13 +1827,13 @@ int host::run_async_check(int check_options, check_result_info->set_output("(Execute command failed)"); // Queue check result. 
- checks::checker::instance().add_check_result_to_reap( - check_result_info.release()); + checks::checker::instance().add_check_result_to_reap(check_result_info); engine_logger(log_runtime_warning, basic) << "Error: Host check command execution failed: " << e.what(); - log_v2::runtime()->warn("Error: Host check command execution failed: {}", - e.what()); + SPDLOG_LOGGER_WARN(log_v2::runtime(), + "Error: Host check command execution failed: {}", + e.what()); } } while (retry); @@ -1850,29 +1851,30 @@ int host::run_async_check(int check_options, * @return a boolean telling if yes or not the host status is sent to broker. */ bool host::schedule_check(time_t check_time, - int options, + uint32_t options, bool no_update_status_now) { timed_event* temp_event = nullptr; int use_original_event = true; engine_logger(dbg_functions, basic) << "schedule_host_check()"; - log_v2::functions()->trace("schedule_host_check()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "schedule_host_check()"); engine_logger(dbg_checks, basic) << "Scheduling a " << (options & CHECK_OPTION_FORCE_EXECUTION ? "forced" : "non-forced") - << ", active check of host '" << get_name() << "' @ " + << ", active check of host '" << name() << "' @ " << my_ctime(&check_time); - log_v2::checks()->trace( - "Scheduling a {}, active check of host '{}' @ {}", - options & CHECK_OPTION_FORCE_EXECUTION ? "forced" : "non-forced", - get_name(), my_ctime(&check_time)); + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Scheduling a {}, active check of host '{}' @ {}", + options & CHECK_OPTION_FORCE_EXECUTION ? 
"forced" : "non-forced", name(), + my_ctime(&check_time)); /* don't schedule a check if active checks of this host are disabled */ if (!active_checks_enabled() && !(options & CHECK_OPTION_FORCE_EXECUTION)) { engine_logger(dbg_checks, basic) << "Active checks are disabled for this host."; - log_v2::checks()->trace("Active checks are disabled for this host."); + SPDLOG_LOGGER_TRACE(log_v2::checks(), + "Active checks are disabled for this host."); return false; } @@ -1896,8 +1898,9 @@ bool host::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "Found another host check event for this host @ " << my_ctime(&temp_event->run_time); - log_v2::checks()->debug("Found another host check event for this host @ {}", - my_ctime(&temp_event->run_time)); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Found another host check event for this host @ {}", + my_ctime(&temp_event->run_time)); /* use the originally scheduled check unless we decide otherwise */ use_original_event = true; @@ -1910,7 +1913,8 @@ bool host::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "New host check event is forced and occurs before the " "existing event, so the new event be used instead."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "New host check event is forced and occurs before the " "existing event, so the new event be used instead."); use_original_event = false; @@ -1925,7 +1929,8 @@ bool host::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "New host check event is forced, so it will be used " "instead of the existing event."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "New host check event is forced, so it will be used " "instead of the existing event."); } @@ -1937,7 +1942,8 @@ bool host::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "New host check event occurs before the existing (older) " "event, so it will be used instead."; - log_v2::checks()->debug( + 
SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "New host check event occurs before the existing (older) " "event, so it will be used instead."); } @@ -1947,7 +1953,8 @@ bool host::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "New host check event occurs after the existing event, " "so we'll ignore it."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "New host check event occurs after the existing event, " "so we'll ignore it."); } @@ -1963,7 +1970,7 @@ bool host::schedule_check(time_t check_time, /* use the new event */ if (!use_original_event) { engine_logger(dbg_checks, most) << "Scheduling new host check event."; - log_v2::checks()->debug("Scheduling new host check event."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Scheduling new host check event."); /* set the next host check time */ set_next_check(check_time); @@ -1981,7 +1988,8 @@ bool host::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "Keeping original host check event (ignoring the new one)."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Keeping original host check event at {:%Y-%m-%dT%H:%M:%S} (ignoring " "the new one at {:%Y-%m-%dT%H:%M:%S}).", fmt::localtime(get_next_check()), fmt::localtime(check_time)); @@ -2014,11 +2022,12 @@ void host::check_for_flapping(bool update, double high_curve_value = 1.25; engine_logger(dbg_functions, basic) << "host::check_for_flapping()"; - log_v2::functions()->trace("host::check_for_flapping()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "host::check_for_flapping()"); engine_logger(dbg_flapping, more) - << "Checking host '" << get_name() << "' for flapping..."; - log_v2::checks()->debug("Checking host '{}' for flapping...", get_name()); + << "Checking host '" << name() << "' for flapping..."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Checking host '{}' for flapping...", + name()); time(&current_time); @@ -2107,9 +2116,9 @@ void host::check_for_flapping(bool update, <<
com::centreon::logging::setprecision(2) << "LFT=" << low_threshold << ", HFT=" << high_threshold << ", CPC=" << curved_percent_change << ", PSC=" << curved_percent_change << "%"; - log_v2::checks()->debug("LFT={:.2f}, HFT={}, CPC={}, PSC={}%", low_threshold, - high_threshold, curved_percent_change, - curved_percent_change); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "LFT={:.2f}, HFT={}, CPC={}, PSC={}%", + low_threshold, high_threshold, curved_percent_change, + curved_percent_change); /* don't do anything if we don't have flap detection enabled on a program-wide * basis */ @@ -2138,8 +2147,8 @@ void host::check_for_flapping(bool update, engine_logger(dbg_flapping, more) << "Host " << (is_flapping ? "is" : "is not") << " flapping (" << curved_percent_change << "% state change)."; - log_v2::checks()->debug("Host {} flapping ({}% state change).", - is_flapping ? "is" : "is not", curved_percent_change); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Host {} flapping ({}% state change).", + is_flapping ? "is" : "is not", curved_percent_change); /* did the host just start flapping? 
*/ if (is_flapping && !get_is_flapping()) @@ -2156,22 +2165,23 @@ void host::set_flap(double percent_change, double low_threshold, bool allow_flapstart_notification) { engine_logger(dbg_functions, basic) << "set_host_flap()"; - log_v2::functions()->trace("set_host_flap()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "set_host_flap()"); engine_logger(dbg_flapping, more) - << "Host '" << get_name() << "' started flapping!"; - log_v2::checks()->debug("Host '{}' started flapping!", get_name()); + << "Host '" << name() << "' started flapping!"; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Host '{}' started flapping!", name()); /* log a notice - this one is parsed by the history CGI */ engine_logger(log_runtime_warning, basic) << com::centreon::logging::setprecision(1) - << "HOST FLAPPING ALERT: " << get_name() + << "HOST FLAPPING ALERT: " << name() << ";STARTED; Host appears to have started flapping (" << percent_change << "% change > " << high_threshold << "% threshold)"; - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "HOST FLAPPING ALERT: {};STARTED; Host appears to have started flapping " "({:.1f}% change > {:.1f}% threshold)", - get_name(), percent_change, high_threshold); + name(), percent_change, high_threshold); /* add a non-persistent comment to the host */ std::ostringstream oss; @@ -2184,10 +2194,10 @@ void host::set_flap(double percent_change, << "% threshold). 
When the host state stabilizes and the " << "flapping stops, notifications will be re-enabled."; - std::shared_ptr com{ - new comment(comment::host, comment::flapping, get_host_id(), 0, - time(nullptr), "(Centreon Engine Process)", oss.str(), false, - comment::internal, false, (time_t)0)}; + auto com = std::make_shared( + comment::host, comment::flapping, get_host_id(), 0, time(nullptr), + "(Centreon Engine Process)", oss.str(), false, comment::internal, false, + (time_t)0); comment::comments.insert({com->get_comment_id(), com}); @@ -2212,22 +2222,23 @@ void host::clear_flap(double percent_change, double high_threshold, double low_threshold) { engine_logger(dbg_functions, basic) << "host::clear_flap()"; - log_v2::functions()->trace("host::clear_flap()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "host::clear_flap()"); engine_logger(dbg_flapping, basic) - << "Host '" << get_name() << "' stopped flapping."; - log_v2::checks()->debug("Host '{}' stopped flapping.", get_name()); + << "Host '" << name() << "' stopped flapping."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Host '{}' stopped flapping.", name()); /* log a notice - this one is parsed by the history CGI */ engine_logger(log_info_message, basic) << com::centreon::logging::setprecision(1) - << "HOST FLAPPING ALERT: " << get_name() + << "HOST FLAPPING ALERT: " << name() << ";STOPPED; Host appears to have stopped flapping (" << percent_change << "% change < " << low_threshold << "% threshold)"; - log_v2::events()->info( + SPDLOG_LOGGER_INFO( + log_v2::events(), "HOST FLAPPING ALERT: {};STOPPED; Host appears to have stopped flapping " "({:.1f}% change < {:.1f}% threshold)", - get_name(), percent_change, low_threshold); + name(), percent_change, low_threshold); /* delete the comment we added earlier */ if (get_flapping_comment_id() != 0) @@ -2267,9 +2278,9 @@ void host::check_for_expired_acknowledgement() { time_t now = time(nullptr); if (last_acknowledgement() + acknowledgement_timeout() >= now) { 
engine_logger(log_info_message, basic) - << "Acknowledgement of host '" << get_name() << "' just expired"; - log_v2::events()->info("Acknowledgement of host '{}' just expired", - get_name()); + << "Acknowledgement of host '" << name() << "' just expired"; + SPDLOG_LOGGER_INFO(log_v2::events(), + "Acknowledgement of host '{}' just expired", name()); set_problem_has_been_acknowledged(false); set_acknowledgement_type(ACKNOWLEDGEMENT_NONE); // FIXME DBO: could be improved with something smaller. @@ -2286,7 +2297,7 @@ int host::handle_state() { time_t current_time; engine_logger(dbg_functions, basic) << "handle_host_state()"; - log_v2::functions()->trace("handle_host_state()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "handle_host_state()"); /* get current time */ time(&current_time); @@ -2451,7 +2462,7 @@ bool host::verify_check_viability(int check_options, int check_interval = 0; engine_logger(dbg_functions, basic) << "check_host_check_viability_3x()"; - log_v2::functions()->trace("check_host_check_viability_3x()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "check_host_check_viability_3x()"); /* get the check interval to use if we need to reschedule the check */ if (this->get_state_type() == soft && @@ -2516,8 +2527,8 @@ void host::grab_macros_r(nagios_macros* mac) { int host::notify_contact(nagios_macros* mac, contact* cntct, notifier::reason_type type, - std::string const& not_author, - std::string const& not_data, + const std::string& not_author, + const std::string& not_data, int options __attribute((unused)), int escalated) { std::string raw_command; @@ -2530,7 +2541,7 @@ int host::notify_contact(nagios_macros* mac, int neb_result; engine_logger(dbg_functions, basic) << "notify_contact_of_host()"; - log_v2::functions()->trace("notify_contact_of_host()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "notify_contact_of_host()"); engine_logger(dbg_notifications, most) << "** Notifying contact '" << cntct->get_name() << "'"; log_v2::notifications()->info("** Notifying
contact '{}'", cntct->get_name()); @@ -2562,15 +2573,15 @@ int host::notify_contact(nagios_macros* mac, neb_result = broker_contact_notification_method_data( NEBTYPE_CONTACTNOTIFICATIONMETHOD_START, NEBFLAG_NONE, NEBATTR_NONE, host_notification, type, method_start_time, method_end_time, - (void*)this, cntct, cmd->get_command_line().c_str(), not_author.c_str(), - not_data.c_str(), escalated, nullptr); + (void*)this, cntct, not_author.c_str(), not_data.c_str(), escalated, + nullptr); if (NEBERROR_CALLBACKCANCEL == neb_result) break; else if (NEBERROR_CALLBACKOVERRIDE == neb_result) continue; /* get the raw command line */ - get_raw_command_line_r(mac, cmd.get(), cmd->get_command_line().c_str(), + get_raw_command_line_r(mac, cmd, cmd->get_command_line().c_str(), raw_command, macro_options); if (raw_command.empty()) continue; @@ -2618,11 +2629,11 @@ int host::notify_contact(nagios_macros* mac, .append(")"); engine_logger(log_host_notification, basic) - << "HOST NOTIFICATION: " << cntct->get_name() << ';' - << this->get_name() << ';' << host_notification_state << ";" - << cmd->get_name() << ';' << this->get_plugin_output() << info; + << "HOST NOTIFICATION: " << cntct->get_name() << ';' << this->name() + << ';' << host_notification_state << ";" << cmd->get_name() << ';' + << this->get_plugin_output() << info; log_v2::notifications()->info("HOST NOTIFICATION: {};{};{};{};{};{}", - cntct->get_name(), this->get_name(), + cntct->get_name(), this->name(), host_notification_state, cmd->get_name(), this->get_plugin_output(), info); } @@ -2661,8 +2672,8 @@ int host::notify_contact(nagios_macros* mac, broker_contact_notification_method_data( NEBTYPE_CONTACTNOTIFICATIONMETHOD_END, NEBFLAG_NONE, NEBATTR_NONE, host_notification, type, method_start_time, method_end_time, - (void*)this, cntct, cmd->get_command_line().c_str(), not_author.c_str(), - not_data.c_str(), escalated, nullptr); + (void*)this, cntct, not_author.c_str(), not_data.c_str(), escalated, + nullptr); } /* get end time 
*/ @@ -2693,12 +2704,11 @@ void host::disable_flap_detection() { unsigned long attr = MODATTR_FLAP_DETECTION_ENABLED; engine_logger(dbg_functions, basic) << "disable_host_flap_detection()"; - log_v2::functions()->trace("disable_host_flap_detection()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "disable_host_flap_detection()"); engine_logger(dbg_functions, more) - << "Disabling flap detection for host '" << get_name() << "'."; - log_v2::functions()->debug("Disabling flap detection for host '{}'.", - get_name()); + << "Disabling flap detection for host '" << name() << "'."; + log_v2::functions()->debug("Disabling flap detection for host '{}'.", name()); /* nothing to do... */ if (!flap_detection_enabled()) @@ -2724,11 +2734,12 @@ void host::enable_flap_detection() { unsigned long attr = MODATTR_FLAP_DETECTION_ENABLED; engine_logger(dbg_functions, basic) << "host::enable_flap_detection()"; - log_v2::functions()->trace("host::enable_flap_detection()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "host::enable_flap_detection()"); engine_logger(dbg_flapping, more) - << "Enabling flap detection for host '" << get_name() << "'."; - log_v2::checks()->debug("Enabling flap detection for host '{}'.", get_name()); + << "Enabling flap detection for host '" << name() << "'."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Enabling flap detection for host '{}'.", name()); /* nothing to do... 
*/ if (flap_detection_enabled()) @@ -2763,7 +2774,8 @@ bool host::is_valid_escalation_for_notification(escalation const* e, engine_logger(dbg_functions, basic) << "host::is_valid_escalation_for_notification()"; - log_v2::functions()->trace("host::is_valid_escalation_for_notification()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "host::is_valid_escalation_for_notification()"); /* get the current time */ time(&current_time); @@ -2829,8 +2841,9 @@ bool host::is_result_fresh(time_t current_time, int log_this) { int tseconds = 0; engine_logger(dbg_checks, most) - << "Checking freshness of host '" << _name << "'..."; - log_v2::checks()->debug("Checking freshness of host '{}'...", _name); + << "Checking freshness of host '" << name() << "'..."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Checking freshness of host '{}'...", + name()); /* use user-supplied freshness threshold or auto-calculate a freshness * threshold to use? */ @@ -2849,8 +2862,8 @@ bool host::is_result_fresh(time_t current_time, int log_this) { engine_logger(dbg_checks, most) << "Freshness thresholds: host=" << get_freshness_threshold() << ", use=" << freshness_threshold; - log_v2::checks()->debug("Freshness thresholds: host={}, use={}", - get_freshness_threshold(), freshness_threshold); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Freshness thresholds: host={}, use={}", - get_freshness_threshold(), freshness_threshold); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Freshness thresholds: host={}, use={}", + get_freshness_threshold(), freshness_threshold); /* calculate expiration time */ /* CHANGED 11/10/05 EG - program start is only used in expiration time @@ -2876,9 +2889,10 @@ bool host::is_result_fresh(time_t current_time, int log_this) { << "HBC: " << has_been_checked() << ", PS: " << program_start << ", ES: " << event_start << ", LC: " << get_last_check() << ", CT: " << current_time << ", ET: " << expiration_time; - log_v2::checks()->debug("HBC: {}, PS: {}, ES: {}, LC: {}, CT: {}, ET: {}", - has_been_checked(), program_start, event_start, - get_last_check(), current_time, expiration_time); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "HBC: {},
PS: {}, ES: {}, LC: {}, CT: {}, ET: {}", + has_been_checked(), program_start, event_start, + get_last_check(), current_time, expiration_time); /* the results for the last check of this host are stale */ if (expiration_time < current_time) { @@ -2890,39 +2904,41 @@ bool host::is_result_fresh(time_t current_time, int log_this) { /* log a warning */ if (log_this) engine_logger(log_runtime_warning, basic) - << "Warning: The results of host '" << _name << "' are stale by " + << "Warning: The results of host '" << name() << "' are stale by " << days << "d " << hours << "h " << minutes << "m " << seconds << "s (threshold=" << tdays << "d " << thours << "h " << tminutes << "m " << tseconds << "s). I'm forcing an immediate check of" " the host."; - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: The results of host '{}' are stale by {}d {}h {}m {}s " "(threshold={}d {}h {}m {}s). I'm forcing an immediate check of the " "host.", - _name, days, hours, minutes, seconds, tdays, thours, tminutes, + name(), days, hours, minutes, seconds, tdays, thours, tminutes, tseconds); engine_logger(dbg_checks, more) - << "Check results for host '" << _name << "' are stale by " << days + << "Check results for host '" << name() << "' are stale by " << days << "d " << hours << "h " << minutes << "m " << seconds << "s (threshold=" << tdays << "d " << thours << "h " << tminutes << "m " << tseconds << "s). " "Forcing an immediate check of the host..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Check results for host '{}' are stale by {}d {}h {}m {}s " "(threshold={}d {}h {}m {}s). 
Forcing an immediate check of the " "host...", - _name, days, hours, minutes, seconds, tdays, thours, tminutes, + name(), days, hours, minutes, seconds, tdays, thours, tminutes, tseconds); return false; } else engine_logger(dbg_checks, more) - << "Check results for host '" << this->get_name() << "' are fresh."; - log_v2::checks()->debug("Check results for host '{}' are fresh.", - this->get_name()); + << "Check results for host '" << this->name() << "' are fresh."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Check results for host '{}' are fresh.", this->name()); return true; } @@ -2932,7 +2948,8 @@ bool host::is_result_fresh(time_t current_time, int log_this) { void host::handle_flap_detection_disabled() { engine_logger(dbg_functions, basic) << "handle_host_flap_detection_disabled()"; - log_v2::functions()->trace("handle_host_flap_detection_disabled()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "handle_host_flap_detection_disabled()"); /* if the host was flapping, remove the flapping indicator */ if (get_is_flapping()) { this->set_is_flapping(false); @@ -2944,11 +2961,12 @@ void host::handle_flap_detection_disabled() { /* log a notice - this one is parsed by the history CGI */ engine_logger(log_info_message, basic) - << "HOST FLAPPING ALERT: " << this->get_name() + << "HOST FLAPPING ALERT: " << this->name() << ";DISABLED; Flap detection has been disabled"; - log_v2::events()->info( + SPDLOG_LOGGER_INFO( + log_v2::events(), "HOST FLAPPING ALERT: {};DISABLED; Flap detection has been disabled", - this->get_name()); + this->name()); /* send data to event broker */ broker_flapping_data(NEBTYPE_FLAPPING_STOP, NEBFLAG_NONE, @@ -2971,7 +2989,7 @@ int host::perform_on_demand_check(enum host::host_state* check_return_code, int use_cached_result, unsigned long check_timestamp_horizon) { engine_logger(dbg_functions, basic) << "perform_on_demand_host_check()"; - log_v2::functions()->trace("perform_on_demand_host_check()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), 
"perform_on_demand_host_check()"); perform_on_demand_check_3x(check_return_code, check_options, use_cached_result, check_timestamp_horizon); @@ -2986,11 +3004,12 @@ int host::perform_on_demand_check_3x(host::host_state* check_result_code, int result = OK; engine_logger(dbg_functions, basic) << "perform_on_demand_host_check_3x()"; - log_v2::functions()->trace("perform_on_demand_host_check_3x()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "perform_on_demand_host_check_3x()"); engine_logger(dbg_checks, basic) - << "** On-demand check for host '" << _name << "'..."; - log_v2::checks()->trace("** On-demand check for host '{}'...", _name); + << "** On-demand check for host '" << name() << "'..."; + SPDLOG_LOGGER_TRACE(log_v2::checks(), "** On-demand check for host '{}'...", + name()); /* check the status of the host */ result = this->run_sync_check_3x(check_result_code, check_options, @@ -3009,7 +3028,8 @@ int host::run_sync_check_3x(enum host::host_state* check_result_code, << ", check_options=" << check_options << ", use_cached_result=" << use_cached_result << ", check_timestamp_horizon=" << check_timestamp_horizon; - log_v2::functions()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::functions(), "run_sync_host_check_3x: hst={}, check_options={}, use_cached_result={}, " "check_timestamp_horizon={}", (void*)this, check_options, use_cached_result, check_timestamp_horizon); @@ -3028,7 +3048,7 @@ int host::run_sync_check_3x(enum host::host_state* check_result_code, /* processes the result of a synchronous or asynchronous host check */ int host::process_check_result_3x(enum host::host_state new_state, - std::string const& old_plugin_output, + const std::string& old_plugin_output, int check_options, int reschedule_check, int use_cached_result, @@ -3046,18 +3066,19 @@ int host::process_check_result_3x(enum host::host_state new_state, bool has_parent; engine_logger(dbg_functions, basic) << "process_host_check_result_3x()"; - 
log_v2::functions()->trace("process_host_check_result_3x()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "process_host_check_result_3x()"); engine_logger(dbg_checks, more) - << "HOST: " << _name << ", ATTEMPT=" << get_current_attempt() << "/" + << "HOST: " << name() << ", ATTEMPT=" << get_current_attempt() << "/" << max_check_attempts() << ", CHECK TYPE=" << (get_check_type() == check_active ? "ACTIVE" : "PASSIVE") << ", STATE TYPE=" << (get_state_type() == hard ? "HARD" : "SOFT") << ", OLD STATE=" << get_current_state() << ", NEW STATE=" << new_state; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "HOST: {}, ATTEMPT={}/{}, CHECK TYPE={}, STATE TYPE={}, OLD STATE={}, " "NEW STATE={}", - _name, get_current_attempt(), max_check_attempts(), + name(), get_current_attempt(), max_check_attempts(), get_check_type() == check_active ? "ACTIVE" : "PASSIVE", get_state_type() == hard ? "HARD" : "SOFT", get_current_state(), new_state); @@ -3072,16 +3093,16 @@ int host::process_check_result_3x(enum host::host_state new_state, if (get_check_type() == check_passive) { if (config->log_passive_checks()) engine_logger(log_passive_check, basic) - << "PASSIVE HOST CHECK: " << _name << ";" << new_state << ";" + << "PASSIVE HOST CHECK: " << name() << ";" << new_state << ";" << get_plugin_output(); - log_v2::checks()->debug("PASSIVE HOST CHECK: {};{};{}", _name, new_state, - get_plugin_output()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "PASSIVE HOST CHECK: {};{};{}", + name(), new_state, get_plugin_output()); } /******* HOST WAS DOWN/UNREACHABLE INITIALLY *******/ if (_current_state != host::state_up) { engine_logger(dbg_checks, more) << "Host was DOWN/UNREACHABLE."; - log_v2::checks()->debug("Host was DOWN/UNREACHABLE."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Host was DOWN/UNREACHABLE."); /***** HOST IS NOW UP *****/ /* the host just recovered! 
*/ @@ -3101,8 +3122,9 @@ int host::process_check_result_3x(enum host::host_state new_state, << "Host experienced a " << (get_state_type() == hard ? "HARD" : "SOFT") << " recovery (it's now UP)."; - log_v2::checks()->debug("Host experienced a {} recovery (it's now UP).", - get_state_type() == hard ? "HARD" : "SOFT"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Host experienced a {} recovery (it's now UP).", + get_state_type() == hard ? "HARD" : "SOFT"); /* reschedule the next check of the host at the normal interval */ reschedule_check = true; @@ -3112,7 +3134,8 @@ int host::process_check_result_3x(enum host::host_state new_state, * somewhere and we should catch the recovery as soon as possible */ engine_logger(dbg_checks, more) << "Propagating checks to parent host(s)..."; - log_v2::checks()->debug("Propagating checks to parent host(s)..."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Propagating checks to parent host(s)..."); for (host_map_unsafe::iterator it{parent_hosts.begin()}, end{parent_hosts.end()}; @@ -3122,8 +3145,8 @@ int host::process_check_result_3x(enum host::host_state new_state, if (it->second->get_current_state() != host::state_up) { engine_logger(dbg_checks, more) << "Check of parent host '" << it->first << "' queued."; - log_v2::checks()->debug("Check of parent host '{}' queued.", - it->first); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Check of parent host '{}' queued.", it->first); check_hostlist.push_back(it->second); } } @@ -3133,7 +3156,8 @@ int host::process_check_result_3x(enum host::host_state new_state, * result of this recovery) switch to UP or DOWN states */ engine_logger(dbg_checks, more) << "Propagating checks to child host(s)..."; - log_v2::checks()->debug("Propagating checks to child host(s)..."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Propagating checks to child host(s)..."); for (host_map_unsafe::iterator it{child_hosts.begin()}, end{child_hosts.end()}; @@ -3143,8 +3167,8 @@ int host::process_check_result_3x(enum 
host::host_state new_state, if (it->second->get_current_state() != host::state_up) { engine_logger(dbg_checks, more) << "Check of child host '" << it->first << "' queued."; - log_v2::checks()->debug("Check of child host '{}' queued.", - it->first); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Check of child host '{}' queued.", it->first); check_hostlist.push_back(it->second); } } @@ -3154,7 +3178,7 @@ int host::process_check_result_3x(enum host::host_state new_state, /* we're still in a problem state... */ else { engine_logger(dbg_checks, more) << "Host is still DOWN/UNREACHABLE."; - log_v2::checks()->debug("Host is still DOWN/UNREACHABLE."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Host is still DOWN/UNREACHABLE."); /* set the state type */ /* we've maxed out on the retries */ @@ -3186,13 +3210,13 @@ int host::process_check_result_3x(enum host::host_state new_state, /******* HOST WAS UP INITIALLY *******/ else { engine_logger(dbg_checks, more) << "Host was UP."; - log_v2::checks()->debug("Host was UP."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Host was UP."); /***** HOST IS STILL UP *****/ /* either the host never went down since last check */ if (new_state == host::state_up) { engine_logger(dbg_checks, more) << "Host is still UP."; - log_v2::checks()->debug("Host is still UP."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Host is still UP."); /* set the current state */ _current_state = host::state_up; @@ -3204,12 +3228,12 @@ int host::process_check_result_3x(enum host::host_state new_state, /***** HOST IS NOW DOWN/UNREACHABLE *****/ else { engine_logger(dbg_checks, more) << "Host is now DOWN/UNREACHABLE."; - log_v2::checks()->debug("Host is now DOWN/UNREACHABLE."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Host is now DOWN/UNREACHABLE."); /***** SPECIAL CASE FOR HOSTS WITH MAX_ATTEMPTS==1 *****/ if (max_check_attempts() == 1) { engine_logger(dbg_checks, more) << "Max attempts = 1!."; - log_v2::checks()->debug("Max attempts = 1!."); + 
SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Max attempts = 1!."); /* set the state type */ set_state_type(hard); @@ -3231,7 +3255,8 @@ int host::process_check_result_3x(enum host::host_state new_state, engine_logger(dbg_checks, more) << "** WARNING: Max attempts = 1, so we have to run serial " "checks of all parent hosts!"; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "** WARNING: Max attempts = 1, so we have to run serial " "checks of all parent hosts!"); @@ -3245,8 +3270,9 @@ int host::process_check_result_3x(enum host::host_state new_state, engine_logger(dbg_checks, more) << "Running serial check parent host '" << it->first << "'..."; - log_v2::checks()->debug("Running serial check parent host '{}'...", - it->first); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Running serial check parent host '{}'...", + it->first); /* run an immediate check of the parent host */ it->second->run_sync_check_3x(&parent_state, check_options, @@ -3257,8 +3283,8 @@ int host::process_check_result_3x(enum host::host_state new_state, if (parent_state == host::state_up) { engine_logger(dbg_checks, more) << "Parent host is UP, so this one is DOWN."; - log_v2::checks()->debug( - "Parent host is UP, so this one is DOWN."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Parent host is UP, so this one is DOWN."); /* set the current state */ _current_state = host::state_down; @@ -3271,13 +3297,15 @@ int host::process_check_result_3x(enum host::host_state new_state, if (parent_hosts.empty()) { engine_logger(dbg_checks, more) << "Host has no parents, so it's DOWN."; - log_v2::checks()->debug("Host has no parents, so it's DOWN."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Host has no parents, so it's DOWN."); _current_state = host::state_down; } else { /* no parents were up, so this host is UNREACHABLE */ engine_logger(dbg_checks, more) << "No parents were UP, so this host is UNREACHABLE."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "No parents were UP, 
so this host is UNREACHABLE."); _current_state = host::state_unreachable; } @@ -3295,7 +3323,8 @@ int host::process_check_result_3x(enum host::host_state new_state, /* we do this because we may now be blocking the route to child hosts */ engine_logger(dbg_checks, more) << "Propagating check to immediate non-UNREACHABLE child hosts..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Propagating check to immediate non-UNREACHABLE child hosts..."); for (host_map_unsafe::iterator it{child_hosts.begin()}, @@ -3306,8 +3335,8 @@ int host::process_check_result_3x(enum host::host_state new_state, if (it->second->get_current_state() != host::state_unreachable) { engine_logger(dbg_checks, more) << "Check of child host '" << it->first << "' queued."; - log_v2::checks()->debug("Check of child host '{}' queued.", - it->first); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Check of child host '{}' queued.", it->first); check_hostlist.push_back(it->second); } } @@ -3339,9 +3368,9 @@ int host::process_check_result_3x(enum host::host_state new_state, engine_logger(dbg_checks, more) << "Propagating checks to immediate parent hosts that " "are UP..."; - log_v2::checks()->debug( - "Propagating checks to immediate parent hosts that " - "are UP..."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Propagating checks to immediate parent hosts that " + "are UP..."); for (host_map_unsafe::iterator it{parent_hosts.begin()}, end{parent_hosts.end()}; @@ -3352,7 +3381,8 @@ int host::process_check_result_3x(enum host::host_state new_state, check_hostlist.push_back(it->second); engine_logger(dbg_checks, more) << "Check of host '" << it->first << "' queued."; - log_v2::checks()->debug("Check of host '{}' queued.", it->first); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Check of host '{}' queued.", + it->first); } } @@ -3361,9 +3391,9 @@ int host::process_check_result_3x(enum host::host_state new_state, engine_logger(dbg_checks, more) << "Propagating checks to immediate non-UNREACHABLE 
" "child hosts..."; - log_v2::checks()->debug( - "Propagating checks to immediate non-UNREACHABLE " - "child hosts..."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Propagating checks to immediate non-UNREACHABLE " + "child hosts..."); for (host_map_unsafe::iterator it{child_hosts.begin()}, end{child_hosts.end()}; @@ -3373,8 +3403,8 @@ int host::process_check_result_3x(enum host::host_state new_state, if (it->second->get_current_state() != host::state_unreachable) { engine_logger(dbg_checks, more) << "Check of child host '" << it->first << "' queued."; - log_v2::checks()->debug("Check of child host '{}' queued.", - it->first); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Check of child host '{}' queued.", it->first); check_hostlist.push_back(it->second); } } @@ -3389,23 +3419,24 @@ int host::process_check_result_3x(enum host::host_state new_state, engine_logger(dbg_checks, more) << "Propagating predictive dependency checks to hosts this " "one depends on..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Propagating predictive dependency checks to hosts this " "one depends on..."); for (hostdependency_mmap::const_iterator - it{hostdependency::hostdependencies.find(_name)}, + it{hostdependency::hostdependencies.find(name())}, end{hostdependency::hostdependencies.end()}; - it != end && it->first == _name; ++it) { + it != end && it->first == name(); ++it) { hostdependency* temp_dependency(it->second.get()); if (temp_dependency->dependent_host_ptr == this && temp_dependency->master_host_ptr != nullptr) { master_host = (host*)temp_dependency->master_host_ptr; engine_logger(dbg_checks, more) - << "Check of host '" << master_host->get_name() - << "' queued."; - log_v2::checks()->debug("Check of host '{}' queued.", - master_host->get_name()); + << "Check of host '" << master_host->name() << "' queued."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Check of host '{}' queued.", + master_host->name()); check_hostlist.push_back(master_host); } } @@ -3415,28 
+3446,30 @@ int host::process_check_result_3x(enum host::host_state new_state, } engine_logger(dbg_checks, more) - << "Pre-handle_host_state() Host: " << _name + << "Pre-handle_host_state() Host: " << name() << ", Attempt=" << get_current_attempt() << "/" << max_check_attempts() << ", Type=" << (get_state_type() == hard ? "HARD" : "SOFT") << ", Final State=" << _current_state; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Pre-handle_host_state() Host: {}, Attempt={}/{}, Type={}, Final " "State={}", - _name, get_current_attempt(), max_check_attempts(), + name(), get_current_attempt(), max_check_attempts(), get_state_type() == hard ? "HARD" : "SOFT", _current_state); /* handle the host state */ handle_state(); engine_logger(dbg_checks, more) - << "Post-handle_host_state() Host: " << _name + << "Post-handle_host_state() Host: " << name() << ", Attempt=" << get_current_attempt() << "/" << max_check_attempts() << ", Type=" << (get_state_type() == hard ? "HARD" : "SOFT") << ", Final State=" << _current_state; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Post-handle_host_state() Host: {}, Attempt={}/{}, Type={}, Final " "State={}", - _name, get_current_attempt(), max_check_attempts(), + name(), get_current_attempt(), max_check_attempts(), get_state_type() == hard ? 
"HARD" : "SOFT", _current_state); /******************** POST-PROCESSING STUFF *********************/ @@ -3465,11 +3498,12 @@ int host::process_check_result_3x(enum host::host_state new_state, if (reschedule_check) { engine_logger(dbg_checks, more) << "Rescheduling next check of host at " << my_ctime(&next_check); - log_v2::checks()->debug( - "Rescheduling next check of host: {} of last check at " - "{:%Y-%m-%dT%H:%M:%S} and next " - "check at {:%Y-%m-%dT%H:%M:%S}", - _name, fmt::localtime(get_last_check()), fmt::localtime(next_check)); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Rescheduling next check of host: {} of last check at " + "{:%Y-%m-%dT%H:%M:%S} and next " + "check at {:%Y-%m-%dT%H:%M:%S}", + name(), fmt::localtime(get_last_check()), + fmt::localtime(next_check)); /* default is to reschedule host check unless a test below fails... */ set_should_be_scheduled(true); @@ -3522,16 +3556,17 @@ int host::process_check_result_3x(enum host::host_state new_state, temp_host = *it; engine_logger(dbg_checks, most) - << "ASYNC CHECK OF HOST: " << temp_host->get_name() + << "ASYNC CHECK OF HOST: " << temp_host->name() << ", CURRENTTIME: " << current_time << ", LASTHOSTCHECK: " << temp_host->get_last_check() << ", CACHEDTIMEHORIZON: " << check_timestamp_horizon << ", USECACHEDRESULT: " << use_cached_result << ", ISEXECUTING: " << temp_host->get_is_executing(); - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "ASYNC CHECK OF HOST: {}, CURRENTTIME: {}, LASTHOSTCHECK: {}, " "CACHEDTIMEHORIZON: {}, USECACHEDRESULT: {}, ISEXECUTING: {}", - temp_host->get_name(), current_time, temp_host->get_last_check(), + temp_host->name(), current_time, temp_host->get_last_check(), check_timestamp_horizon, use_cached_result, temp_host->get_is_executing()); @@ -3562,26 +3597,29 @@ enum host::host_state host::determine_host_reachability( bool is_host_present = false; engine_logger(dbg_functions, basic) << "determine_host_reachability()"; - 
log_v2::functions()->trace("determine_host_reachability()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "determine_host_reachability()"); - engine_logger(dbg_checks, most) << "Determining state of host '" << _name + engine_logger(dbg_checks, most) << "Determining state of host '" << name() << "': current state=" << new_state; - log_v2::checks()->debug("Determining state of host '{}': current state= {}", - _name, new_state); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Determining state of host '{}': current state= {}", + name(), new_state); /* host is UP - no translation needed */ if (new_state == host::state_up) { state = host::state_up; engine_logger(dbg_checks, most) << "Host is UP, no state translation needed."; - log_v2::checks()->debug("Host is UP, no state translation needed."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Host is UP, no state translation needed."); } /* host has no parents, so it is DOWN */ else if (parent_hosts.size() == 0) { state = host::state_down; engine_logger(dbg_checks, most) << "Host has no parents, so it is DOWN."; - log_v2::checks()->debug("Host has no parents, so it is DOWN."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Host has no parents, so it is DOWN."); } /* check all parent hosts to see if we're DOWN or UNREACHABLE */ @@ -3599,8 +3637,9 @@ enum host::host_state host::determine_host_reachability( state = host::state_down; engine_logger(dbg_checks, most) << "At least one parent (" << it->first << ") is up, so host is DOWN."; - log_v2::checks()->debug( - "At least one parent ({}) is up, so host is DOWN.", it->first); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "At least one parent ({}) is up, so host is DOWN.", + it->first); break; } } @@ -3609,7 +3648,8 @@ enum host::host_state host::determine_host_reachability( state = host::state_unreachable; engine_logger(dbg_checks, most) << "No parents were up, so host is UNREACHABLE."; - log_v2::checks()->debug("No parents were up, so host is UNREACHABLE."); + 
SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "No parents were up, so host is UNREACHABLE."); } } @@ -3634,9 +3674,10 @@ std::list& host::get_parent_groups() { */ bool host::authorized_by_dependencies(dependency::types dependency_type) const { engine_logger(dbg_functions, basic) << "host::authorized_by_dependencies()"; - log_v2::functions()->trace("host::authorized_by_dependencies()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "host::authorized_by_dependencies()"); - auto p(hostdependency::hostdependencies.equal_range(_name)); + auto p(hostdependency::hostdependencies.equal_range(name())); for (hostdependency_mmap::const_iterator it{p.first}, end{p.second}; it != end; ++it) { hostdependency* dep{it->second.get()}; @@ -3687,16 +3728,18 @@ void host::check_result_freshness() { time_t current_time = 0L; engine_logger(dbg_functions, basic) << "check_host_result_freshness()"; - log_v2::functions()->trace("check_host_result_freshness()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "check_host_result_freshness()"); engine_logger(dbg_checks, most) << "Attempting to check the freshness of host check results..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Attempting to check the freshness of host check results..."); /* bail out if we're not supposed to be checking freshness */ if (!config->check_host_freshness()) { engine_logger(dbg_checks, most) << "Host freshness checking is disabled."; - log_v2::checks()->debug("Host freshness checking is disabled."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Host freshness checking is disabled."); return; } @@ -3753,17 +3796,18 @@ void host::check_result_freshness() { */ void host::adjust_check_attempt(bool is_active) { engine_logger(dbg_functions, basic) << "adjust_host_check_attempt_3x()"; - log_v2::functions()->trace("adjust_host_check_attempt_3x()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "adjust_host_check_attempt_3x()"); engine_logger(dbg_checks, most) - << "Adjusting check attempt number for host '" 
<< _name + << "Adjusting check attempt number for host '" << name() << "': current attempt=" << get_current_attempt() << "/" << max_check_attempts() << ", state=" << _current_state << ", state type=" << get_state_type(); - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Adjusting check attempt number for host '{}': current attempt= {}/{}, " "state= {}, state type= {}", - _name, get_current_attempt(), max_check_attempts(), _current_state, + name(), get_current_attempt(), max_check_attempts(), _current_state, get_state_type()); /* if host is in a hard state, reset current attempt number */ if (get_state_type() == notifier::hard) @@ -3781,8 +3825,8 @@ void host::adjust_check_attempt(bool is_active) { engine_logger(dbg_checks, most) << "New check attempt number = " << get_current_attempt(); - log_v2::checks()->debug("New check attempt number = {}", - get_current_attempt()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "New check attempt number = {}", + get_current_attempt()); } /* check for hosts that never returned from a check... */ @@ -3791,7 +3835,7 @@ void host::check_for_orphaned() { time_t expected_time = 0L; engine_logger(dbg_functions, basic) << "check_for_orphaned_hosts()"; - log_v2::functions()->trace("check_for_orphaned_hosts()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "check_for_orphaned_hosts()"); /* get the current time */ time(¤t_time); @@ -3810,31 +3854,32 @@ void host::check_for_orphaned() { /* determine the time at which the check results should have come in (allow * 10 minutes slack time) */ - expected_time = - (time_t)(it->second->get_next_check() + it->second->get_latency() + - config->host_check_timeout() + - config->check_reaper_interval() + 600); + expected_time = (time_t)( + it->second->get_next_check() + it->second->get_latency() + + config->host_check_timeout() + config->check_reaper_interval() + 600); /* this host was supposed to have executed a while ago, but for some reason * the results haven't come back in... 
*/ if (expected_time < current_time) { /* log a warning */ engine_logger(log_runtime_warning, basic) - << "Warning: The check of host '" << it->second->get_name() + << "Warning: The check of host '" << it->second->name() << "' looks like it was orphaned (results never came back). " "I'm scheduling an immediate check of the host..."; - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: The check of host '{}' looks like it was orphaned (results " "never came back). " "I'm scheduling an immediate check of the host...", - it->second->get_name()); + it->second->name()); engine_logger(dbg_checks, more) - << "Host '" << it->second->get_name() + << "Host '" << it->second->name() << "' was orphaned, so we're scheduling an immediate check..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Host '{}' was orphaned, so we're scheduling an immediate check...", - it->second->get_name()); + it->second->name()); /* decrement the number of running host checks */ if (currently_running_host_checks > 0) @@ -3849,7 +3894,7 @@ void host::check_for_orphaned() { } } -std::string const& host::get_current_state_as_string() const { +const std::string& host::get_current_state_as_string() const { return tab_host_states[get_current_state()].second; } @@ -3878,26 +3923,26 @@ void host::resolve(int& w, int& e) { notifier::resolve(warnings, errors); } catch (std::exception const& e) { engine_logger(log_verification_error, basic) - << "Error: Host '" << _name + << "Error: Host '" << name() << "' has problem in its notifier part: " << e.what(); log_v2::config()->error( - "Error: Host '{}' has problem in its notifier part: {}", _name, + "Error: Host '{}' has problem in its notifier part: {}", name(), e.what()); } for (service_map::iterator it_svc{service::services.begin()}, end_svc{service::services.end()}; it_svc != end_svc; ++it_svc) { - if (_name == it_svc->first.first) + if (name() == it_svc->first.first) services.insert({it_svc->first, nullptr}); 
} if (services.empty()) { engine_logger(log_verification_error, basic) - << "Warning: Host '" << _name + << "Warning: Host '" << name() << "' has no services associated with it!"; log_v2::config()->warn( - "Warning: Host '{}' has no services associated with it!", _name); + "Warning: Host '{}' has no services associated with it!", name()); ++w; } else { for (service_map_unsafe::iterator it{services.begin()}, end{services.end()}; @@ -3905,10 +3950,10 @@ void host::resolve(int& w, int& e) { service_map::const_iterator found{service::services.find(it->first)}; if (found == service::services.end() || !found->second) { engine_logger(log_verification_error, basic) - << "Error: Host '" << _name << "' has a service '" + << "Error: Host '" << name() << "' has a service '" << it->first.second << "' that does not exist!"; log_v2::config()->error( - "Error: Host '{}' has a service '{}' that does not exist!", _name, + "Error: Host '{}' has a service '{}' that does not exist!", name(), it->first.second); ++errors; } else { @@ -3926,9 +3971,10 @@ void host::resolve(int& w, int& e) { engine_logger(log_verification_error, basic) << "Error: '" << it->first << "' is not a " "valid parent for host '" - << _name << "'!"; + << name() << "'!"; log_v2::config()->error( - "Error: '{}' is not a valid parent for host '{}'!", it->first, _name); + "Error: '{}' is not a valid parent for host '{}'!", it->first, + name()); errors++; } else { it->second = it_host->second.get(); @@ -3938,13 +3984,13 @@ void host::resolve(int& w, int& e) { } /* check for illegal characters in host name */ - if (contains_illegal_object_chars(_name.c_str())) { + if (contains_illegal_object_chars(name().c_str())) { engine_logger(log_verification_error, basic) - << "Error: The name of host '" << _name + << "Error: The name of host '" << name() << "' contains one or more illegal characters."; log_v2::config()->error( "Error: The name of host '{}' contains one or more illegal characters.", - _name); + name()); errors++; } 
@@ -3968,7 +4014,7 @@ void host::resolve(int& w, int& e) { e += errors; if (errors) - throw engine_error() << "Cannot resolve host '" << _name << "'"; + throw engine_error() << "Cannot resolve host '" << name() << "'"; } timeperiod* host::get_notification_timeperiod() const { diff --git a/engine/src/hostdependency.cc b/engine/src/hostdependency.cc index 41cfafb93f0..a725803d1ef 100644 --- a/engine/src/hostdependency.cc +++ b/engine/src/hostdependency.cc @@ -148,11 +148,10 @@ std::ostream& operator<<(std::ostream& os, hostdependency const& obj) { << obj.get_contains_circular_path() << "\n" " master_host_ptr: " - << (obj.master_host_ptr ? obj.master_host_ptr->get_name() : "\"NULL\"") + << (obj.master_host_ptr ? obj.master_host_ptr->name() : "\"NULL\"") << "\n" " dependent_host_ptr: " - << (obj.dependent_host_ptr ? obj.dependent_host_ptr->get_name() - : "\"NULL\"") + << (obj.dependent_host_ptr ? obj.dependent_host_ptr->name() : "\"NULL\"") << "\n" " dependency_period_ptr: " << dependency_period_str diff --git a/engine/src/log_v2.cc b/engine/src/log_v2.cc index 4e9bc3c68d7..1660b3dc65e 100644 --- a/engine/src/log_v2.cc +++ b/engine/src/log_v2.cc @@ -103,6 +103,7 @@ void log_v2::apply(const configuration::state& config) { sinks.push_back(std::make_shared()); auto create_logger = [&sinks, log_pid = config.log_pid(), + log_file_line = config.log_file_line(), log_flush_period = config.log_flush_period()]( const std::string& name, level::level_enum lvl) { spdlog::drop(name); @@ -113,10 +114,19 @@ void log_v2::apply(const configuration::state& config) { else log->flush_on(lvl); - if (log_pid) - log->set_pattern("[%Y-%m-%dT%H:%M:%S.%e%z] [%n] [%l] [%P] %v"); - else - log->set_pattern("[%Y-%m-%dT%H:%M:%S.%e%z] [%n] [%l] %v"); + if (log_pid) { + if (log_file_line) { + log->set_pattern("[%Y-%m-%dT%H:%M:%S.%e%z] [%s:%#] [%n] [%l] [%P] %v"); + } else { + log->set_pattern("[%Y-%m-%dT%H:%M:%S.%e%z] [%n] [%l] [%P] %v"); + } + } else { + if (log_file_line) { + 
log->set_pattern("[%Y-%m-%dT%H:%M:%S.%e%z] [%s:%#] [%n] [%l] %v"); + } else { + log->set_pattern("[%Y-%m-%dT%H:%M:%S.%e%z] [%n] [%l] %v"); + } + } spdlog::register_logger(log); return log; }; diff --git a/engine/src/macros.cc b/engine/src/macros.cc index 8af8e0e5c5e..c3ae6c8b4ca 100644 --- a/engine/src/macros.cc +++ b/engine/src/macros.cc @@ -139,7 +139,7 @@ int grab_custom_macro_value_r(nagios_macros* mac, return ERROR; service_map::const_iterator found = service::services.find( - {mac->host_ptr ? mac->host_ptr->get_name() : "", arg2}); + {mac->host_ptr ? mac->host_ptr->name() : "", arg2}); if (found != service::services.end() && found->second) { /* get the service macro value */ diff --git a/engine/src/macros/grab_host.cc b/engine/src/macros/grab_host.cc old mode 100755 new mode 100644 index 9734e6c9f6c..bd2abdd45c2 --- a/engine/src/macros/grab_host.cc +++ b/engine/src/macros/grab_host.cc @@ -225,8 +225,7 @@ static std::string get_host_children(host& hst, nagios_macros* mac) { */ static std::string get_host_id(host& hst, nagios_macros* mac) { (void)mac; - return ( - string::from(com::centreon::engine::get_host_id(hst.get_name())).c_str()); + return (string::from(com::centreon::engine::get_host_id(hst.name())).c_str()); } /** @@ -256,7 +255,10 @@ struct grab_host_redirection { entry; entry routines{ {MACRO_HOSTNAME, - {&get_member_as_string, + {&get_member_as_string, true}}, {MACRO_HOSTDISPLAYNAME, {&get_member_as_stringhost_ptr->get_name(), arg2})); + service::services.find({mac->host_ptr->name(), arg2})); if (found == service::services.end() || !found->second) retval = ERROR; @@ -980,7 +980,12 @@ int grab_macro_value_r(nagios_macros* mac, else if (macro_name.size() > 3 && strncmp(macro_name.c_str(), "ARG", 3) == 0) { /* which arg do we want? 
*/ - x = atoi(macro_name.c_str() + 3); + if (!absl::SimpleAtoi(macro_name.c_str() + 3, &x)) { + log_v2::macros()->error( + "Error: could not grab macro value : '{}' must be a positive integer", + macro_name.c_str() + 3); + return ERROR; + } if (!x || x > MAX_COMMAND_ARGUMENTS) { delete[] buf; @@ -995,7 +1000,12 @@ int grab_macro_value_r(nagios_macros* mac, else if (macro_name.size() > 4 && strncmp(macro_name.c_str(), "USER", 4) == 0) { /* which macro do we want? */ - x = atoi(macro_name.c_str() + 4); + if (!absl::SimpleAtoi(macro_name.c_str() + 4, &x)) { + log_v2::macros()->error( + "Error: could not grab macro value : '{}' must be a positive integer", + macro_name.c_str() + 4); + return ERROR; + } if (!x || x > MAX_USER_MACROS) { delete[] buf; @@ -1012,7 +1022,13 @@ int grab_macro_value_r(nagios_macros* mac, else if (macro_name.size() > 14 && strncmp(macro_name.c_str(), "CONTACTADDRESS", 14) == 0) { /* which address do we want? */ - x = atoi(macro_name.c_str() + 14) - 1; + if (!absl::SimpleAtoi(macro_name.c_str() + 14, &x)) { + log_v2::macros()->error( + "Error: could not grab macro value : '{}' must be a positive integer", + macro_name.c_str() + 14); + return ERROR; + } + x -= 1; /* regular macro */ if (arg[0] == nullptr) { diff --git a/engine/src/main.cc b/engine/src/main.cc index 27604171c23..bde20fced13 100644 --- a/engine/src/main.cc +++ b/engine/src/main.cc @@ -23,11 +23,13 @@ #include #endif // HAVE_GETOPT_H #include -#include -#include #include #include +#include +#include +#include + #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/broker/loader.hh" #include "com/centreon/engine/checks/checker.hh" @@ -349,8 +351,10 @@ int main(int argc, char* argv[]) { port = dis(gen); } + const std::string& listen_address = config.rpc_listen_address(); + std::unique_ptr > rpc( - new enginerpc("0.0.0.0", port), [](enginerpc* rpc) { + new enginerpc(listen_address, port), [](enginerpc* rpc) { rpc->shutdown(); delete rpc; }); diff --git 
a/engine/src/notifier.cc b/engine/src/notifier.cc index 6db89bde7eb..7e443ca8d4f 100644 --- a/engine/src/notifier.cc +++ b/engine/src/notifier.cc @@ -64,6 +64,7 @@ std::array const notifier::_is_notification_viable{{ uint64_t notifier::_next_notification_id{1L}; notifier::notifier(notifier::notifier_type notifier_type, + const std::string& name, std::string const& display_name, std::string const& check_command, bool checks_enabled, @@ -97,7 +98,8 @@ notifier::notifier(notifier::notifier_type notifier_type, bool retain_nonstatus_information, bool is_volatile, uint64_t icon_id) - : checkable{display_name, + : checkable{name, + display_name, check_command, checks_enabled, accept_passive_checks, @@ -160,7 +162,8 @@ notifier::notifier(notifier::notifier_type notifier_type, engine_logger(log_config_error, basic) << "Error: Invalid notification_interval value for notifier '" << display_name << "'"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Invalid notification_interval value for notifier '{}'", display_name); throw engine_error() << "Could not register notifier '" << display_name @@ -211,9 +214,9 @@ void notifier::set_last_problem_id(unsigned long last_problem_id) noexcept { * @param num The notification number. 
*/ void notifier::set_notification_number(int num) { - log_v2::notifications()->trace( - "_notification_number set_notification_number: {} => {}", - _notification_number, num); + SPDLOG_LOGGER_TRACE(log_v2::notifications(), + "_notification_number set_notification_number: {} => {}", + _notification_number, num); /* set the notification number */ _notification_number = num; @@ -226,7 +229,8 @@ bool notifier::_is_notification_viable_normal(reason_type type notification_option options) { engine_logger(dbg_functions, basic) << "notifier::is_notification_viable_normal()"; - log_v2::functions()->trace("notifier::is_notification_viable_normal()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "notifier::is_notification_viable_normal()"); /* forced notifications bust through everything */ uint32_t notification_interval = @@ -237,8 +241,8 @@ bool notifier::_is_notification_viable_normal(reason_type type if (options & notification_option_forced) { engine_logger(dbg_notifications, more) << "This is a forced notification, so we'll send it out."; - log_v2::notifications()->debug( - "This is a forced notification, so we'll send it out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "This is a forced notification, so we'll send it out."); return true; } @@ -247,9 +251,9 @@ bool notifier::_is_notification_viable_normal(reason_type type engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; - log_v2::notifications()->debug( - "Notifications are disabled, so notifications will " - "not be sent out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are disabled, so notifications will " + "not be sent out."); return false; } @@ -258,9 +262,9 @@ bool notifier::_is_notification_viable_normal(reason_type type engine_logger(dbg_notifications, more) << "Notifications are temporarily disabled for " "this notifier, so we won't send one out."; - log_v2::notifications()->debug( - "Notifications are temporarily 
disabled for " - "this notifier, so we won't send one out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are temporarily disabled for " + "this notifier, so we won't send one out."); return false; } @@ -270,7 +274,8 @@ bool notifier::_is_notification_viable_normal(reason_type type engine_logger(dbg_notifications, more) << "This notifier is currently in a scheduled downtime, so " "we won't send notifications."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is currently in a scheduled downtime, so " "we won't send notifications."); return false; @@ -285,9 +290,9 @@ bool notifier::_is_notification_viable_normal(reason_type type engine_logger(dbg_notifications, more) << "This notifier shouldn't have notifications sent out " "at this time."; - log_v2::notifications()->debug( - "This notifier shouldn't have notifications sent out " - "at this time."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "This notifier shouldn't have notifications sent out " + "at this time."); return false; } @@ -295,7 +300,8 @@ bool notifier::_is_notification_viable_normal(reason_type type if (get_is_flapping()) { engine_logger(dbg_notifications, more) << "This notifier is flapping, so we won't send notifications."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is flapping, so we won't send notifications."); return false; } @@ -304,7 +310,8 @@ bool notifier::_is_notification_viable_normal(reason_type type if (get_is_volatile()) { engine_logger(dbg_notifications, more) << "This is a volatile service notification, so it is sent."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This is a volatile service notification, so it is sent."); return true; } @@ -312,7 +319,8 @@ bool notifier::_is_notification_viable_normal(reason_type type if (get_state_type() != hard) { engine_logger(dbg_notifications, more) << "This notifier is in soft 
state, so we won't send notifications."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is in soft state, so we won't send notifications."); return false; } @@ -321,7 +329,8 @@ bool notifier::_is_notification_viable_normal(reason_type type engine_logger(dbg_notifications, more) << "This notifier problem has been acknowledged, so we won't send " "notifications."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier problem has been acknowledged, so we won't send " "notifications."); return false; @@ -330,7 +339,8 @@ bool notifier::_is_notification_viable_normal(reason_type type if (get_current_state_int() == 0) { engine_logger(dbg_notifications, more) << "We don't send a normal notification when the state is ok/up"; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "We don't send a normal notification when the state is ok/up"); return false; } @@ -340,7 +350,8 @@ bool notifier::_is_notification_viable_normal(reason_type type << "This notifier is unable to notify the state " << get_current_state_as_string() << ": not configured for that or, for a service, its host may be down"; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is unable to notify the state {}: not configured for " "that or, for a service, its host may be down", get_current_state_as_string()); @@ -355,7 +366,8 @@ bool notifier::_is_notification_viable_normal(reason_type type << "This notifier is configured with a first notification delay, we " "won't send notification until timestamp " << (_first_notification_delay * config->interval_length()); - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is configured with a first notification delay, we " "won't send notification until timestamp {}", _first_notification_delay * config->interval_length()); @@ -366,7 +378,8 @@ bool 
notifier::_is_notification_viable_normal(reason_type type engine_logger(dbg_notifications, more) << "This notifier won't send any notification since it depends on" " another notifier that has already sent one"; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier won't send any notification since it depends on" " another notifier that has already sent one"); return false; @@ -382,7 +395,8 @@ bool notifier::_is_notification_viable_normal(reason_type type << _last_notification << " so, since the notification interval is 0, it won't be sent" << " anymore"; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier problem has already been sent at {} so, since the " "notification interval is 0, it won't be sent anymore", _last_notification); @@ -395,7 +409,8 @@ bool notifier::_is_notification_viable_normal(reason_type type << "This notifier problem has been sent at " << _last_notification << " so it won't be sent until " << (notification_interval * config->interval_length()); - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier problem has been sent at {} so it won't be sent " "until {}", _last_notification, @@ -414,7 +429,8 @@ bool notifier::_is_notification_viable_recovery(reason_type type __attribute__((unused))) { engine_logger(dbg_functions, basic) << "notifier::is_notification_viable_recovery()"; - log_v2::functions()->trace("notifier::is_notification_viable_recovery()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "notifier::is_notification_viable_recovery()"); bool retval{true}; bool send_later{false}; @@ -423,9 +439,9 @@ bool notifier::_is_notification_viable_recovery(reason_type type engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; - log_v2::notifications()->debug( - "Notifications are disabled, so notifications will " - "not be sent out."); + 
SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are disabled, so notifications will " + "not be sent out."); retval = false; } /* are notifications temporarily disabled for this notifier? */ @@ -433,9 +449,9 @@ bool notifier::_is_notification_viable_recovery(reason_type type engine_logger(dbg_notifications, more) << "Notifications are temporarily disabled for " "this notifier, so we won't send one out."; - log_v2::notifications()->debug( - "Notifications are temporarily disabled for " - "this notifier, so we won't send one out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are temporarily disabled for " + "this notifier, so we won't send one out."); retval = false; } else { timeperiod* tp{get_notification_timeperiod()}; @@ -447,9 +463,9 @@ bool notifier::_is_notification_viable_recovery(reason_type type engine_logger(dbg_notifications, more) << "This notifier shouldn't have notifications sent out " "at this time."; - log_v2::notifications()->debug( - "This notifier shouldn't have notifications sent out " - "at this time."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "This notifier shouldn't have notifications sent out " + "at this time."); retval = false; send_later = true; } @@ -459,7 +475,8 @@ bool notifier::_is_notification_viable_recovery(reason_type type engine_logger(dbg_notifications, more) << "This notifier is currently in a scheduled downtime, so " "we won't send notifications."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is currently in a scheduled downtime, so " "we won't send notifications."); retval = false; @@ -469,14 +486,16 @@ bool notifier::_is_notification_viable_recovery(reason_type type else if (get_is_flapping()) { engine_logger(dbg_notifications, more) << "This notifier is flapping, so we won't send notifications."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is flapping, so we won't send 
notifications."); retval = false; send_later = true; } else if (get_state_type() != hard) { engine_logger(dbg_notifications, more) << "This notifier is in soft state, so we won't send notifications."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is in soft state, so we won't send notifications."); retval = false; send_later = true; @@ -485,14 +504,16 @@ bool notifier::_is_notification_viable_recovery(reason_type type else if (get_current_state_int() != 0) { engine_logger(dbg_notifications, more) << "This notifier state is not UP/OK to send a recovery notification"; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier state is not UP/OK to send a recovery notification"); retval = false; send_later = true; } else if (!(get_notify_on(up) || get_notify_on(ok))) { engine_logger(dbg_notifications, more) << "This notifier is not configured to send a recovery notification"; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is not configured to send a recovery notification"); retval = false; send_later = false; @@ -504,7 +525,8 @@ bool notifier::_is_notification_viable_recovery(reason_type type << "It won't send any recovery notification until timestamp " << " so it won't be sent until " << (get_last_hard_state_change() + _recovery_notification_delay); - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "This notifier is configured with a recovery notification delay. " "It won't send any recovery notification until timestamp " "so it won't be sent until {}", @@ -516,7 +538,8 @@ bool notifier::_is_notification_viable_recovery(reason_type type << "No notification has been sent to " "announce a problem. So no recovery" << " notification will be sent"; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "No notification has been sent to " "announce a problem. 
So no recovery notification will be sent"); retval = false; @@ -525,10 +548,10 @@ bool notifier::_is_notification_viable_recovery(reason_type type << "We should not send a notification " "since no normal notification has" " been sent before"; - log_v2::notifications()->debug( - "We should not send a notification " - "since no normal notification has" - " been sent before"); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "We should not send a notification " + "since no normal notification has" + " been sent before"); retval = false; } } @@ -536,7 +559,8 @@ bool notifier::_is_notification_viable_recovery(reason_type type if (!retval) { if (!send_later) { _notification[cat_normal].reset(); - log_v2::notifications()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::notifications(), " _notification_number _is_notification_viable_recovery: {} => 0", _notification_number); _notification_number = 0; @@ -551,14 +575,14 @@ bool notifier::_is_notification_viable_acknowledgement( notification_option options) { engine_logger(dbg_functions, basic) << "notifier::is_notification_viable_acknowledgement()"; - log_v2::functions()->trace( - "notifier::is_notification_viable_acknowledgement()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "notifier::is_notification_viable_acknowledgement()"); /* forced notifications bust through everything */ if (options & notification_option_forced) { engine_logger(dbg_notifications, more) << "This is a forced notification, so we'll send it out."; - log_v2::notifications()->debug( - "This is a forced notification, so we'll send it out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "This is a forced notification, so we'll send it out."); return true; } @@ -567,9 +591,9 @@ bool notifier::_is_notification_viable_acknowledgement( engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; - log_v2::notifications()->debug( - "Notifications are disabled, so notifications will " - "not be sent out."); + 
SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are disabled, so notifications will " + "not be sent out."); return false; } @@ -578,9 +602,9 @@ bool notifier::_is_notification_viable_acknowledgement( engine_logger(dbg_notifications, more) << "Notifications are temporarily disabled for " "this notifier, so we won't send one out."; - log_v2::notifications()->debug( - "Notifications are temporarily disabled for " - "this notifier, so we won't send one out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are temporarily disabled for " + "this notifier, so we won't send one out."); return false; } @@ -588,9 +612,9 @@ bool notifier::_is_notification_viable_acknowledgement( engine_logger(dbg_notifications, more) << "The notifier is currently OK/UP, so we " "won't send an acknowledgement."; - log_v2::notifications()->debug( - "The notifier is currently OK/UP, so we " - "won't send an acknowledgement."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "The notifier is currently OK/UP, so we " + "won't send an acknowledgement."); return false; } return true; @@ -600,13 +624,14 @@ bool notifier::_is_notification_viable_flapping(reason_type type, notification_option options) { engine_logger(dbg_functions, basic) << "notifier::is_notification_viable_flapping()"; - log_v2::functions()->trace("notifier::is_notification_viable_flapping()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "notifier::is_notification_viable_flapping()"); /* forced notifications bust through everything */ if (options & notification_option_forced) { engine_logger(dbg_notifications, more) << "This is a forced notification, so we'll send it out."; - log_v2::notifications()->debug( - "This is a forced notification, so we'll send it out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "This is a forced notification, so we'll send it out."); return true; } @@ -615,9 +640,9 @@ bool notifier::_is_notification_viable_flapping(reason_type type, 
engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; - log_v2::notifications()->debug( - "Notifications are disabled, so notifications will " - "not be sent out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are disabled, so notifications will " + "not be sent out."); return false; } @@ -626,9 +651,9 @@ bool notifier::_is_notification_viable_flapping(reason_type type, engine_logger(dbg_notifications, more) << "Notifications are temporarily disabled for " "this notifier, so we won't send one out."; - log_v2::notifications()->debug( - "Notifications are temporarily disabled for " - "this notifier, so we won't send one out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are temporarily disabled for " + "this notifier, so we won't send one out."); return false; } @@ -645,7 +670,8 @@ bool notifier::_is_notification_viable_flapping(reason_type type, engine_logger(dbg_notifications, more) << "We shouldn't notify about " << tab_notification_str[type] << " events for this notifier."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "We shouldn't notify about {} events for this notifier.", tab_notification_str[type]); return false; @@ -657,7 +683,8 @@ bool notifier::_is_notification_viable_flapping(reason_type type, engine_logger(dbg_notifications, more) << "A flapping notification is already running, we can not send " "a start notification now."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "A flapping notification is already running, we can not send " "a start notification now."); return false; @@ -669,7 +696,8 @@ bool notifier::_is_notification_viable_flapping(reason_type type, engine_logger(dbg_notifications, more) << "A stop or cancellation flapping notification can only be sent " "after a start flapping notification."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + 
log_v2::notifications(), "A stop or cancellation flapping notification can only be sent " "after a start flapping notification."); return false; @@ -682,9 +710,9 @@ bool notifier::_is_notification_viable_flapping(reason_type type, engine_logger(dbg_notifications, more) << "We shouldn't notify about a " << tab_notification_str[type] << " event: already sent."; - log_v2::notifications()->debug( - "We shouldn't notify about a {} event: already sent.", - tab_notification_str[type]); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "We shouldn't notify about a {} event: already sent.", + tab_notification_str[type]); return false; } @@ -693,9 +721,9 @@ bool notifier::_is_notification_viable_flapping(reason_type type, engine_logger(dbg_notifications, more) << "We shouldn't notify about FLAPPING " "events during scheduled downtime."; - log_v2::notifications()->debug( - "We shouldn't notify about FLAPPING " - "events during scheduled downtime."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "We shouldn't notify about FLAPPING " + "events during scheduled downtime."); return false; } return true; @@ -706,14 +734,15 @@ bool notifier::_is_notification_viable_downtime(reason_type type notification_option options) { engine_logger(dbg_functions, basic) << "notifier::is_notification_viable_downtime()"; - log_v2::functions()->trace("notifier::is_notification_viable_downtime()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "notifier::is_notification_viable_downtime()"); /* forced notifications bust through everything */ if (options & notification_option_forced) { engine_logger(dbg_notifications, more) << "This is a forced notification, so we'll send it out."; - log_v2::notifications()->debug( - "This is a forced notification, so we'll send it out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "This is a forced notification, so we'll send it out."); return true; } @@ -722,9 +751,9 @@ bool notifier::_is_notification_viable_downtime(reason_type type 
engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; - log_v2::notifications()->debug( - "Notifications are disabled, so notifications will " - "not be sent out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are disabled, so notifications will " + "not be sent out."); return false; } @@ -733,16 +762,17 @@ bool notifier::_is_notification_viable_downtime(reason_type type engine_logger(dbg_notifications, more) << "Notifications are temporarily disabled for " "this notifier, so we won't send one out."; - log_v2::notifications()->debug( - "Notifications are temporarily disabled for " - "this notifier, so we won't send one out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are temporarily disabled for " + "this notifier, so we won't send one out."); return false; } if (!config->enable_notifications()) { engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications won't be sent out."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "Notifications are disabled, so notifications won't be sent out."); return false; } @@ -751,7 +781,8 @@ bool notifier::_is_notification_viable_downtime(reason_type type if (!get_notify_on(downtime)) { engine_logger(dbg_notifications, more) << "We shouldn't notify about DOWNTIME events for this notifier."; - log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "We shouldn't notify about DOWNTIME events for this notifier."); return false; } @@ -763,9 +794,9 @@ bool notifier::_is_notification_viable_downtime(reason_type type engine_logger(dbg_notifications, more) << "We shouldn't notify about DOWNTIME " "events during scheduled downtime."; - log_v2::notifications()->debug( - "We shouldn't notify about DOWNTIME " - "events during scheduled downtime."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "We shouldn't notify about DOWNTIME " + "events during 
scheduled downtime."); return false; } return true; @@ -776,13 +807,14 @@ bool notifier::_is_notification_viable_custom(reason_type type notification_option options) { engine_logger(dbg_functions, basic) << "notifier::is_notification_viable_custom()"; - log_v2::functions()->trace("notifier::is_notification_viable_custom()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "notifier::is_notification_viable_custom()"); /* forced notifications bust through everything */ if (options & notification_option_forced) { engine_logger(dbg_notifications, more) << "This is a forced notification, so we'll send it out."; - log_v2::notifications()->debug( - "This is a forced notification, so we'll send it out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "This is a forced notification, so we'll send it out."); return true; } @@ -791,9 +823,9 @@ bool notifier::_is_notification_viable_custom(reason_type type engine_logger(dbg_notifications, more) << "Notifications are disabled, so notifications will " "not be sent out."; - log_v2::notifications()->debug( - "Notifications are disabled, so notifications will " - "not be sent out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are disabled, so notifications will " + "not be sent out."); return false; } @@ -802,9 +834,9 @@ bool notifier::_is_notification_viable_custom(reason_type type engine_logger(dbg_notifications, more) << "Notifications are temporarily disabled for " "this notifier, so we won't send one out."; - log_v2::notifications()->debug( - "Notifications are temporarily disabled for " - "this notifier, so we won't send one out."); + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "Notifications are temporarily disabled for " + "this notifier, so we won't send one out."); return false; } @@ -812,7 +844,8 @@ bool notifier::_is_notification_viable_custom(reason_type type if (is_in_downtime()) { engine_logger(dbg_notifications, more) << "We shouldn't send a CUSTOM notification during scheduled downtime."; - 
log_v2::notifications()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), "We shouldn't send a CUSTOM notification during scheduled downtime."); return false; } @@ -921,7 +954,7 @@ int notifier::notify(notifier::reason_type type, std::string const& not_data, notification_option options) { engine_logger(dbg_functions, basic) << "notifier::notify()"; - log_v2::functions()->trace("notifier::notify({})", type); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "notifier::notify({})", type); notification_category cat{get_category(type)}; /* Has this notification got sense? */ @@ -931,9 +964,9 @@ int notifier::notify(notifier::reason_type type, /* For a first notification, we store what type of notification we try to * send and we fix the notification number to 1. */ if (type != reason_recovery) { - log_v2::notifications()->trace("_notification_number notify: {} -> {}", - _notification_number, - _notification_number + 1); + SPDLOG_LOGGER_TRACE(log_v2::notifications(), + "_notification_number notify: {} -> {}", + _notification_number, _notification_number + 1); ++_notification_number; } @@ -944,9 +977,9 @@ int notifier::notify(notifier::reason_type type, get_contacts_to_notify(cat, type, notification_interval, escalated)}; _current_notification_id = _next_notification_id++; - std::unique_ptr notif{new notification( + auto notif = std::make_unique( this, type, not_author, not_data, options, _current_notification_id, - _notification_number, notification_interval, escalated)}; + _notification_number, notification_interval, escalated); /* Let's make the notification. */ int retval{notif->execute(to_notify)}; @@ -986,8 +1019,9 @@ int notifier::notify(notifier::reason_type type, /* In case of an acknowledgement, we must keep the _notification_number * otherwise the recovery notification won't be sent when needed. 
*/ if (cat != cat_acknowledgement && cat != cat_downtime) { - log_v2::notifications()->trace("_notification_number notify: {} => 0", - _notification_number); + SPDLOG_LOGGER_TRACE(log_v2::notifications(), + "_notification_number notify: {} => 0", + _notification_number); _notification_number = 0; } } @@ -1336,7 +1370,8 @@ void notifier::resolve(int& w, int& e) { << "Error: Event handler command '" << cmd_name << "' specified for host '" << get_display_name() << "' not defined anywhere"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Event handler command '{}' specified for host '{}' not " "defined anywhere", cmd_name, get_display_name()); @@ -1359,21 +1394,23 @@ void notifier::resolve(int& w, int& e) { << "Error: Notifier check command '" << cmd_name << "' specified for host '" << get_display_name() << "' is not defined anywhere!"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Notifier check command '{}' specified for host '{}' is not " "defined anywhere!", cmd_name, get_display_name()); errors++; } else /* save the pointer to the check command for later */ - set_check_command_ptr(cmd_found->second.get()); + set_check_command_ptr(cmd_found->second); } if (check_period().empty()) { engine_logger(log_verification_error, basic) << "Warning: Notifier '" << get_display_name() << "' has no check time period defined!"; - log_v2::config()->warn( + SPDLOG_LOGGER_WARN( + log_v2::config(), "Warning: Notifier '{}' has no check time period defined!", get_display_name()); warnings++; @@ -1387,7 +1424,8 @@ void notifier::resolve(int& w, int& e) { << "Error: Check period '" << check_period() << "' specified for host '" << get_display_name() << "' is not defined anywhere!"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Check period '{}' specified for host '{}' is not defined " "anywhere!", check_period(), get_display_name()); @@ -1407,7 +1445,8 @@ void notifier::resolve(int& w, int& e) { 
engine_logger(log_verification_error, basic) << "Error: Contact '" << it->first << "' specified in notifier '" << get_display_name() << "' is not defined anywhere!"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Contact '{}' specified in notifier '{}' is not defined " "anywhere!", it->first, get_display_name()); @@ -1429,7 +1468,8 @@ void notifier::resolve(int& w, int& e) { engine_logger(log_verification_error, basic) << "Error: Contact group '" << it->first << "' specified in host '" << get_display_name() << "' is not defined anywhere!"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Contact group '{}' specified in host '{}' is not defined " "anywhere!", it->first, get_display_name()); @@ -1448,7 +1488,8 @@ void notifier::resolve(int& w, int& e) { << "Error: Notification period '" << notification_period() << "' specified for notifier '" << get_display_name() << "' is not defined anywhere!"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Notification period '{}' specified for notifier '{}' is not " "defined anywhere!", notification_period(), get_display_name()); @@ -1461,7 +1502,8 @@ void notifier::resolve(int& w, int& e) { engine_logger(log_verification_error, basic) << "Warning: Notifier '" << get_display_name() << "' has no notification time period defined!"; - log_v2::config()->warn( + SPDLOG_LOGGER_WARN( + log_v2::config(), "Warning: Notifier '{}' has no notification time period defined!", get_display_name()); warnings++; @@ -1514,17 +1556,20 @@ time_t notifier::get_next_notification_time(time_t offset) { engine_logger(dbg_functions, basic) << "notifier::get_next_notification_time()"; - log_v2::functions()->trace("notifier::get_next_notification_time()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "notifier::get_next_notification_time()"); engine_logger(dbg_notifications, most) << "Calculating next valid notification time..."; - 
log_v2::notifications()->info("Calculating next valid notification time..."); + SPDLOG_LOGGER_INFO(log_v2::notifications(), + "Calculating next valid notification time..."); /* default notification interval */ uint32_t interval_to_use{_notification_interval}; engine_logger(dbg_notifications, most) << "Default interval: " << interval_to_use; - log_v2::notifications()->info("Default interval: {}", interval_to_use); + SPDLOG_LOGGER_INFO(log_v2::notifications(), "Default interval: {}", + interval_to_use); /* * search all the escalation entries for valid matches for this service (at @@ -1542,8 +1587,9 @@ time_t notifier::get_next_notification_time(time_t offset) { engine_logger(dbg_notifications, most) << "Found a valid escalation w/ interval of " << e->get_notification_interval(); - log_v2::notifications()->info("Found a valid escalation w/ interval of {}", - e->get_notification_interval()); + SPDLOG_LOGGER_INFO(log_v2::notifications(), + "Found a valid escalation w/ interval of {}", + e->get_notification_interval()); /* * if we haven't used a notification interval from an escalation yet, @@ -1559,7 +1605,8 @@ time_t notifier::get_next_notification_time(time_t offset) { engine_logger(dbg_notifications, most) << "New interval: " << interval_to_use; - log_v2::notifications()->info("New interval: {}", interval_to_use); + SPDLOG_LOGGER_INFO(log_v2::notifications(), "New interval: {}", + interval_to_use); } /* @@ -1575,10 +1622,10 @@ time_t notifier::get_next_notification_time(time_t offset) { << "Interval used for calculating next valid " "notification time: " << interval_to_use; - log_v2::notifications()->info( - "Interval used for calculating next valid " - "notification time: {}", - interval_to_use); + SPDLOG_LOGGER_INFO(log_v2::notifications(), + "Interval used for calculating next valid " + "notification time: {}", + interval_to_use); /* calculate next notification time */ time_t next_notification{ @@ -1619,7 +1666,8 @@ void notifier::set_notification(int32_t idx, 
std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the line should start " "with 'type: '"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the line should start " "with 'type: '"); return; @@ -1632,7 +1680,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the separator between " << "two fields is ', '"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the separator between two " "fields is ', '"); return; @@ -1643,7 +1692,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the expected field " " after 'type' is 'author'"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the expected field after " "'type' is 'author'"); return; @@ -1659,7 +1709,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the expected field " " after 'author' is 'options'"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the expected field after " "'author' is 'options'"); return; @@ -1671,7 +1722,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the separator between " << "two fields is ', '"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the separator between two " "fields is ', '"); return; @@ -1682,7 +1734,8 @@ void 
notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the expected field " " after 'options' is 'escalated'"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the expected field " " after 'options' is 'escalated'"); return; @@ -1694,7 +1747,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the separator between " << "two fields is ', '"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the separator between two " "fields is ', '"); return; @@ -1705,7 +1759,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the expected field " " after 'escalated' is 'id'"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the expected field " " after 'escalated' is 'id'"); return; @@ -1717,7 +1772,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the separator between " << "two fields is ', '"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the separator between two " "fields is ', '"); return; @@ -1728,7 +1784,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the expected field " " after 'id' is 'number'"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the expected field " " after 'id' is 'number'"); 
return; @@ -1740,7 +1797,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the separator between " << "two fields is ', '"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the separator between two " "fields is ', '"); return; @@ -1751,7 +1809,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the expected field " " after 'number' is 'interval'"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the expected field " " after 'number' is 'interval'"); return; @@ -1763,7 +1822,8 @@ void notifier::set_notification(int32_t idx, std::string const& value) { engine_logger(log_config_error, basic) << "Error: Bad format in the notification part, the 'interval' value " "should be an integer"; - log_v2::config()->error( + SPDLOG_LOGGER_ERROR( + log_v2::config(), "Error: Bad format in the notification part, the 'interval' value " "should be an integer"); return; @@ -1780,10 +1840,9 @@ void notifier::set_notification(int32_t idx, std::string const& value) { } } } - - _notification[idx].reset(new notification(this, type, author, "", options, id, - number, interval, escalated, - contacts)); + _notification[idx] = + std::make_unique(this, type, author, "", options, id, + number, interval, escalated, contacts); } bool notifier::get_is_volatile() const noexcept { diff --git a/engine/src/retention/dump.cc b/engine/src/retention/dump.cc index 50a5f673cd4..bf2fd441620 100644 --- a/engine/src/retention/dump.cc +++ b/engine/src/retention/dump.cc @@ -54,7 +54,7 @@ std::ostream& dump::comment(std::ostream& os, auto it = host::hosts_by_id.find(obj.get_host_id()); if (it == host::hosts_by_id.end()) return os; - host_name = 
it->second->get_name().c_str(); + host_name = it->second->name().c_str(); service_description = ""; os << "hostcomment {\n"; } else { @@ -268,7 +268,7 @@ std::ostream& dump::host(std::ostream& os, com::centreon::engine::host const& obj) { os << "host {\n" "host_name=" - << obj.get_name() + << obj.name() << "\n" "host_id=" << obj.get_host_id() @@ -593,7 +593,7 @@ bool dump::save(std::string const& path) { std::ostream& dump::service(std::ostream& os, class service const& obj) { std::string hostname; if (obj.get_host_ptr()) - hostname = obj.get_host_ptr()->get_name(); + hostname = obj.get_host_ptr()->name(); os << "service {\n" "host_name=" diff --git a/engine/src/retention/object.cc b/engine/src/retention/object.cc index 792b68e89eb..76e8a283bae 100644 --- a/engine/src/retention/object.cc +++ b/engine/src/retention/object.cc @@ -94,23 +94,23 @@ bool retention::object::operator!=(object const& right) const throw() { retention::object_ptr retention::object::create(std::string const& type_name) { object_ptr obj; if (type_name == "service") - obj = object_ptr(new retention::service); + obj = std::make_shared(); else if (type_name == "host") - obj = object_ptr(new retention::host); + obj = std::make_shared(); else if (type_name == "contact") - obj = object_ptr(new retention::contact); + obj = std::make_shared(); else if (type_name == "hostcomment") - obj = object_ptr(new retention::comment(comment::host)); + obj = std::make_shared(comment::host); else if (type_name == "servicecomment") - obj = object_ptr(new retention::comment(comment::service)); + obj = std::make_shared(comment::service); else if (type_name == "hostdowntime") - obj = object_ptr(new retention::downtime(downtime::host)); + obj = std::make_shared(downtime::host); else if (type_name == "servicedowntime") - obj = object_ptr(new retention::downtime(downtime::service)); + obj = std::make_shared(downtime::service); else if (type_name == "info") - obj = object_ptr(new retention::info); + obj = 
std::make_shared(); else if (type_name == "program") - obj = object_ptr(new retention::program); + obj = std::make_shared(); return obj; } diff --git a/engine/src/sehandlers.cc b/engine/src/sehandlers.cc index 4b0b03caacf..0c0d771f371 100644 --- a/engine/src/sehandlers.cc +++ b/engine/src/sehandlers.cc @@ -118,11 +118,11 @@ int obsessive_compulsive_host_check_processor( if (early_timeout == true) engine_logger(log_runtime_warning, basic) << "Warning: OCHP command '" << processed_command << "' for host '" - << hst->get_name() << "' timed out after " << config->ochp_timeout() + << hst->name() << "' timed out after " << config->ochp_timeout() << " seconds"; log_v2::runtime()->warn( "Warning: OCHP command '{}' for host '{}' timed out after {} seconds", - processed_command, hst->get_name(), config->ochp_timeout()); + processed_command, hst->name(), config->ochp_timeout()); return OK; } @@ -469,9 +469,9 @@ int run_global_host_event_handler(nagios_macros* mac, return ERROR; engine_logger(dbg_eventhandlers, more) - << "Running global event handler for host '" << hst->get_name() << "'..."; + << "Running global event handler for host '" << hst->name() << "'..."; log_v2::events()->debug("Running global event handler for host '{}'...", - hst->get_name()); + hst->name()); /* get start time */ gettimeofday(&start_time, nullptr); @@ -503,7 +503,7 @@ int run_global_host_event_handler(nagios_macros* mac, if (config->log_event_handlers() == true) { std::ostringstream oss; - oss << "GLOBAL HOST EVENT HANDLER: " << hst->get_name() + oss << "GLOBAL HOST EVENT HANDLER: " << hst->name() << "$HOSTSTATE$;$HOSTSTATETYPE$;$HOSTATTEMPT$;" << config->global_host_event_handler(); process_macros_r(mac, oss.str(), processed_logentry, macro_options); @@ -596,9 +596,9 @@ int run_host_event_handler(nagios_macros* mac, return ERROR; engine_logger(dbg_eventhandlers, more) - << "Running event handler for host '" << hst->get_name() << "'..."; + << "Running event handler for host '" << hst->name() << 
"'..."; log_v2::events()->debug("Running event handler for host '{}'...", - hst->get_name()); + hst->name()); /* get start time */ gettimeofday(&start_time, nullptr); @@ -627,7 +627,7 @@ int run_host_event_handler(nagios_macros* mac, if (config->log_event_handlers() == true) { std::ostringstream oss; - oss << "HOST EVENT HANDLER: " << hst->get_name() + oss << "HOST EVENT HANDLER: " << hst->name() << ";$HOSTSTATE$;$HOSTSTATETYPE$;$HOSTATTEMPT$;" << hst->event_handler(); process_macros_r(mac, oss.str(), processed_logentry, macro_options); diff --git a/engine/src/service.cc b/engine/src/service.cc index 7b67a1d02e7..e6a169884da 100644 --- a/engine/src/service.cc +++ b/engine/src/service.cc @@ -57,10 +57,10 @@ std::array, 4> const service_map service::services; service_id_map service::services_by_id; -service::service(std::string const& hostname, - std::string const& description, - std::string const& display_name, - std::string const& check_command, +service::service(const std::string& hostname, + const std::string& description, + const std::string& display_name, + const std::string& check_command, bool checks_enabled, bool accept_passive_checks, enum service::service_state initial_state, @@ -70,26 +70,27 @@ service::service(std::string const& hostname, int max_attempts, uint32_t first_notification_delay, uint32_t recovery_notification_delay, - std::string const& notification_period, + const std::string& notification_period, bool notifications_enabled, bool is_volatile, - std::string const& check_period, - std::string const& event_handler, + const std::string& check_period, + const std::string& event_handler, bool event_handler_enabled, - std::string const& notes, - std::string const& notes_url, - std::string const& action_url, - std::string const& icon_image, - std::string const& icon_image_alt, + const std::string& notes, + const std::string& notes_url, + const std::string& action_url, + const std::string& icon_image, + const std::string& icon_image_alt, bool 
flap_detection_enabled, double low_flap_threshold, double high_flap_threshold, bool check_freshness, int freshness_threshold, bool obsess_over, - std::string const& timezone, + const std::string& timezone, uint64_t icon_id) : notifier{service_notification, + description, display_name, check_command, checks_enabled, @@ -126,7 +127,6 @@ service::service(std::string const& hostname, _host_id{0}, _service_id{0}, _hostname{hostname}, - _description{description}, _process_performance_data{0}, _check_flapping_recovery_notification{0}, _last_time_ok{0}, @@ -142,6 +142,12 @@ service::service(std::string const& hostname, set_current_attempt(initial_state == service::state_ok ? 1 : max_attempts); } +service::~service() noexcept { + if (get_check_command_ptr()) { + get_check_command_ptr()->remove_caller(this); + } +} + time_t service::get_last_time_ok() const { return _last_time_ok; } @@ -521,7 +527,7 @@ std::ostream& operator<<(std::ostream& os, << "\n modified_attributes: " << obj.get_modified_attributes() << "\n host_ptr: " - << (obj.get_host_ptr() ? obj.get_host_ptr()->get_name() : "\"nullptr\"") + << (obj.get_host_ptr() ? 
obj.get_host_ptr()->name() : "\"nullptr\"") << "\n event_handler_ptr: " << evt_str << "\n event_handler_args: " << obj.get_event_handler_args() @@ -616,10 +622,10 @@ std::ostream& operator<<(std::ostream& os, com::centreon::engine::service* add_service( uint64_t host_id, uint64_t service_id, - std::string const& host_name, - std::string const& description, - std::string const& display_name, - std::string const& check_period, + const std::string& host_name, + const std::string& description, + const std::string& display_name, + const std::string& check_period, com::centreon::engine::service::service_state initial_state, int max_attempts, double check_interval, @@ -627,7 +633,7 @@ com::centreon::engine::service* add_service( double notification_interval, uint32_t first_notification_delay, uint32_t recovery_notification_delay, - std::string const& notification_period, + const std::string& notification_period, bool notify_recovery, bool notify_unknown, bool notify_warning, @@ -636,9 +642,9 @@ com::centreon::engine::service* add_service( bool notify_downtime, bool notifications_enabled, bool is_volatile, - std::string const& event_handler, + const std::string& event_handler, bool event_handler_enabled, - std::string const& check_command, + const std::string& check_command, bool checks_enabled, bool accept_passive_checks, bool flap_detection_enabled, @@ -655,15 +661,15 @@ com::centreon::engine::service* add_service( int process_perfdata, bool check_freshness, int freshness_threshold, - std::string const& notes, - std::string const& notes_url, - std::string const& action_url, - std::string const& icon_image, - std::string const& icon_image_alt, + const std::string& notes, + const std::string& notes_url, + const std::string& action_url, + const std::string& icon_image, + const std::string& icon_image_alt, int retain_status_information, int retain_nonstatus_information, bool obsess_over_service, - std::string const& timezone, + const std::string& timezone, uint64_t icon_id) 
{ // Make sure we have everything we need. if (!service_id) { @@ -818,11 +824,12 @@ void service::check_for_expired_acknowledgement() { if (last_acknowledgement() + acknowledgement_timeout() >= now) { engine_logger(log_info_message, basic) << "Acknowledgement of service '" << get_description() - << "' on host '" << this->get_host_ptr()->get_name() + << "' on host '" << this->get_host_ptr()->name() << "' just expired"; - log_v2::events()->info( + SPDLOG_LOGGER_INFO( + log_v2::events(), "Acknowledgement of service '{}' on host '{}' just expired", - get_description(), this->get_host_ptr()->get_name()); + get_description(), this->get_host_ptr()->name()); set_problem_has_been_acknowledged(false); set_acknowledgement_type(ACKNOWLEDGEMENT_NONE); // FIXME DBO: could be improved with something smaller. @@ -874,8 +881,8 @@ bool engine::is_service_exist(std::pair const& id) { * @return Pair of ID if found, pair of 0 otherwise. */ std::pair engine::get_host_and_service_id( - std::string const& host, - std::string const& svc) { + const std::string& host, + const std::string& svc) { service_map::const_iterator found = service::services.find({host, svc}); return found != service::services.end() ? std::pair{found->second->get_host_id(), @@ -891,8 +898,8 @@ std::pair engine::get_host_and_service_id( * * @return The service ID if found, 0 otherwise. */ -uint64_t engine::get_service_id(std::string const& host, - std::string const& svc) { +uint64_t engine::get_service_id(const std::string& host, + const std::string& svc) { return get_host_and_service_id(host, svc).second; } @@ -926,7 +933,7 @@ uint64_t service::get_service_id() const { return _service_id; } -void service::set_hostname(std::string const& name) { +void service::set_hostname(const std::string& name) { _hostname = name; } @@ -935,12 +942,12 @@ void service::set_hostname(std::string const& name) { * * @return A string reference to the host name. 
*/ -std::string const& service::get_hostname() const { +const std::string& service::get_hostname() const { return _hostname; } -void service::set_description(std::string const& desc) { - _description = desc; +void service::set_description(const std::string& desc) { + set_name(desc); } /** @@ -948,8 +955,8 @@ void service::set_description(std::string const& desc) { * * @return A string reference to the description. */ -std::string const& service::get_description() const { - return _description; +const std::string& service::get_description() const { + return name(); } /** @@ -957,7 +964,7 @@ std::string const& service::get_description() const { * * @param[in] event_hdl_args the event handler arguments */ -void service::set_event_handler_args(std::string const& event_hdl_args) { +void service::set_event_handler_args(const std::string& event_hdl_args) { _event_handler_args = event_hdl_args; } @@ -966,7 +973,7 @@ void service::set_event_handler_args(std::string const& event_hdl_args) { * * @return A string reference to the event handler arguments. */ -std::string const& service::get_event_handler_args() const { +const std::string& service::get_event_handler_args() const { return _event_handler_args; } @@ -975,7 +982,7 @@ std::string const& service::get_event_handler_args() const { * * @param[in] cmd_args the command arguments */ -void service::set_check_command_args(std::string const& cmd_args) { +void service::set_check_command_args(const std::string& cmd_args) { _check_command_args = cmd_args; } @@ -984,7 +991,7 @@ void service::set_check_command_args(std::string const& cmd_args) { * * @return A string reference to the command arguments. */ -std::string const& service::get_check_command_args() const { +const std::string& service::get_check_command_args() const { return _check_command_args; } @@ -998,7 +1005,8 @@ static constexpr bool state_changes_use_cached_state = true; * @return OK or ERROR. 
* */ -int service::handle_async_check_result(check_result* queued_check_result) { +int service::handle_async_check_result( + const check_result& queued_check_result) { time_t next_service_check = 0L; time_t preferred_time = 0L; time_t next_valid_time = 0L; @@ -1015,70 +1023,67 @@ int service::handle_async_check_result(check_result* queued_check_result) { int flapping_check_done = false; engine_logger(dbg_functions, basic) << "handle_async_service_check_result()"; - log_v2::functions()->trace("handle_async_service_check_result()"); - - /* make sure we have what we need */ - if (!queued_check_result) - return ERROR; + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "handle_async_service_check_result()"); /* get the current time */ time_t current_time = std::time(nullptr); /* update the execution time for this check (millisecond resolution) */ double execution_time = - static_cast(queued_check_result->get_finish_time().tv_sec - - queued_check_result->get_start_time().tv_sec) + - static_cast(queued_check_result->get_finish_time().tv_usec - - queued_check_result->get_start_time().tv_usec) / + static_cast(queued_check_result.get_finish_time().tv_sec - + queued_check_result.get_start_time().tv_sec) + + static_cast(queued_check_result.get_finish_time().tv_usec - + queued_check_result.get_start_time().tv_usec) / 1000000.0; if (execution_time < 0.0) execution_time = 0.0; engine_logger(dbg_checks, basic) - << "** Handling check result for service '" << _description - << "' on host '" << _hostname << "'..."; - log_v2::checks()->trace( - "** Handling check result for service '{}' on host '{}'...", _description, + << "** Handling check result for service '" << name() << "' on host '" + << _hostname << "'..."; + SPDLOG_LOGGER_TRACE( + log_v2::checks(), + "** Handling check result for service '{}' on host '{}'...", name(), _hostname); engine_logger(dbg_checks, more) - << "HOST: " << _hostname << ", SERVICE: " << _description - << ", CHECK TYPE: " - << 
(queued_check_result->get_check_type() == check_active ? "Active" - : "Passive") - << ", OPTIONS: " << queued_check_result->get_check_options() + << "HOST: " << _hostname << ", SERVICE: " << name() << ", CHECK TYPE: " + << (queued_check_result.get_check_type() == check_active ? "Active" + : "Passive") + << ", OPTIONS: " << queued_check_result.get_check_options() << ", RESCHEDULE: " - << (queued_check_result->get_reschedule_check() ? "Yes" : "No") - << ", EXITED OK: " - << (queued_check_result->get_exited_ok() ? "Yes" : "No") + << (queued_check_result.get_reschedule_check() ? "Yes" : "No") + << ", EXITED OK: " << (queued_check_result.get_exited_ok() ? "Yes" : "No") << ", EXEC TIME: " << execution_time - << ", return CODE: " << queued_check_result->get_return_code() - << ", OUTPUT: " << queued_check_result->get_output(); - log_v2::checks()->debug( + << ", return CODE: " << queued_check_result.get_return_code() + << ", OUTPUT: " << queued_check_result.get_output(); + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "HOST: {}, SERVICE: {}, CHECK TYPE: {}, OPTIONS: {}, RESCHEDULE: {}, " "EXITED OK: {}, EXEC TIME: {}, return CODE: {}, OUTPUT: {}", - _hostname, _description, - queued_check_result->get_check_type() == check_active ? "Active" - : "Passive", - queued_check_result->get_check_options(), - queued_check_result->get_reschedule_check() ? "Yes" : "No", - queued_check_result->get_exited_ok() ? "Yes" : "No", execution_time, - queued_check_result->get_return_code(), - queued_check_result->get_output()); + _hostname, name(), + queued_check_result.get_check_type() == check_active ? "Active" + : "Passive", + queued_check_result.get_check_options(), + queued_check_result.get_reschedule_check() ? "Yes" : "No", + queued_check_result.get_exited_ok() ? "Yes" : "No", execution_time, + queued_check_result.get_return_code(), queued_check_result.get_output()); /* decrement the number of service checks still out there... 
*/ - if (queued_check_result->get_check_type() == check_active && + if (queued_check_result.get_check_type() == check_active && currently_running_service_checks > 0) currently_running_service_checks--; /* * skip this service check results if its passive and we aren't accepting * passive check results */ - if (queued_check_result->get_check_type() == check_passive) { + if (queued_check_result.get_check_type() == check_passive) { if (!config->accept_passive_service_checks()) { engine_logger(dbg_checks, basic) << "Discarding passive service check result because passive " "service checks are disabled globally."; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Discarding passive service check result because passive " "service checks are disabled globally."); return ERROR; @@ -1087,7 +1092,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { engine_logger(dbg_checks, basic) << "Discarding passive service check result because passive " "checks are disabled for this service."; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Discarding passive service check result because passive " "checks are disabled for this service."); return ERROR; @@ -1098,11 +1104,11 @@ int service::handle_async_check_result(check_result* queued_check_result) { * clear the freshening flag (it would have been set if this service was * determined to be stale) */ - if (queued_check_result->get_check_options() & CHECK_OPTION_FRESHNESS_CHECK) + if (queued_check_result.get_check_options() & CHECK_OPTION_FRESHNESS_CHECK) set_is_being_freshened(false); /* clear the execution flag if this was an active check */ - if (queued_check_result->get_check_type() == check_active) + if (queued_check_result.get_check_type() == check_active) set_is_executing(false); /* DISCARD INVALID FRESHNESS CHECK RESULTS */ @@ -1114,39 +1120,40 @@ int service::handle_async_check_result(check_result* queued_check_result) { ** make sure the service is still stale 
before we accept the check ** result. */ - if ((queued_check_result->get_check_options() & + if ((queued_check_result.get_check_options() & CHECK_OPTION_FRESHNESS_CHECK) && is_result_fresh(current_time, false)) { engine_logger(dbg_checks, basic) << "Discarding service freshness check result because the service " "is currently fresh (race condition avoided)."; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Discarding service freshness check result because the service " "is currently fresh (race condition avoided)."); return OK; } /* check latency is passed to us */ - set_latency(queued_check_result->get_latency()); + set_latency(queued_check_result.get_latency()); set_execution_time(execution_time); /* get the last check time */ - set_last_check(queued_check_result->get_start_time().tv_sec); + set_last_check(queued_check_result.get_start_time().tv_sec); /* was this check passive or active? */ - set_check_type(queued_check_result->get_check_type()); + set_check_type(queued_check_result.get_check_type()); /* update check statistics for passive checks */ - if (queued_check_result->get_check_type() == check_passive) + if (queued_check_result.get_check_type() == check_passive) update_check_stats(PASSIVE_SERVICE_CHECK_STATS, - queued_check_result->get_start_time().tv_sec); + queued_check_result.get_start_time().tv_sec); /* * should we reschedule the next service check? NOTE: This may be overridden * later... 
*/ - reschedule_check = queued_check_result->get_reschedule_check(); + reschedule_check = queued_check_result.get_reschedule_check(); /* save the old service status info */ _last_state = _current_state; @@ -1158,47 +1165,49 @@ int service::handle_async_check_result(check_result* queued_check_result) { * if there was some error running the command, just skip it (this * shouldn't be happening) */ - if (!queued_check_result->get_exited_ok()) { + if (!queued_check_result.get_exited_ok()) { engine_logger(log_runtime_warning, basic) - << "Warning: Check of service '" << _description << "' on host '" + << "Warning: Check of service '" << name() << "' on host '" << _hostname << "' did not exit properly!"; - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: Check of service '{}' on host '{}' did not exit properly!", - _description, _hostname); + name(), _hostname); set_plugin_output("(Service check did not exit properly)"); _current_state = service::state_unknown; } /* make sure the return code is within bounds */ - else if (queued_check_result->get_return_code() < 0 || - queued_check_result->get_return_code() > 3) { + else if (queued_check_result.get_return_code() < 0 || + queued_check_result.get_return_code() > 3) { engine_logger(log_runtime_warning, basic) - << "Warning: return (code of " << queued_check_result->get_return_code() - << " for check of service '" << _description << "' on host '" - << _hostname << "' was out of bounds." - << (queued_check_result->get_return_code() == 126 + << "Warning: return (code of " << queued_check_result.get_return_code() + << " for check of service '" << name() << "' on host '" << _hostname + << "' was out of bounds." + << (queued_check_result.get_return_code() == 126 ? "Make sure the plugin you're trying to run is executable." - : (queued_check_result->get_return_code() == 127 + : (queued_check_result.get_return_code() == 127 ? " Make sure the plugin you're trying to run actually " "exists." 
: "")); - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: return (code of {} for check of service '{}' on host '{}' " "was out of bounds.{}", - queued_check_result->get_return_code(), _description, _hostname, - (queued_check_result->get_return_code() == 126 + queued_check_result.get_return_code(), name(), _hostname, + (queued_check_result.get_return_code() == 126 ? "Make sure the plugin you're trying to run is executable." - : (queued_check_result->get_return_code() == 127 + : (queued_check_result.get_return_code() == 127 ? " Make sure the plugin you're trying to run actually " "exists." : ""))); std::ostringstream oss; - oss << "(Return code of " << queued_check_result->get_return_code() + oss << "(Return code of " << queued_check_result.get_return_code() << " is out of bounds" - << (queued_check_result->get_return_code() == 126 + << (queued_check_result.get_return_code() == 126 ? " - plugin may not be executable" - : (queued_check_result->get_return_code() == 127 + : (queued_check_result.get_return_code() == 127 ? " - plugin may be missing" : "")) << ')'; @@ -1212,7 +1221,7 @@ int service::handle_async_check_result(check_result* queued_check_result) { * parse check output to get: (1) short output, (2) long output, * (3) perf data */ - std::string output{queued_check_result->get_output()}; + std::string output{queued_check_result.get_output()}; std::string plugin_output; std::string long_plugin_output; std::string perf_data; @@ -1244,7 +1253,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { << "\n" << "Perf Data:\n" << (get_perf_data().empty() ? "NULL" : get_perf_data()); - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Parsing check output Short Output: {} Long Output: {} Perf Data: {}", get_plugin_output().empty() ? "NULL" : get_plugin_output(), get_long_plugin_output().empty() ? 
"NULL" : get_long_plugin_output(), @@ -1252,7 +1262,7 @@ int service::handle_async_check_result(check_result* queued_check_result) { /* grab the return code */ _current_state = static_cast( - queued_check_result->get_return_code()); + queued_check_result.get_return_code()); } /* record the last state time */ @@ -1284,10 +1294,10 @@ int service::handle_async_check_result(check_result* queued_check_result) { if (get_check_type() == check_passive) { if (config->log_passive_checks()) engine_logger(log_passive_check, basic) - << "PASSIVE SERVICE CHECK: " << _hostname << ";" << _description - << ";" << _current_state << ";" << get_plugin_output(); - log_v2::checks()->info("PASSIVE SERVICE CHECK: {};{};{};{}", _hostname, - _description, _current_state, get_plugin_output()); + << "PASSIVE SERVICE CHECK: " << _hostname << ";" << name() << ";" + << _current_state << ";" << get_plugin_output(); + SPDLOG_LOGGER_INFO(log_v2::checks(), "PASSIVE SERVICE CHECK: {};{};{};{}", + _hostname, name(), _current_state, get_plugin_output()); } host* hst{get_host_ptr()}; @@ -1320,16 +1330,17 @@ int service::handle_async_check_result(check_result* queued_check_result) { << " CA: " << get_current_attempt() << " MA: " << max_check_attempts() << " CS: " << _current_state << " LS: " << _last_state << " LHS: " << _last_hard_state; - log_v2::checks()->debug("ST: {} CA: {} MA: {} CS: {} LS: {} LHS: {}", - (get_state_type() == soft ? "SOFT" : "HARD"), - get_current_attempt(), max_check_attempts(), - _current_state, _last_state, _last_hard_state); + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "ST: {} CA: {} MA: {} CS: {} LS: {} LHS: {}", + (get_state_type() == soft ? 
"SOFT" : "HARD"), get_current_attempt(), + max_check_attempts(), _current_state, _last_state, _last_hard_state); /* check for a state change (either soft or hard) */ if (_current_state != _last_state) { engine_logger(dbg_checks, most) << "Service has changed state since last check!"; - log_v2::checks()->debug("Service has changed state since last check!"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Service has changed state since last check!"); state_change = true; } @@ -1341,7 +1352,7 @@ int service::handle_async_check_result(check_result* queued_check_result) { */ if (_host_problem_at_last_check && _current_state == service::state_ok) { engine_logger(dbg_checks, most) << "Service had a HARD STATE CHANGE!!"; - log_v2::checks()->debug("Service had a HARD STATE CHANGE!!"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Service had a HARD STATE CHANGE!!"); hard_state_change = true; } @@ -1353,7 +1364,7 @@ int service::handle_async_check_result(check_result* queued_check_result) { (_current_state != _last_hard_state || get_last_state_change() > get_last_hard_state_change())) { engine_logger(dbg_checks, most) << "Service had a HARD STATE CHANGE!!"; - log_v2::checks()->debug("Service had a HARD STATE CHANGE!!"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Service had a HARD STATE CHANGE!!"); hard_state_change = true; } @@ -1443,7 +1454,7 @@ int service::handle_async_check_result(check_result* queued_check_result) { /* if the service is up and running OK... */ if (_current_state == service::state_ok) { engine_logger(dbg_checks, more) << "Service is OK."; - log_v2::checks()->debug("Service is OK."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Service is OK."); /* reset the acknowledgement flag (this should already have been done, but * just in case...) 
*/ @@ -1454,7 +1465,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { if (hst->get_current_state() != host::state_up) { engine_logger(dbg_checks, more) << "Host is NOT UP, so we'll check it to see if it recovered..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Host is NOT UP, so we'll check it to see if it recovered..."); /* 09/23/07 EG don't launch a new host check if we already did so earlier @@ -1463,7 +1475,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { engine_logger(dbg_checks, more) << "First host check was already initiated, so we'll skip a " "new host check."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "First host check was already initiated, so we'll skip a " "new host check."); } else { @@ -1476,8 +1489,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { config->cached_host_check_horizon())) { engine_logger(dbg_checks, more) << "* Using cached host state: " << hst->get_current_state(); - log_v2::checks()->debug("* Using cached host state: {}", - hst->get_current_state()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "* Using cached host state: {}", + hst->get_current_state()); update_check_stats(ACTIVE_ONDEMAND_HOST_CHECK_STATS, current_time); update_check_stats(ACTIVE_CACHED_HOST_CHECK_STATS, current_time); } @@ -1492,7 +1505,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { /* if a hard service recovery has occurred... */ if (hard_state_change) { engine_logger(dbg_checks, more) << "Service experienced a HARD RECOVERY."; - log_v2::checks()->debug("Service experienced a HARD RECOVERY."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Service experienced a HARD RECOVERY."); /* set the state type macro */ set_state_type(hard); @@ -1521,7 +1535,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { /* else if a soft service recovery has occurred... 
*/ else if (state_change) { engine_logger(dbg_checks, more) << "Service experienced a SOFT RECOVERY."; - log_v2::checks()->debug("Service experienced a SOFT RECOVERY."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Service experienced a SOFT RECOVERY."); /* this is a soft recovery */ set_state_type(soft); @@ -1539,7 +1554,7 @@ int service::handle_async_check_result(check_result* queued_check_result) { /* else no service state change has occurred... */ else { engine_logger(dbg_checks, more) << "Service did not change state."; - log_v2::checks()->debug("Service did not change state."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Service did not change state."); } /* Check if we need to send a recovery notification */ notify(reason_recovery, "", "", notification_option_none); @@ -1559,9 +1574,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { set_no_more_notifications(false); if (reschedule_check) - next_service_check = - (time_t)(get_last_check() + - check_interval() * config->interval_length()); + next_service_check = (time_t)( + get_last_check() + check_interval() * config->interval_length()); } /*******************************************/ @@ -1571,16 +1585,16 @@ int service::handle_async_check_result(check_result* queued_check_result) { /* hey, something's not working quite like it should... */ else { engine_logger(dbg_checks, more) << "Service is in a non-OK state!"; - log_v2::checks()->debug("Service is in a non-OK state!"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Service is in a non-OK state!"); /* check the route to the host if its up right now... 
*/ if (hst->get_current_state() == host::state_up) { engine_logger(dbg_checks, more) << "Host is currently UP, so we'll recheck its state to " "make sure..."; - log_v2::checks()->debug( - "Host is currently UP, so we'll recheck its state to " - "make sure..."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Host is currently UP, so we'll recheck its state to " + "make sure..."); /* previous logic was to simply run a sync (serial) host check */ /* can we use the last cached host state? */ @@ -1593,8 +1607,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { route_result = hst->get_current_state(); engine_logger(dbg_checks, more) << "* Using cached host state: " << hst->get_current_state(); - log_v2::checks()->debug("* Using cached host state: {}", - hst->get_current_state()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "* Using cached host state: {}", + hst->get_current_state()); update_check_stats(ACTIVE_ONDEMAND_HOST_CHECK_STATS, current_time); update_check_stats(ACTIVE_CACHED_HOST_CHECK_STATS, current_time); } @@ -1615,8 +1629,9 @@ int service::handle_async_check_result(check_result* queued_check_result) { route_result = hst->get_current_state(); engine_logger(dbg_checks, more) << "* Using last known host state: " << hst->get_current_state(); - log_v2::checks()->debug("* Using last known host state: {}", - hst->get_current_state()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "* Using last known host state: {}", + hst->get_current_state()); update_check_stats(ACTIVE_ONDEMAND_HOST_CHECK_STATS, current_time); update_check_stats(ACTIVE_CACHED_HOST_CHECK_STATS, current_time); } @@ -1626,7 +1641,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { */ else { engine_logger(dbg_checks, more) << "Host is currently DOWN/UNREACHABLE."; - log_v2::checks()->debug("Host is currently DOWN/UNREACHABLE."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Host is currently DOWN/UNREACHABLE."); /* the service wobbled between non-OK states, so 
check the host... */ if ((state_change && !state_changes_use_cached_state) && @@ -1634,7 +1650,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { engine_logger(dbg_checks, more) << "Service wobbled between non-OK states, so we'll recheck" " the host state..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Service wobbled between non-OK states, so we'll recheck" " the host state..."); /* previous logic was to simply run a sync (serial) host check */ @@ -1651,7 +1668,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { else { engine_logger(dbg_checks, more) << "Assuming host is in same state as before..."; - log_v2::checks()->debug("Assuming host is in same state as before..."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Assuming host is in same state as before..."); /* if the host has never been checked before, set the checked flag and * last check time */ @@ -1677,7 +1695,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { if (route_result != host::state_up) { engine_logger(dbg_checks, most) << "Host is not UP, so we mark state changes if appropriate"; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Host is not UP, so we mark state changes if appropriate"); /* "fake" a hard state change for the service - well, its not really fake, @@ -1723,8 +1742,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { engine_logger(dbg_checks, more) << "Current/Max Attempt(s): " << get_current_attempt() << '/' << max_check_attempts(); - log_v2::checks()->debug("Current/Max Attempt(s): {}/{}", - get_current_attempt(), max_check_attempts()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), "Current/Max Attempt(s): {}/{}", + get_current_attempt(), max_check_attempts()); /* if we should retry the service check, do so (except it the host is down * or unreachable!) 
*/ @@ -1734,15 +1753,15 @@ int service::handle_async_check_result(check_result* queued_check_result) { if (route_result != host::state_up) { engine_logger(dbg_checks, more) << "Host isn't UP, so we won't retry the service check..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Host isn't UP, so we won't retry the service check..."); /* the host is not up, so reschedule the next service check at regular * interval */ if (reschedule_check) - next_service_check = - (time_t)(get_last_check() + - check_interval() * config->interval_length()); + next_service_check = (time_t)( + get_last_check() + check_interval() * config->interval_length()); /* log the problem as a hard state if the host just went down */ if (hard_state_change) { @@ -1758,8 +1777,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { else { engine_logger(dbg_checks, more) << "Host is UP, so we'll retry the service check..."; - log_v2::checks()->debug( - "Host is UP, so we'll retry the service check..."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Host is UP, so we'll retry the service check..."); /* this is a soft state */ set_state_type(soft); @@ -1772,9 +1791,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { handle_service_event(); if (reschedule_check) - next_service_check = - (time_t)(get_last_check() + - retry_interval() * config->interval_length()); + next_service_check = (time_t)( + get_last_check() + retry_interval() * config->interval_length()); } /* perform dependency checks on the second to last check of the service */ @@ -1783,15 +1801,15 @@ int service::handle_async_check_result(check_result* queued_check_result) { engine_logger(dbg_checks, more) << "Looking for services to check for predictive " "dependency checks..."; - log_v2::checks()->debug( - "Looking for services to check for predictive " - "dependency checks..."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Looking for services to check for predictive 
" + "dependency checks..."); /* check services that THIS ONE depends on for notification AND * execution */ /* we do this because we might be sending out a notification soon and we * want the dependency logic to be accurate */ - std::pair id({_hostname, _description}); + std::pair id({_hostname, name()}); auto p(servicedependency::servicedependencies.equal_range(id)); for (servicedependency_mmap::const_iterator it{p.first}, end{p.second}; it != end; ++it) { @@ -1804,7 +1822,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { << "Predictive check of service '" << master_service->get_description() << "' on host '" << master_service->get_hostname() << "' queued."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Predictive check of service '{}' on host '{}' queued.", master_service->get_description(), master_service->get_hostname()); @@ -1820,7 +1839,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { engine_logger(dbg_checks, more) << "Service has reached max number of rechecks, so we'll " "handle the error..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Service has reached max number of rechecks, so we'll " "handle the error..."); @@ -1872,9 +1892,8 @@ int service::handle_async_check_result(check_result* queued_check_result) { /* reschedule the next check at the regular interval */ if (reschedule_check) - next_service_check = - (time_t)(get_last_check() + - check_interval() * config->interval_length()); + next_service_check = (time_t)( + get_last_check() + check_interval() * config->interval_length()); } /* should we obsessive over service checks? 
*/ @@ -1887,8 +1906,9 @@ int service::handle_async_check_result(check_result* queued_check_result) { if (reschedule_check) { engine_logger(dbg_checks, more) << "Rescheduling next check of service at " << my_ctime(&next_service_check); - log_v2::checks()->debug("Rescheduling next check of service at {}", - my_ctime(&next_service_check)); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Rescheduling next check of service at {}", + my_ctime(&next_service_check)); /* default is to reschedule service check unless a test below fails... */ set_should_be_scheduled(true); @@ -1946,13 +1966,13 @@ int service::handle_async_check_result(check_result* queued_check_result) { } /* send data to event broker */ - broker_service_check( - NEBTYPE_SERVICECHECK_PROCESSED, NEBFLAG_NONE, NEBATTR_NONE, this, - get_check_type(), queued_check_result->get_start_time(), - queued_check_result->get_finish_time(), nullptr, get_latency(), - get_execution_time(), config->service_check_timeout(), - queued_check_result->get_early_timeout(), - queued_check_result->get_return_code(), nullptr, nullptr); + broker_service_check(NEBTYPE_SERVICECHECK_PROCESSED, NEBFLAG_NONE, + NEBATTR_NONE, this, get_check_type(), + queued_check_result.get_start_time(), + queued_check_result.get_finish_time(), get_latency(), + get_execution_time(), config->service_check_timeout(), + queued_check_result.get_early_timeout(), + queued_check_result.get_return_code(), nullptr, nullptr); if (!(reschedule_check && get_should_be_scheduled() && has_been_checked()) || !active_checks_enabled()) { @@ -2017,15 +2037,15 @@ int service::log_event() { log_options = tab_service_states[_current_state].first; state = tab_service_states[_current_state].second.c_str(); } - std::string const& state_type{tab_state_type[get_state_type()]}; + const std::string& state_type{tab_state_type[get_state_type()]}; engine_logger(log_options, basic) - << "SERVICE ALERT: " << _hostname << ";" << _description << ";" << state - << ";" << state_type << ";" << 
get_current_attempt() << ";" + << "SERVICE ALERT: " << _hostname << ";" << name() << ";" << state << ";" + << state_type << ";" << get_current_attempt() << ";" << get_plugin_output(); - log_v2::events()->info("SERVICE ALERT: {};{};{};{};{};{}", _hostname, - _description, state, state_type, get_current_attempt(), - get_plugin_output()); + SPDLOG_LOGGER_INFO(log_v2::events(), "SERVICE ALERT: {};{};{};{};{};{}", + _hostname, name(), state, state_type, + get_current_attempt(), get_plugin_output()); return OK; } @@ -2049,13 +2069,14 @@ void service::check_for_flapping(bool update, * change calculation */ engine_logger(dbg_functions, basic) << "check_for_flapping()"; - log_v2::functions()->trace("check_for_flapping()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "check_for_flapping()"); engine_logger(dbg_flapping, more) - << "Checking service '" << _description << "' on host '" << _hostname + << "Checking service '" << name() << "' on host '" << _hostname << "' for flapping..."; - log_v2::checks()->debug("Checking service '{}' on host '{}' for flapping...", - _description, _hostname); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Checking service '{}' on host '{}' for flapping...", + name(), _hostname); /* what threshold values should we use (global or service-specific)? 
*/ low_threshold = (get_low_flap_threshold() <= 0.0) @@ -2125,9 +2146,10 @@ void service::check_for_flapping(bool update, << com::centreon::logging::setprecision(2) << "LFT=" << low_threshold << ", HFT=" << high_threshold << ", CPC=" << curved_percent_change << ", PSC=" << curved_percent_change << "%"; - log_v2::checks()->debug("LFT={:.2f}, HFT={:.2f}, CPC={:.2f}, PSC={:.2f}%", - low_threshold, high_threshold, curved_percent_change, - curved_percent_change); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "LFT={:.2f}, HFT={:.2f}, CPC={:.2f}, PSC={:.2f}%", + low_threshold, high_threshold, curved_percent_change, + curved_percent_change); /* don't do anything if we don't have flap detection enabled on a program-wide * basis */ @@ -2158,8 +2180,9 @@ void service::check_for_flapping(bool update, << com::centreon::logging::setprecision(2) << "Service " << (is_flapping ? "is" : "is not") << " flapping (" << curved_percent_change << "% state change)."; - log_v2::checks()->debug("Service {} flapping ({:.2f}% state change).", - is_flapping ? "is" : "is not", curved_percent_change); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Service {} flapping ({:.2f}% state change).", + is_flapping ? "is" : "is not", curved_percent_change); /* did the service just start flapping? 
*/ if (is_flapping && !get_is_flapping()) @@ -2176,7 +2199,7 @@ int service::handle_service_event() { nagios_macros* mac(get_global_macros()); engine_logger(dbg_functions, basic) << "handle_service_event()"; - log_v2::functions()->trace("handle_service_event()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "handle_service_event()"); /* send event data to broker */ broker_statechange_data(NEBTYPE_STATECHANGE_END, NEBFLAG_NONE, NEBATTR_NONE, @@ -2226,7 +2249,8 @@ int service::obsessive_compulsive_service_check_processor() { engine_logger(dbg_functions, basic) << "obsessive_compulsive_service_check_processor()"; - log_v2::functions()->trace("obsessive_compulsive_service_check_processor()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "obsessive_compulsive_service_check_processor()"); /* bail out if we shouldn't be obsessing */ if (config->obsess_over_services() == false) @@ -2258,10 +2282,10 @@ int service::obsessive_compulsive_service_check_processor() { << "Raw obsessive compulsive service processor " "command line: " << raw_command; - log_v2::checks()->debug( - "Raw obsessive compulsive service processor " - "command line: {}", - raw_command); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Raw obsessive compulsive service processor " + "command line: {}", + raw_command); /* process any macros in the raw command line */ process_macros_r(mac, raw_command, processed_command, macro_options); @@ -2273,10 +2297,10 @@ int service::obsessive_compulsive_service_check_processor() { engine_logger(dbg_checks, most) << "Processed obsessive compulsive service " "processor command line: " << processed_command; - log_v2::checks()->debug( - "Processed obsessive compulsive service " - "processor command line: {}", - processed_command); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Processed obsessive compulsive service " + "processor command line: {}", + processed_command); /* run the command */ try { @@ -2287,7 +2311,8 @@ int service::obsessive_compulsive_service_check_processor() { 
engine_logger(log_runtime_error, basic) << "Error: can't execute compulsive service processor command line '" << processed_command << "' : " << e.what(); - log_v2::runtime()->error( + SPDLOG_LOGGER_ERROR( + log_v2::runtime(), "Error: can't execute compulsive service processor command line '{}' : " "{}", processed_command, e.what()); @@ -2299,12 +2324,13 @@ int service::obsessive_compulsive_service_check_processor() { if (early_timeout == true) engine_logger(log_runtime_warning, basic) << "Warning: OCSP command '" << processed_command << "' for service '" - << _description << "' on host '" << _hostname << "' timed out after " + << name() << "' on host '" << _hostname << "' timed out after " << config->ocsp_timeout() << " seconds"; - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: OCSP command '{}' for service '{}' on host '{}' timed out " "after {} seconds", - processed_command, _description, _hostname, config->ocsp_timeout()); + processed_command, name(), _hostname, config->ocsp_timeout()); return OK; } @@ -2333,15 +2359,16 @@ int service::run_scheduled_check(int check_options, double latency) { bool time_is_valid = true; engine_logger(dbg_functions, basic) << "run_scheduled_service_check()"; - log_v2::functions()->trace("run_scheduled_service_check()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "run_scheduled_service_check()"); engine_logger(dbg_checks, basic) - << "Attempting to run scheduled check of service '" << _description + << "Attempting to run scheduled check of service '" << name() << "' on host '" << _hostname << "': check options=" << check_options << ", latency=" << latency; - log_v2::checks()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Attempting to run scheduled check of service '{}' on host '{}': check " "options={}, latency={}", - _description, _hostname, check_options, latency); + name(), _hostname, check_options, latency); /* attempt to run the check */ result = run_async_check(check_options, latency, true, 
true, &time_is_valid, @@ -2351,8 +2378,8 @@ int service::run_scheduled_check(int check_options, double latency) { if (result == ERROR) { engine_logger(dbg_checks, more) << "Unable to run scheduled service check at this time"; - log_v2::checks()->debug( - "Unable to run scheduled service check at this time"); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Unable to run scheduled service check at this time"); /* only attempt to (re)schedule checks that should get checked... */ if (get_should_be_scheduled()) { @@ -2384,18 +2411,20 @@ int service::run_scheduled_check(int check_options, double latency) { next_valid_time, this->check_period_ptr)) { set_next_check((time_t)(next_valid_time + 60 * 60 * 24 * 7)); engine_logger(log_runtime_warning, basic) - << "Warning: Check of service '" << _description << "' on host '" + << "Warning: Check of service '" << name() << "' on host '" << _hostname << "' could not be " "rescheduled properly. Scheduling check for next week..."; - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: Check of service '{}' on host '{}' could not be " "rescheduled properly. 
Scheduling check for next week...", - _description, _hostname); + name(), _hostname); engine_logger(dbg_checks, more) << "Unable to find any valid times to reschedule the next " "service check!"; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Unable to find any valid times to reschedule the next " "service check!"); } @@ -2406,8 +2435,9 @@ int service::run_scheduled_check(int check_options, double latency) { engine_logger(dbg_checks, more) << "Rescheduled next service check for " << my_ctime(&next_valid_time); - log_v2::checks()->debug("Rescheduled next service check for {}", - my_ctime(&next_valid_time)); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Rescheduled next service check for {}", + my_ctime(&next_valid_time)); } } } @@ -2444,30 +2474,32 @@ int service::run_async_check(int check_options, << "service::run_async_check, check_options=" << check_options << ", latency=" << latency << ", scheduled_check=" << scheduled_check << ", reschedule_check=" << reschedule_check; - log_v2::functions()->trace( - "service::run_async_check, check_options={}, latency={}, " - "scheduled_check={}, reschedule_check={}", - check_options, latency, scheduled_check, reschedule_check); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "service::run_async_check, check_options={}, latency={}, " + "scheduled_check={}, reschedule_check={}", + check_options, latency, scheduled_check, + reschedule_check); // Preamble. 
if (!get_check_command_ptr()) { engine_logger(log_runtime_error, basic) << "Error: Attempt to run active check on service '" - << get_description() << "' on host '" << get_host_ptr()->get_name() + << get_description() << "' on host '" << get_host_ptr()->name() << "' with no check command"; - log_v2::runtime()->error( + SPDLOG_LOGGER_ERROR( + log_v2::runtime(), "Error: Attempt to run active check on service '{}' on host '{}' with " "no check command", - get_description(), get_host_ptr()->get_name()); + get_description(), get_host_ptr()->name()); return ERROR; } engine_logger(dbg_checks, basic) << "** Running async check of service '" << get_description() << "' on host '" << get_hostname() << "'..."; - log_v2::checks()->trace( - "** Running async check of service '{} on host '{}'...", - get_description(), get_hostname()); + SPDLOG_LOGGER_TRACE(log_v2::checks(), + "** Running async check of service '{} on host '{}'...", + get_description(), get_hostname()); // Check if the service is viable now. if (!verify_check_viability(check_options, time_is_valid, preferred_time)) @@ -2478,8 +2510,8 @@ int service::run_async_check(int check_options, timeval end_time = {0, 0}; int res = broker_service_check( NEBTYPE_SERVICECHECK_ASYNC_PRECHECK, NEBFLAG_NONE, NEBATTR_NONE, this, - checkable::check_active, start_time, end_time, check_command().c_str(), - get_latency(), 0.0, 0, false, 0, nullptr, nullptr); + checkable::check_active, start_time, end_time, get_latency(), 0.0, 0, + false, 0, nullptr, nullptr); // Service check was cancelled by NEB module. reschedule check later. 
if (NEBERROR_CALLBACKCANCEL == res) { @@ -2489,7 +2521,8 @@ int service::run_async_check(int check_options, engine_logger(log_runtime_error, basic) << "Error: Some broker module cancelled check of service '" << get_description() << "' on host '" << get_hostname(); - log_v2::runtime()->error( + SPDLOG_LOGGER_ERROR( + log_v2::runtime(), "Error: Some broker module cancelled check of service '{}' on host " "'{}'", get_description(), get_hostname()); @@ -2500,7 +2533,8 @@ int service::run_async_check(int check_options, engine_logger(dbg_functions, basic) << "Some broker module overrode check of service '" << get_description() << "' on host '" << get_hostname() << "' so we'll bail out"; - log_v2::functions()->trace( + SPDLOG_LOGGER_TRACE( + log_v2::functions(), "Some broker module overrode check of service '{}' on host '{}' so " "we'll bail out", get_description(), get_hostname()); @@ -2510,8 +2544,8 @@ int service::run_async_check(int check_options, // Checking starts. engine_logger(dbg_checks, basic) << "Checking service '" << get_description() << "' on host '" << get_hostname() << "'..."; - log_v2::checks()->trace("Checking service '{}' on host '{}'...", - get_description(), get_hostname()); + SPDLOG_LOGGER_TRACE(log_v2::checks(), "Checking service '{}' on host '{}'...", + get_description(), get_hostname()); // Clear check options. if (scheduled_check) @@ -2536,22 +2570,22 @@ int service::run_async_check(int check_options, ++currently_running_service_checks; engine_logger(dbg_checks, basic) << "Current running service checks: " << currently_running_service_checks; - log_v2::checks()->trace("Current running service checks: {}", - currently_running_service_checks); + SPDLOG_LOGGER_TRACE(log_v2::checks(), "Current running service checks: {}", + currently_running_service_checks); // Set the execution flag. set_is_executing(true); // Get command object. 
- commands::command* cmd = get_check_command_ptr(); + commands::command* cmd = get_check_command_ptr().get(); std::string processed_cmd(cmd->process_cmd(macros)); // Send event broker. - res = broker_service_check( - NEBTYPE_SERVICECHECK_INITIATE, NEBFLAG_NONE, NEBATTR_NONE, this, - checkable::check_active, start_time, end_time, check_command().c_str(), - get_latency(), 0.0, config->service_check_timeout(), false, 0, - processed_cmd.c_str(), nullptr); + res = broker_service_check(NEBTYPE_SERVICECHECK_INITIATE, NEBFLAG_NONE, + NEBATTR_NONE, this, checkable::check_active, + start_time, end_time, get_latency(), 0.0, + config->service_check_timeout(), false, 0, + processed_cmd.c_str(), nullptr); // Restore latency. set_latency(old_latency); @@ -2568,22 +2602,23 @@ int service::run_async_check(int check_options, start_time.tv_sec); bool retry; - std::unique_ptr check_result_info; + check_result::pointer check_result_info; do { // Init check result info. - check_result_info.reset( - new check_result(service_check, this, checkable::check_active, - check_options, reschedule_check, latency, start_time, - start_time, false, true, service::state_ok, "")); + check_result_info = std::make_shared( + service_check, this, checkable::check_active, check_options, + reschedule_check, latency, start_time, start_time, false, true, + service::state_ok, ""); retry = false; try { // Run command. 
uint64_t id = - cmd->run(processed_cmd, *macros, config->service_check_timeout()); - if (id != 0) - checks::checker::instance().add_check_result( - id, check_result_info.release()); + cmd->run(processed_cmd, *macros, config->service_check_timeout(), + check_result_info, this); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "run id={} {} for service {} host {}", id, + processed_cmd, _service_id, _hostname); } catch (com::centreon::exceptions::interruption const& e) { retry = true; } catch (std::exception const& e) { @@ -2597,13 +2632,13 @@ int service::run_async_check(int check_options, check_result_info->set_output("(Execute command failed)"); // Queue check result. - checks::checker::instance().add_check_result_to_reap( - check_result_info.release()); + checks::checker::instance().add_check_result_to_reap(check_result_info); engine_logger(log_runtime_warning, basic) << "Error: Service check command execution failed: " << e.what(); - log_v2::runtime()->warn( - "Error: Service check command execution failed: {}", e.what()); + SPDLOG_LOGGER_WARN(log_v2::runtime(), + "Error: Service check command execution failed: {}", + e.what()); } } while (retry); @@ -2623,27 +2658,29 @@ int service::run_async_check(int check_options, * no_update_status_now is true, if it should be sent. */ bool service::schedule_check(time_t check_time, - int options, + uint32_t options, bool no_update_status_now) { engine_logger(dbg_functions, basic) << "schedule_service_check()"; - log_v2::functions()->trace("schedule_service_check()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "schedule_service_check()"); engine_logger(dbg_checks, basic) << "Scheduling a " << (options & CHECK_OPTION_FORCE_EXECUTION ? 
"forced" : "non-forced") - << ", active check of service '" << _description << "' on host '" - << _hostname << "' @ " << my_ctime(&check_time); - log_v2::checks()->trace( + << ", active check of service '" << name() << "' on host '" << _hostname + << "' @ " << my_ctime(&check_time); + SPDLOG_LOGGER_TRACE( + log_v2::checks(), "Scheduling a {}, active check of service '{}' on host '{}' @ {}", - options & CHECK_OPTION_FORCE_EXECUTION ? "forced" : "non-forced", - _description, _hostname, my_ctime(&check_time)); + options & CHECK_OPTION_FORCE_EXECUTION ? "forced" : "non-forced", name(), + _hostname, my_ctime(&check_time)); // Don't schedule a check if active checks // of this service are disabled. if (!active_checks_enabled() && !(options & CHECK_OPTION_FORCE_EXECUTION)) { engine_logger(dbg_checks, basic) << "Active checks of this service are disabled."; - log_v2::checks()->trace("Active checks of this service are disabled."); + SPDLOG_LOGGER_TRACE(log_v2::checks(), + "Active checks of this service are disabled."); return false; } @@ -2658,7 +2695,8 @@ bool service::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "Found another service check event for this service @ " << my_ctime(&temp_event->run_time); - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Found another service check event for this service @ {}", my_ctime(&temp_event->run_time)); @@ -2675,7 +2713,8 @@ bool service::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "New service check event is forced and occurs before the " "existing event, so the new event will be used instead."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "New service check event is forced and occurs before the " "existing event, so the new event will be used instead."); } @@ -2688,7 +2727,8 @@ bool service::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "New service check event is forced, so it will be used " "instead of the 
existing event."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "New service check event is forced, so it will be used " "instead of the existing event."); } @@ -2699,7 +2739,8 @@ bool service::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "New service check event occurs before the existing " "(older) event, so it will be used instead."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "New service check event occurs before the existing " "(older) event, so it will be used instead."); } @@ -2708,7 +2749,8 @@ bool service::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "New service check event occurs after the existing event, " "so we'll ignore it."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "New service check event occurs after the existing event, " "so we'll ignore it."); } @@ -2728,7 +2770,8 @@ bool service::schedule_check(time_t check_time, } engine_logger(dbg_checks, most) << "Scheduling new service check event."; - log_v2::checks()->debug("Scheduling new service check event."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Scheduling new service check event."); // Allocate memory for a new event item. 
try { @@ -2756,7 +2799,8 @@ bool service::schedule_check(time_t check_time, engine_logger(dbg_checks, most) << "Keeping original service check event (ignoring the new one)."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Keeping original service check event (ignoring the new one)."); } @@ -2771,24 +2815,25 @@ void service::set_flap(double percent_change, double low_threshold, int allow_flapstart_notification) { engine_logger(dbg_functions, basic) << "set_service_flap()"; - log_v2::functions()->trace("set_service_flap()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "set_service_flap()"); - engine_logger(dbg_flapping, more) - << "Service '" << _description << "' on host '" << _hostname - << "' started flapping!"; - log_v2::checks()->debug("Service '{}' on host '{}' started flapping!", - _description, _hostname); + engine_logger(dbg_flapping, more) << "Service '" << name() << "' on host '" + << _hostname << "' started flapping!"; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Service '{}' on host '{}' started flapping!", name(), + _hostname); /* log a notice - this one is parsed by the history CGI */ engine_logger(log_runtime_warning, basic) << com::centreon::logging::setprecision(1) - << "SERVICE FLAPPING ALERT: " << _hostname << ";" << _description + << "SERVICE FLAPPING ALERT: " << _hostname << ";" << name() << ";STARTED; Service appears to have started flapping (" << percent_change << "% change >= " << high_threshold << "% threshold)"; - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "SERVICE FLAPPING ALERT: {};{};STARTED; Service appears to have started " "flapping ({:.1f}% change >= {:.1f}% threshold)", - _hostname, _description, percent_change, high_threshold); + _hostname, name(), percent_change, high_threshold); /* add a non-persistent comment to the service */ std::ostringstream oss; @@ -2829,24 +2874,25 @@ void service::clear_flap(double percent_change, double high_threshold, double low_threshold) { 
engine_logger(dbg_functions, basic) << "clear_service_flap()"; - log_v2::functions()->trace("clear_service_flap()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "clear_service_flap()"); - engine_logger(dbg_flapping, more) - << "Service '" << _description << "' on host '" << _hostname - << "' stopped flapping."; - log_v2::checks()->debug("Service '{}' on host '{}' stopped flapping.", - _description, _hostname); + engine_logger(dbg_flapping, more) << "Service '" << name() << "' on host '" + << _hostname << "' stopped flapping."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Service '{}' on host '{}' stopped flapping.", name(), + _hostname); /* log a notice - this one is parsed by the history CGI */ engine_logger(log_info_message, basic) << com::centreon::logging::setprecision(1) - << "SERVICE FLAPPING ALERT: " << _hostname << ";" << _description + << "SERVICE FLAPPING ALERT: " << _hostname << ";" << name() << ";STOPPED; Service appears to have stopped flapping (" << percent_change << "% change < " << low_threshold << "% threshold)"; - log_v2::events()->info( + SPDLOG_LOGGER_INFO( + log_v2::events(), "SERVICE FLAPPING ALERT: {};{};STOPPED; Service appears to have stopped " "flapping ({:.1f}% change < {:.1f}% threshold)", - _hostname, _description, percent_change, low_threshold); + _hostname, name(), percent_change, low_threshold); /* delete the comment we added earlier */ if (this->get_flapping_comment_id() != 0) @@ -2873,14 +2919,14 @@ void service::enable_flap_detection() { unsigned long attr = MODATTR_FLAP_DETECTION_ENABLED; engine_logger(dbg_functions, basic) << "service::enable_flap_detection()"; - log_v2::functions()->trace("service::enable_flap_detection()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "service::enable_flap_detection()"); engine_logger(dbg_flapping, more) - << "Enabling flap detection for service '" << _description - << "' on host '" << _hostname << "'."; - log_v2::checks()->debug( - "Enabling flap detection for service '{}' on host '{}'.", 
_description, - _hostname); + << "Enabling flap detection for service '" << name() << "' on host '" + << _hostname << "'."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Enabling flap detection for service '{}' on host '{}'.", + name(), _hostname); /* nothing to do... */ if (flap_detection_enabled()) @@ -2911,14 +2957,14 @@ void service::disable_flap_detection() { unsigned long attr = MODATTR_FLAP_DETECTION_ENABLED; engine_logger(dbg_functions, basic) << "disable_service_flap_detection()"; - log_v2::functions()->trace("disable_service_flap_detection()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "disable_service_flap_detection()"); engine_logger(dbg_flapping, more) - << "Disabling flap detection for service '" << _description - << "' on host '" << _hostname << "'."; - log_v2::checks()->debug( - "Disabling flap detection for service '{}' on host '{}'.", _description, - _hostname); + << "Disabling flap detection for service '" << name() << "' on host '" + << _hostname << "'."; + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Disabling flap detection for service '{}' on host '{}'.", + name(), _hostname); /* nothing to do... 
*/ if (!flap_detection_enabled()) @@ -2971,7 +3017,7 @@ bool service::verify_check_viability(int check_options, int check_interval = 0; engine_logger(dbg_functions, basic) << "check_service_check_viability()"; - log_v2::functions()->trace("check_service_check_viability()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "check_service_check_viability()"); /* get the check interval to use if we need to reschedule the check */ if (get_state_type() == soft && _current_state != service::state_ok) @@ -2996,7 +3042,8 @@ bool service::verify_check_viability(int check_options, engine_logger(dbg_checks, most) << "Active checks of the service are currently disabled."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Active checks of the service are currently disabled."); } @@ -3012,7 +3059,8 @@ bool service::verify_check_viability(int check_options, engine_logger(dbg_checks, most) << "This is not a valid time for this service to be actively " "checked."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "This is not a valid time for this service to be actively " "checked."); } @@ -3026,7 +3074,8 @@ bool service::verify_check_viability(int check_options, engine_logger(dbg_checks, most) << "Execution dependencies for this service failed, so it will " "not be actively checked."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Execution dependencies for this service failed, so it will " "not be actively checked."); } @@ -3048,8 +3097,8 @@ void service::grab_macros_r(nagios_macros* mac) { int service::notify_contact(nagios_macros* mac, contact* cntct, reason_type type, - std::string const& not_author, - std::string const& not_data, + const std::string& not_author, + const std::string& not_data, int options __attribute__((unused)), int escalated) { std::string raw_command; @@ -3062,7 +3111,7 @@ int service::notify_contact(nagios_macros* mac, int neb_result; engine_logger(dbg_functions, basic) << 
"notify_contact_of_service()"; - log_v2::functions()->trace("notify_contact_of_service()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "notify_contact_of_service()"); engine_logger(dbg_notifications, most) << "** Notifying contact '" << cntct->get_name() << "'"; log_v2::notifications()->info("** Notifying contact '{}'", cntct->get_name()); @@ -3094,15 +3143,15 @@ int service::notify_contact(nagios_macros* mac, neb_result = broker_contact_notification_method_data( NEBTYPE_CONTACTNOTIFICATIONMETHOD_START, NEBFLAG_NONE, NEBATTR_NONE, service_notification, type, method_start_time, method_end_time, - (void*)this, cntct, cmd->get_command_line().c_str(), not_author.c_str(), - not_data.c_str(), escalated, nullptr); + (void*)this, cntct, not_author.c_str(), not_data.c_str(), escalated, + nullptr); if (NEBERROR_CALLBACKCANCEL == neb_result) break; else if (NEBERROR_CALLBACKOVERRIDE == neb_result) continue; /* get the raw command line */ - get_raw_command_line_r(mac, cmd.get(), cmd->get_command_line().c_str(), + get_raw_command_line_r(mac, cmd, cmd->get_command_line().c_str(), raw_command, macro_options); if (raw_command.empty()) continue; @@ -3168,9 +3217,9 @@ int service::notify_contact(nagios_macros* mac, engine_logger(log_runtime_error, basic) << "Error: can't execute service notification '" << cntct->get_name() << "' : " << e.what(); - log_v2::runtime()->error( - "Error: can't execute service notification '{}' : {}", - cntct->get_name(), e.what()); + SPDLOG_LOGGER_ERROR(log_v2::runtime(), + "Error: can't execute service notification '{}' : {}", + cntct->get_name(), e.what()); } /* check to see if the notification command timed out */ @@ -3193,8 +3242,8 @@ int service::notify_contact(nagios_macros* mac, broker_contact_notification_method_data( NEBTYPE_CONTACTNOTIFICATIONMETHOD_END, NEBFLAG_NONE, NEBATTR_NONE, service_notification, type, method_start_time, method_end_time, - (void*)this, cntct, cmd->get_command_line().c_str(), not_author.c_str(), - not_data.c_str(), 
escalated, nullptr); + (void*)this, cntct, not_author.c_str(), not_data.c_str(), escalated, + nullptr); } /* get end time */ @@ -3232,7 +3281,8 @@ bool service::is_valid_escalation_for_notification(escalation const* e, engine_logger(dbg_functions, basic) << "service::is_valid_escalation_for_notification()"; - log_v2::functions()->trace("service::is_valid_escalation_for_notification()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "service::is_valid_escalation_for_notification()"); /* get the current time */ time(¤t_time); @@ -3304,8 +3354,9 @@ bool service::is_result_fresh(time_t current_time, int log_this) { engine_logger(dbg_checks, most) << "Checking freshness of service '" << this->get_description() << "' on host '" << this->get_hostname() << "'..."; - log_v2::checks()->debug("Checking freshness of service '{}' on host '{}'...", - this->get_description(), this->get_hostname()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Checking freshness of service '{}' on host '{}'...", + this->get_description(), this->get_hostname()); /* use user-supplied freshness threshold or auto-calculate a freshness * threshold to use? 
*/ @@ -3324,8 +3375,9 @@ bool service::is_result_fresh(time_t current_time, int log_this) { engine_logger(dbg_checks, most) << "Freshness thresholds: service=" << this->get_freshness_threshold() << ", use=" << freshness_threshold; - log_v2::checks()->debug("Freshness thresholds: service={}, use={}", - this->get_freshness_threshold(), freshness_threshold); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Freshness thresholds: service={}, use={}", + this->get_freshness_threshold(), freshness_threshold); /* calculate expiration time */ /* CHANGED 11/10/05 EG - program start is only used in expiration time @@ -3345,9 +3397,9 @@ bool service::is_result_fresh(time_t current_time, int log_this) { * suggested by Altinity */ else if (this->active_checks_enabled() && event_start > get_last_check() && this->get_freshness_threshold() == 0) - expiration_time = (time_t)(event_start + freshness_threshold + - (config->max_service_check_spread() * - config->interval_length())); + expiration_time = (time_t)( + event_start + freshness_threshold + + (config->max_service_check_spread() * config->interval_length())); else expiration_time = (time_t)(get_last_check() + freshness_threshold); @@ -3355,9 +3407,10 @@ bool service::is_result_fresh(time_t current_time, int log_this) { << "HBC: " << this->has_been_checked() << ", PS: " << program_start << ", ES: " << event_start << ", LC: " << get_last_check() << ", CT: " << current_time << ", ET: " << expiration_time; - log_v2::checks()->debug("HBC: {}, PS: {}, ES: {}, LC: {}, CT: {}, ET: {}", - this->has_been_checked(), program_start, event_start, - get_last_check(), current_time, expiration_time); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "HBC: {}, PS: {}, ES: {}, LC: {}, CT: {}, ET: {}", + this->has_been_checked(), program_start, event_start, + get_last_check(), current_time, expiration_time); /* the results for the last check of this service are stale */ if (expiration_time < current_time) { @@ -3376,7 +3429,8 @@ bool 
service::is_result_fresh(time_t current_time, int log_this) { << "m " << tseconds << "s). I'm forcing an immediate check " "of the service."; - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: The results of service '{}' on host '{}' are stale by {}d " "{}h {}m {}s (threshold={}d {}h {}m {}s). I'm forcing an immediate " "check " @@ -3392,7 +3446,8 @@ bool service::is_result_fresh(time_t current_time, int log_this) { << "m " << tseconds << "s). Forcing an immediate check of " "the service..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Check results for service '{}' on host '{}' are stale by {}d {}h {}m " "{}s (threshold={}d {}h {}m {}s). Forcing an immediate check of the " "service...", @@ -3405,9 +3460,9 @@ bool service::is_result_fresh(time_t current_time, int log_this) { engine_logger(dbg_checks, more) << "Check results for service '" << this->get_description() << "' on host '" << this->get_hostname() << "' are fresh."; - log_v2::checks()->debug( - "Check results for service '{}' on host '{}' are fresh.", - this->get_description(), this->get_hostname()); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Check results for service '{}' on host '{}' are fresh.", + this->get_description(), this->get_hostname()); return true; } @@ -3419,7 +3474,8 @@ bool service::is_result_fresh(time_t current_time, int log_this) { void service::handle_flap_detection_disabled() { engine_logger(dbg_functions, basic) << "handle_service_flap_detection_disabled()"; - log_v2::functions()->trace("handle_service_flap_detection_disabled()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "handle_service_flap_detection_disabled()"); /* if the service was flapping, remove the flapping indicator */ if (get_is_flapping()) { @@ -3483,10 +3539,11 @@ bool service::authorized_by_dependencies( dependency::types dependency_type) const { engine_logger(dbg_functions, basic) << "service::authorized_by_dependencies()"; - 
log_v2::functions()->trace("service::authorized_by_dependencies()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), + "service::authorized_by_dependencies()"); - auto p(servicedependency::servicedependencies.equal_range( - {_hostname, _description})); + auto p( + servicedependency::servicedependencies.equal_range({_hostname, name()})); for (servicedependency_mmap::const_iterator it{p.first}, end{p.second}; it != end; ++it) { servicedependency* dep{it->second.get()}; @@ -3539,7 +3596,7 @@ void service::check_for_orphaned() { time_t expected_time{0L}; engine_logger(dbg_functions, basic) << "check_for_orphaned_services()"; - log_v2::functions()->trace("check_for_orphaned_services()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "check_for_orphaned_services()"); /* get the current time */ time(¤t_time); @@ -3569,7 +3626,8 @@ void service::check_for_orphaned() { << "' looks like it was orphaned " "(results never came back). I'm scheduling an immediate check " "of the service..."; - log_v2::runtime()->warn( + SPDLOG_LOGGER_WARN( + log_v2::runtime(), "Warning: The check of service '{}' on host '{}' looks like it was " "orphaned " "(results never came back). 
I'm scheduling an immediate check " @@ -3579,7 +3637,8 @@ void service::check_for_orphaned() { engine_logger(dbg_checks, more) << "Service '" << it->first.second << "' on host '" << it->first.first << "' was orphaned, so we're scheduling an immediate check..."; - log_v2::checks()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::checks(), "Service '{}' on host '{}' was orphaned, so we're scheduling an " "immediate check...", it->first.second, it->first.first); @@ -3602,16 +3661,18 @@ void service::check_result_freshness() { time_t current_time{0L}; engine_logger(dbg_functions, basic) << "check_service_result_freshness()"; - log_v2::functions()->trace("check_service_result_freshness()"); + SPDLOG_LOGGER_TRACE(log_v2::functions(), "check_service_result_freshness()"); engine_logger(dbg_checks, more) << "Checking the freshness of service check results..."; - log_v2::checks()->debug("Checking the freshness of service check results..."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Checking the freshness of service check results..."); /* bail out if we're not supposed to be checking freshness */ if (!config->check_service_freshness()) { engine_logger(dbg_checks, more) << "Service freshness checking is disabled."; - log_v2::checks()->debug("Service freshness checking is disabled."); + SPDLOG_LOGGER_DEBUG(log_v2::checks(), + "Service freshness checking is disabled."); return; } /* get the current time */ @@ -3667,7 +3728,7 @@ void service::check_result_freshness() { } } -std::string const& service::get_current_state_as_string() const { +const std::string& service::get_current_state_as_string() const { return tab_service_states[get_current_state()].second; } @@ -3703,12 +3764,12 @@ void service::resolve(int& w, int& e) { notifier::resolve(warnings, errors); } catch (std::exception const& e) { engine_logger(log_verification_error, basic) - << "Error: Service description '" << _description << "' of host '" + << "Error: Service description '" << name() << "' of host '" << _hostname << "' 
has problem in its notifier part: " << e.what(); log_v2::config()->error( "Error: Service description '{}' of host '{}' has problem in its " "notifier part: {}", - _description, _hostname, e.what()); + name(), _hostname, e.what()); } { @@ -3722,10 +3783,10 @@ void service::resolve(int& w, int& e) { << "Error: Host '" << _hostname << "' specified in service " "'" - << _description << "' not defined anywhere!"; + << name() << "' not defined anywhere!"; log_v2::config()->error( "Error: Host '{}' specified in service '{}' not defined anywhere!", - _hostname, _description); + _hostname, name()); errors++; set_host_ptr(nullptr); } else { @@ -3735,7 +3796,7 @@ void service::resolve(int& w, int& e) { /* add a reverse link from the host to the service for faster lookups * later */ - it->second->services.insert({{_hostname, _description}, this}); + it->second->services.insert({{_hostname, name()}, this}); // Notify event broker. timeval tv(get_broker_timestamp(NULL)); @@ -3748,7 +3809,7 @@ void service::resolve(int& w, int& e) { if (get_notifications_enabled() && get_notify_on(notifier::ok) && !get_notify_on(notifier::warning) && !get_notify_on(notifier::critical)) { engine_logger(log_verification_error, basic) - << "Warning: Recovery notification option in service '" << _description + << "Warning: Recovery notification option in service '" << name() << "' for host '" << _hostname << "' doesn't make any sense - specify warning and /or critical " "options as well"; @@ -3756,7 +3817,7 @@ void service::resolve(int& w, int& e) { "Warning: Recovery notification option in service '{}' for host '{}' " "doesn't make any sense - specify warning and /or critical " "options as well", - _description, _hostname); + name(), _hostname); warnings++; } @@ -3764,7 +3825,7 @@ void service::resolve(int& w, int& e) { if (get_notifications_enabled() && get_notification_interval() && get_notification_interval() < check_interval()) { engine_logger(log_verification_error, basic) - << "Warning: 
Service '" << _description << "' on host '" << _hostname + << "Warning: Service '" << name() << "' on host '" << _hostname << "' has a notification interval less than " "its check interval! Notifications are only re-sent after " "checks are made, so the effective notification interval will " @@ -3775,20 +3836,20 @@ void service::resolve(int& w, int& e) { "its check interval! Notifications are only re-sent after " "checks are made, so the effective notification interval will " "be that of the check interval.", - _description, _hostname); + name(), _hostname); warnings++; } /* check for illegal characters in service description */ - if (contains_illegal_object_chars(_description.c_str())) { + if (contains_illegal_object_chars(name().c_str())) { engine_logger(log_verification_error, basic) - << "Error: The description string for service '" << _description + << "Error: The description string for service '" << name() << "' on host '" << _hostname << "' contains one or more illegal characters."; log_v2::config()->error( "Error: The description string for service '{}' on host '{}' contains " "one or more illegal characters.", - _description, _hostname); + name(), _hostname); errors++; } @@ -3796,7 +3857,7 @@ void service::resolve(int& w, int& e) { e += errors; if (errors) - throw engine_error() << "Cannot resolve service '" << _description + throw engine_error() << "Cannot resolve service '" << name() << "' of host '" << _hostname << "'"; } diff --git a/engine/src/utils.cc b/engine/src/utils.cc index 2c4dead5d2e..1751ae319da 100644 --- a/engine/src/utils.cc +++ b/engine/src/utils.cc @@ -76,7 +76,7 @@ int my_system_r(nagios_macros* mac, } engine_logger(dbg_commands, more) << "Running command '" << cmd << "'..."; - log_v2::commands()->debug("Running command '{}'...", cmd); + SPDLOG_LOGGER_DEBUG(log_v2::commands(), "Running command '{}'...", cmd); timeval start_time = timeval(); timeval end_time = timeval(); @@ -109,7 +109,8 @@ int my_system_r(nagios_macros* mac, << 
"Execution time=" << *exectime << " sec, early timeout=" << *early_timeout << ", result=" << result << ", output=" << output; - log_v2::commands()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::commands(), "Execution time={:.3f} sec, early timeout={}, result={}, output={}", *exectime, *early_timeout, result, output); @@ -156,8 +157,8 @@ int get_raw_command_line_r(nagios_macros* mac, engine_logger(dbg_commands | dbg_checks | dbg_macros, most) << "Raw Command Input: " << cmd_ptr->get_command_line(); - log_v2::commands()->debug("Raw Command Input: {}", - cmd_ptr->get_command_line()); + SPDLOG_LOGGER_DEBUG(log_v2::commands(), "Raw Command Input: {}", + cmd_ptr->get_command_line()); /* get the full command line */ full_command = cmd_ptr->get_command_line(); @@ -209,7 +210,8 @@ int get_raw_command_line_r(nagios_macros* mac, engine_logger(dbg_commands | dbg_checks | dbg_macros, most) << "Expanded Command Output: " << full_command; - log_v2::commands()->debug("Expanded Command Output: {}", full_command); + SPDLOG_LOGGER_DEBUG(log_v2::commands(), "Expanded Command Output: {}", + full_command); return OK; } diff --git a/engine/src/xpddefault.cc b/engine/src/xpddefault.cc index 7828227ed11..52ff2ab4f66 100644 --- a/engine/src/xpddefault.cc +++ b/engine/src/xpddefault.cc @@ -426,12 +426,12 @@ int xpddefault_run_host_performance_data_command(nagios_macros* mac, if (early_timeout == true) engine_logger(log_runtime_warning, basic) << "Warning: Host performance data command '" << processed_command_line - << "' for host '" << hst->get_name() << "' timed out after " + << "' for host '" << hst->name() << "' timed out after " << config->perfdata_timeout() << " seconds"; log_v2::runtime()->warn( "Warning: Host performance data command '{}' for host '{}' timed out " "after {} seconds", - processed_command_line, hst->get_name(), config->perfdata_timeout()); + processed_command_line, hst->name(), config->perfdata_timeout()); return result; } diff --git a/engine/src/xsddefault.cc 
b/engine/src/xsddefault.cc index c46ad533a0c..661324d08a1 100644 --- a/engine/src/xsddefault.cc +++ b/engine/src/xsddefault.cc @@ -293,7 +293,7 @@ int xsddefault_save_status_data() { stream << "hoststatus {\n" "\thost_name=" - << it->second->get_name() + << it->second->name() << "\n" "\tmodified_attributes=" << it->second->get_modified_attributes() diff --git a/engine/tests/CMakeLists.txt b/engine/tests/CMakeLists.txt index bc796da2afa..4022af51bcf 100755 --- a/engine/tests/CMakeLists.txt +++ b/engine/tests/CMakeLists.txt @@ -1,48 +1,48 @@ -## -## Copyright 2016, 2020-2021 Centreon -## - -## This file is part of Centreon Engine. -## -## Centreon Engine is free software : you can redistribute it and / or -## modify it under the terms of the GNU General Public License version 2 -## as published by the Free Software Foundation. -## -## Centreon Engine is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -## General Public License for more details. -## -## You should have received a copy of the GNU General Public License -## along with Centreon Engine. If not, see -## . -## +# # +# # Copyright 2016, 2020-2021 Centreon +# # -# Enable unit tests or not . -if (WITH_TESTING) +# # This file is part of Centreon Engine. +# # +# # Centreon Engine is free software : you can redistribute it and / or +# # modify it under the terms of the GNU General Public License version 2 +# # as published by the Free Software Foundation. +# # +# # Centreon Engine is distributed in the hope that it will be useful, +# # but WITHOUT ANY WARRANTY; without even the implied warranty of +# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# # General Public License for more details. +# # +# # You should have received a copy of the GNU General Public License +# # along with Centreon Engine. If not, see +# # . +# # -# Tests directory. 
+# Enable unit tests or not . +if(WITH_TESTING) + # Tests directory. # Add root inclusion direction. set(MODULE_DIR "${PROJECT_SOURCE_DIR}/modules/external_commands") set(INC_DIR "${MODULE_DIR}/inc/com/centreon/engine/modules/external_commands") include_directories(${PROJECT_SOURCE_DIR} ${MODULE_DIR}/inc) -#Set directory. + # Set directory. set(TESTS_DIR "${PROJECT_SOURCE_DIR}/tests") include_directories(${PROJECT_SOURCE_DIR}/enginerpc) add_definitions(-DENGINERPC_TESTS_PATH="${TESTS_DIR}/enginerpc") add_executable(rpc_client_engine - ${TESTS_DIR}/enginerpc/client.cc) + ${TESTS_DIR}/enginerpc/client.cc) target_link_libraries(rpc_client_engine cerpc CONAN_PKG::grpc CONAN_PKG::openssl CONAN_PKG::zlib dl pthread) add_executable(bin_connector_test_run - "${TESTS_DIR}/commands/bin_connector_test_run.cc") + "${TESTS_DIR}/commands/bin_connector_test_run.cc") target_link_libraries(bin_connector_test_run cce_core pthread) - target_precompile_headers(bin_connector_test_run PRIVATE ../precomp_inc/precomp.hh) + target_precompile_headers(bin_connector_test_run REUSE_FROM cce_core) set(ut_sources + # Sources. "${TESTS_DIR}/parse-check-output.cc" "${TESTS_DIR}/checks/service_check.cc" @@ -114,7 +114,8 @@ if (WITH_TESTING) "${TESTS_DIR}/timeperiod/get_next_valid_time/skip_interval.cc" "${TESTS_DIR}/timeperiod/get_next_valid_time/specific_month_date.cc" "${TESTS_DIR}/timeperiod/utils.cc" -# # Headers. + + # # Headers. 
"${TESTS_DIR}/test_engine.hh" "${TESTS_DIR}/timeperiod/utils.hh" ) @@ -123,32 +124,32 @@ if (WITH_TESTING) include_directories(${TESTS_DIR}) add_executable(ut_engine ${ut_sources}) - target_precompile_headers(ut_engine PRIVATE ../precomp_inc/precomp.hh) + target_precompile_headers(ut_engine REUSE_FROM cce_core) set_target_properties( - ut_engine rpc_client_engine bin_connector_test_run - PROPERTIES - RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tests - RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/tests - RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/tests - RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${CMAKE_BINARY_DIR}/tests - RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${CMAKE_BINARY_DIR}/tests + ut_engine rpc_client_engine bin_connector_test_run + PROPERTIES + RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${CMAKE_BINARY_DIR}/tests ) - #file used by timeperiod-test.cc - file(COPY ${TESTS_DIR}/configuration/timeperiods.cfg DESTINATION ${CMAKE_BINARY_DIR}/tests/ ) + # file used by timeperiod-test.cc + file(COPY ${TESTS_DIR}/configuration/timeperiods.cfg DESTINATION ${CMAKE_BINARY_DIR}/tests/) add_test(NAME tests COMMAND ut_engine) - if (WITH_COVERAGE) + if(WITH_COVERAGE) set(COVERAGE_EXCLUDES '${PROJECT_BINARY_DIR}/*' '${PROJECT_SOURCE_DIR}/tests/*' '/usr/include/* ') SETUP_TARGET_FOR_COVERAGE( - NAME engine-test-coverage - EXECUTABLE ut_engine - DEPENDENCIES ut_engine + NAME engine-test-coverage + EXECUTABLE ut_engine + DEPENDENCIES ut_engine ) set(GCOV gcov) - endif () - target_link_libraries(ut_engine ${ENGINERPC} cce_core pthread ${GCOV} CONAN_PKG::gtest CONAN_PKG::grpc CONAN_PKG::openssl CONAN_PKG::zlib CONAN_PKG::fmt dl) + endif() -endif () + target_link_libraries(ut_engine ${ENGINERPC} cce_core pthread ${GCOV} CONAN_PKG::gtest 
CONAN_PKG::grpc CONAN_PKG::openssl CONAN_PKG::zlib CONAN_PKG::fmt dl) +endif() diff --git a/engine/tests/checks/anomalydetection.cc b/engine/tests/checks/anomalydetection.cc index 3e01b451cd9..afd92032180 100644 --- a/engine/tests/checks/anomalydetection.cc +++ b/engine/tests/checks/anomalydetection.cc @@ -37,6 +37,7 @@ #include "com/centreon/engine/configuration/host.hh" #include "com/centreon/engine/configuration/service.hh" #include "com/centreon/engine/exceptions/error.hh" +#include "com/centreon/engine/log_v2.hh" #include "helper.hh" using namespace com::centreon; @@ -49,6 +50,9 @@ class AnomalydetectionCheck : public TestEngine { void SetUp() override { init_config_state(); + log_v2::checks()->set_level(spdlog::level::trace); + log_v2::commands()->set_level(spdlog::level::trace); + configuration::applier::contact ct_aply; configuration::contact ctct{new_configuration_contact("admin", true)}; ct_aply.add_object(ctct); @@ -115,23 +119,27 @@ class AnomalydetectionCheck : public TestEngine { std::shared_ptr _ad; }; +// clang-format off + /* The following test comes from this array (inherited from Nagios behaviour): * - * | Time | Check # | State | State type | State change | - * ------------------------------------------------------ - * | 0 | 1 | OK | HARD | No | - * | 1 | 1 | CRTCL | SOFT | Yes | - * | 2 | 2 | CRTCL | SOFT | Yes | - * | 3 | 3 | CRTCL | HARD | Yes | - * | 4 | 3 | CRTCL | HARD | Yes | - * | 5 | 3 | CRTCL | HARD | No | - * | 6 | 1 | OK | HARD | Yes | - * | 7 | 1 | OK | HARD | No | - * | 8 | 1 | CRTCL | SOFT | Yes | - * | 9 | 2 | OK | SOFT | Yes | - * | 10 | 1 | OK | HARD | No | - * ------------------------------------------------------ + * | Time | Check # | State | State type | State change | perf data in range | ad State | ad state type | ad do check + * -------------------------------------------------------------------------------------------------------------------- + * | 0 | 1 | OK | HARD | No | Y | OK | H | N + * | 1 | 1 | CRTCL | SOFT | Yes 
| Y | OK | H | N + * | 2 | 2 | CRTCL | SOFT | No | Y | OK | H | N + * | 3 | 3 | CRTCL | HARD | Yes | Y | OK | H | N + * | 4 | 3 | OK | HARD | Yes | Y | OK | H | N + * | 5 | 3 | OK | HARD | No | N | CRTCL | S | Y + * | 6 | 1 | OK | HARD | No | N | CRTCL | S | Y + * | 7 | 1 | OK | HARD | No | N | CRTCL | H | Y + * | 8 | 1 | OK | HARD | No | Y | OK | H | Y + * | 9 | 1 | OK | HARD | No | Y | OK | H | N + * -------------------------------------------------------------------------------------------------------------------- */ + +// clang-format on + TEST_F(AnomalydetectionCheck, StatusChanges) { CreateFile( "/tmp/thresholds_status_change.json", @@ -158,15 +166,17 @@ TEST_F(AnomalydetectionCheck, StatusChanges) { _ad->set_current_state(engine::service::state_ok); _ad->set_last_hard_state(engine::service::state_ok); _ad->set_last_hard_state_change(50000); + _ad->set_last_state_change(50000); _ad->set_state_type(checkable::hard); _ad->set_current_attempt(1); _ad->set_last_check(50000); + // --- 1 ---- set_time(50500); time_t now = std::time(nullptr); std::string cmd(fmt::format( "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " - "critical| metric=90;25;60", + "critical| metric=80;25;60", now)); process_external_command(cmd.c_str()); checks::checker::instance().reap(); @@ -175,7 +185,7 @@ TEST_F(AnomalydetectionCheck, StatusChanges) { ASSERT_EQ(_svc->get_last_state_change(), now); ASSERT_EQ(_svc->get_current_attempt(), 1); ASSERT_EQ(_svc->get_plugin_output(), "service critical"); - ASSERT_EQ(_svc->get_perf_data(), "metric=90;25;60"); + ASSERT_EQ(_svc->get_perf_data(), "metric=80;25;60"); int check_options = 0; int latency = 0; bool time_is_valid; @@ -183,181 +193,189 @@ TEST_F(AnomalydetectionCheck, StatusChanges) { _ad->run_async_check(check_options, latency, true, true, &time_is_valid, &preferred_time); checks::checker::instance().reap(); - ASSERT_EQ(_ad->get_state_type(), checkable::soft); - ASSERT_EQ(_ad->get_current_state(), 
engine::service::state_critical); - ASSERT_EQ(_ad->get_last_state_change(), now); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); ASSERT_EQ(_ad->get_current_attempt(), 1); - ASSERT_EQ(_ad->get_plugin_output(), - "NON-OK: Unusual activity, the actual value of metric is 90.00 " - "which is outside the forecasting range [73.31 : 83.26]"); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00"); ASSERT_EQ(_ad->get_perf_data(), - "metric=90 metric_lower_thresholds=73.31 " + "metric=80 metric_lower_thresholds=73.31 " "metric_upper_thresholds=83.26"); + // --- 2 ---- set_time(51000); now = std::time(nullptr); cmd = fmt::format( - "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;service warning| " - "metric=50;25;60", + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " + "critical| " + "metric=80;25;60", now); process_external_command(cmd.c_str()); checks::checker::instance().reap(); ASSERT_EQ(_svc->get_state_type(), checkable::soft); - ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); - ASSERT_EQ(_svc->get_last_state_change(), now); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_svc->get_last_state_change(), 50500); ASSERT_EQ(_svc->get_current_attempt(), 2); _ad->run_async_check(check_options, latency, true, true, &time_is_valid, &preferred_time); checks::checker::instance().reap(); - ASSERT_EQ(_ad->get_state_type(), checkable::soft); - ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); - ASSERT_EQ(_ad->get_plugin_output(), - "NON-OK: Unusual activity, the actual value of metric is 50.00 " - "which is outside the forecasting range [72.62 : 82.52]"); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + 
ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00"); ASSERT_EQ(_ad->get_perf_data(), - "metric=50 metric_lower_thresholds=72.62 " + "metric=80 metric_lower_thresholds=72.62 " "metric_upper_thresholds=82.52"); - ASSERT_EQ(_ad->get_current_attempt(), 2); - set_time(51500); + // --- 3 ---- + set_time(51250); now = std::time(nullptr); - time_t previous = now; cmd = fmt::format( "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;2;service " "critical| " - "metric=110foo;25;60", + "metric=80;25;60", now); process_external_command(cmd.c_str()); checks::checker::instance().reap(); ASSERT_EQ(_svc->get_state_type(), checkable::hard); ASSERT_EQ(_svc->get_current_state(), engine::service::state_critical); - ASSERT_EQ(_svc->get_last_hard_state_change(), now); + ASSERT_EQ(_svc->get_last_state_change(), 50500); ASSERT_EQ(_svc->get_current_attempt(), 3); _ad->run_async_check(check_options, latency, true, true, &time_is_valid, &preferred_time); checks::checker::instance().reap(); - ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); - ASSERT_EQ(_ad->get_last_hard_state_change(), now); ASSERT_EQ(_ad->get_state_type(), checkable::hard); - ASSERT_EQ(_ad->get_plugin_output(), - "NON-OK: Unusual activity, the actual value of metric is 110.00foo " - "which is outside the forecasting range [71.93foo : 81.78foo]"); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00"); ASSERT_EQ(_ad->get_perf_data(), - "metric=110foo metric_lower_thresholds=71.93foo " - "metric_upper_thresholds=81.78foo"); - ASSERT_EQ(_ad->get_current_attempt(), 3); + "metric=80 metric_lower_thresholds=72.28 " + "metric_upper_thresholds=82.15"); + // --- 4 ---- set_time(52000); now = std::time(nullptr); + time_t previous = now; cmd = fmt::format( - "[{}] 
PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;service warning| " - "metric=30%;25;60", + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service " + "ok| " + "metric=80foo;25;60", now); process_external_command(cmd.c_str()); checks::checker::instance().reap(); ASSERT_EQ(_svc->get_state_type(), checkable::hard); - ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); ASSERT_EQ(_svc->get_last_hard_state_change(), now); - ASSERT_EQ(_svc->get_current_attempt(), 3); + ASSERT_EQ(_svc->get_current_attempt(), 1); _ad->run_async_check(check_options, latency, true, true, &time_is_valid, &preferred_time); checks::checker::instance().reap(); ASSERT_EQ(_ad->get_state_type(), checkable::hard); - ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); - ASSERT_EQ(_ad->get_last_hard_state_change(), previous); - ASSERT_EQ(_ad->get_plugin_output(), - "NON-OK: Unusual activity, the actual value of metric is 30.00% " - "which is outside the forecasting range [71.24% : 81.04%]"); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=80.00foo"); ASSERT_EQ(_ad->get_perf_data(), - "metric=30% metric_lower_thresholds=71.24% " - "metric_upper_thresholds=81.04%"); - ASSERT_EQ(_ad->get_current_attempt(), 3); + "metric=80foo metric_lower_thresholds=71.24foo " + "metric_upper_thresholds=81.04foo"); + // --- 5 ---- set_time(52500); - previous = now; now = std::time(nullptr); cmd = fmt::format( - "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;1;service warning| " - "metric=35%;25;60", + "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok| " + "metric=30%;25;60", now); process_external_command(cmd.c_str()); checks::checker::instance().reap(); ASSERT_EQ(_svc->get_state_type(), checkable::hard); - 
ASSERT_EQ(_svc->get_current_state(), engine::service::state_warning); - ASSERT_EQ(_svc->get_last_hard_state_change(), previous); - ASSERT_EQ(_svc->get_current_attempt(), 3); - ASSERT_EQ(_svc->get_plugin_output(), "service warning"); - ASSERT_EQ(_svc->get_perf_data(), "metric=35%;25;60"); + ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); + ASSERT_EQ(_svc->get_last_hard_state_change(), 52000); + ASSERT_EQ(_svc->get_last_state_change(), 52000); + ASSERT_EQ(_svc->get_current_attempt(), 1); _ad->run_async_check(check_options, latency, true, true, &time_is_valid, &preferred_time); checks::checker::instance().reap(); - ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_state_change(), now); ASSERT_EQ(_ad->get_plugin_output(), - "NON-OK: Unusual activity, the actual value of metric is 35.00% " + "NON-OK: Unusual activity, the actual value of metric is 30.00% " "which is outside the forecasting range [70.55% : 80.30%]"); ASSERT_EQ(_ad->get_perf_data(), - "metric=35% metric_lower_thresholds=70.55% " + "metric=30% metric_lower_thresholds=70.55% " "metric_upper_thresholds=80.30%"); - ASSERT_EQ(_ad->get_current_attempt(), 3); + ASSERT_EQ(_ad->get_current_attempt(), 1); + // --- 6 ---- set_time(53000); previous = now; now = std::time(nullptr); - cmd = fmt::format( - "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok| " - "metric=70%;80;90", - now); - process_external_command(cmd.c_str()); - checks::checker::instance().reap(); - ASSERT_EQ(_svc->get_state_type(), checkable::hard); - ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); - ASSERT_EQ(_svc->get_last_hard_state_change(), now); - ASSERT_EQ(_svc->get_current_attempt(), 1); _ad->run_async_check(check_options, latency, true, true, &time_is_valid, &preferred_time); + checks::checker::instance().wait_completion(); 
checks::checker::instance().reap(); - ASSERT_EQ(_ad->get_state_type(), checkable::hard); - ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); - ASSERT_EQ(_ad->get_last_hard_state_change(), now); - ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=70.00%"); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_state_change(), previous); + ASSERT_EQ(_ad->get_plugin_output(), + "NON-OK: Unusual activity, the actual value of metric is 12.00 " + "which is outside the forecasting range [69.86 : 79.56]"); ASSERT_EQ(_ad->get_perf_data(), - "metric=70% metric_lower_thresholds=69.86% " - "metric_upper_thresholds=79.56%"); - ASSERT_EQ(_ad->get_current_attempt(), 1); + "metric=12 metric_lower_thresholds=69.86 " + "metric_upper_thresholds=79.56"); + ASSERT_EQ(_ad->get_current_attempt(), 2); + // --- 7 ---- set_time(53500); previous = now; now = std::time(nullptr); - cmd = fmt::format( - "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok| " - "metric=71%;80;90", - now); - process_external_command(cmd.c_str()); + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion(); checks::checker::instance().reap(); - ASSERT_EQ(_svc->get_state_type(), checkable::hard); - ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); - ASSERT_EQ(_svc->get_last_hard_state_change(), previous); - ASSERT_EQ(_svc->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_state_type(), checkable::hard); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); + ASSERT_EQ(_ad->get_last_hard_state_change(), now); + ASSERT_EQ(_ad->get_last_state_change(), 52500); + ASSERT_EQ(_ad->get_plugin_output(), + "NON-OK: Unusual activity, the actual value of metric is 12.00 " + "which is outside the forecasting range [69.17 : 78.82]"); + ASSERT_EQ(_ad->get_perf_data(), + "metric=12 
metric_lower_thresholds=69.17 " + "metric_upper_thresholds=78.82"); + ASSERT_EQ(_ad->get_current_attempt(), 3); + + // --- 8 ---- + set_time(54000); + _ad->get_check_command_ptr()->set_command_line( + "echo 'output| metric=70%;50;75'"); + previous = now; + now = std::time(nullptr); _ad->run_async_check(check_options, latency, true, true, &time_is_valid, &preferred_time); + checks::checker::instance().wait_completion(); checks::checker::instance().reap(); ASSERT_EQ(_ad->get_state_type(), checkable::hard); ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); - ASSERT_EQ(_ad->get_last_hard_state_change(), previous); - ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=71.00%"); + ASSERT_EQ(_ad->get_last_hard_state_change(), now); + ASSERT_EQ(_ad->get_last_state_change(), now); + ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=70.00%"); ASSERT_EQ(_ad->get_perf_data(), - "metric=71% metric_lower_thresholds=69.17% " - "metric_upper_thresholds=78.82%"); + "metric=70% metric_lower_thresholds=68.48% " + "metric_upper_thresholds=78.08%"); ASSERT_EQ(_ad->get_current_attempt(), 1); - set_time(54000); + // --- 9 ---- + set_time(54500); previous = now; now = std::time(nullptr); @@ -368,70 +386,21 @@ TEST_F(AnomalydetectionCheck, StatusChanges) { checks::checker::instance().reap(); ASSERT_EQ(_svc->get_state_type(), checkable::soft); ASSERT_EQ(_svc->get_current_state(), engine::service::state_unknown); - ASSERT_EQ(_svc->get_last_hard_state_change(), now - 1000); + ASSERT_EQ(_svc->get_last_hard_state_change(), 52000); ASSERT_EQ(_svc->get_last_state_change(), now); ASSERT_EQ(_svc->get_current_attempt(), 1); + ASSERT_EQ(_svc->get_plugin_output(), "service unknown"); _ad->run_async_check(check_options, latency, true, true, &time_is_valid, &preferred_time); checks::checker::instance().reap(); ASSERT_EQ(_ad->get_state_type(), checkable::soft); ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); - 
ASSERT_EQ(_ad->get_last_hard_state_change(), now - 1000); + ASSERT_EQ(_ad->get_last_hard_state_change(), 54000); + ASSERT_EQ(_svc->get_last_state_change(), now); ASSERT_EQ(_ad->get_plugin_output(), "UNKNOWN: Unknown activity, metric did not return any values"); ASSERT_EQ(_ad->get_current_attempt(), 1); - set_time(54500); - - previous = now; - now = std::time(nullptr); - cmd = fmt::format( - "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok| " - "metric=72%;80;90", - now); - process_external_command(cmd.c_str()); - checks::checker::instance().reap(); - ASSERT_EQ(_svc->get_state_type(), checkable::soft); - ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); - ASSERT_EQ(_svc->get_last_state_change(), now); - ASSERT_EQ(_svc->get_current_attempt(), 2); - _ad->run_async_check(check_options, latency, true, true, &time_is_valid, - &preferred_time); - checks::checker::instance().reap(); - ASSERT_EQ(_ad->get_state_type(), checkable::soft); - ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); - ASSERT_EQ(_ad->get_last_state_change(), now); - ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=72.00%"); - ASSERT_EQ(_ad->get_perf_data(), - "metric=72% metric_lower_thresholds=67.79% " - "metric_upper_thresholds=77.34%"); - ASSERT_EQ(_ad->get_current_attempt(), 2); - - set_time(55000); - - previous = now; - now = std::time(nullptr); - cmd = fmt::format( - "[{}] PROCESS_SERVICE_CHECK_RESULT;test_host;test_svc;0;service ok| " - "metric=71.7%;80;90;10;100", - now); - process_external_command(cmd.c_str()); - checks::checker::instance().reap(); - ASSERT_EQ(_svc->get_state_type(), checkable::hard); - ASSERT_EQ(_svc->get_current_state(), engine::service::state_ok); - ASSERT_EQ(_svc->get_last_hard_state_change(), now); - ASSERT_EQ(_svc->get_current_attempt(), 1); - _ad->run_async_check(check_options, latency, true, true, &time_is_valid, - &preferred_time); - checks::checker::instance().reap(); - ASSERT_EQ(_ad->get_state_type(), 
checkable::hard); - ASSERT_EQ(_ad->get_current_state(), engine::service::state_ok); - ASSERT_EQ(_ad->get_last_hard_state_change(), now); - ASSERT_EQ(_ad->get_plugin_output(), "OK: Regular activity, metric=71.70%"); - ASSERT_EQ(_ad->get_perf_data(), - "metric=71.7%;;;10;100 metric_lower_thresholds=67.10%;;;10;100 " - "metric_upper_thresholds=76.60%;;;10;100"); - ASSERT_EQ(_ad->get_current_attempt(), 1); ::unlink("/tmp/thresholds_status_change.json"); } @@ -486,6 +455,7 @@ TEST_F(AnomalydetectionCheck, MetricWithQuotes) { time_t preferred_time; _ad->run_async_check(check_options, latency, true, true, &time_is_valid, &preferred_time); + checks::checker::instance().wait_completion(); checks::checker::instance().reap(); ASSERT_EQ(_ad->get_state_type(), checkable::soft); ASSERT_EQ(_ad->get_current_state(), engine::service::state_critical); @@ -497,4 +467,130 @@ TEST_F(AnomalydetectionCheck, MetricWithQuotes) { ASSERT_EQ(_ad->get_perf_data(), "'metric'=90MT;;;0;100 metric_lower_thresholds=73.31MT;;;0;100 " "metric_upper_thresholds=83.26MT;;;0;100"); + + ::unlink("/tmp/thresholds_status_change.json"); +} + +TEST_F(AnomalydetectionCheck, BadThresholdsFile) { + ::unlink("/tmp/thresholds_status_change.json"); + set_time(50000); + std::time_t now{std::time(nullptr)}; + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(50000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + _svc->set_last_check(50000); + _svc->set_perf_data("metric=90MT;25;60;0;100"); + + _ad->set_current_state(engine::service::state_ok); + _ad->set_last_hard_state(engine::service::state_ok); + _ad->set_last_hard_state_change(50000); + _ad->set_state_type(checkable::hard); + _ad->set_current_attempt(1); + _ad->set_last_check(50000); + + int check_options = 0; + int latency = 0; + bool time_is_valid; + time_t preferred_time; + 
_ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_state_change(), now); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), + "The thresholds file is not viable for metric metric"); + ASSERT_EQ(_ad->get_perf_data(), "metric=90MT;25;60;0;100"); + + set_time(51000); + now = std::time(nullptr); + // _ad is not OK so _ad will do the check + _ad->get_check_command_ptr()->set_command_line( + "echo 'output| metric=70%;50;75'"); + + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion(); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_state_change(), 50000); + ASSERT_EQ(_ad->get_current_attempt(), 2); + ASSERT_EQ(_ad->get_plugin_output(), + "The thresholds file is not viable for metric metric"); + ASSERT_EQ(_ad->get_perf_data(), "metric=70%;50;75"); + + ::unlink("/tmp/thresholds_status_change.json"); +} + +TEST_F(AnomalydetectionCheck, FileTooOld) { + CreateFile( + "/tmp/thresholds_status_change.json", + "[{\n \"host_id\": \"12\",\n \"service_id\": \"9\",\n \"metric_name\": " + "\"metric\",\n \"predict\": [{\n \"timestamp\": 50000,\n \"upper\": " + "84,\n \"lower\": 74,\n \"fit\": 79\n }, {\n \"timestamp\": 100000,\n " + "\"upper\": 10,\n \"lower\": 5,\n \"fit\": 51.5\n }, {\n \"timestamp\": " + "150000,\n \"upper\": 100,\n \"lower\": 93,\n \"fit\": 96.5\n }, {\n " + "\"timestamp\": 200000,\n \"upper\": 100,\n \"lower\": 97,\n \"fit\": " + "98.5\n }, {\n \"timestamp\": 250000,\n \"upper\": 100,\n \"lower\": " + "21,\n \"fit\": 60.5\n }\n]}]"); + _ad->init_thresholds(); + 
_ad->set_status_change(true); + + set_time(300000); + _svc->set_current_state(engine::service::state_ok); + _svc->set_last_hard_state(engine::service::state_ok); + _svc->set_last_hard_state_change(300000); + _svc->set_state_type(checkable::hard); + _svc->set_accept_passive_checks(true); + _svc->set_current_attempt(1); + _svc->set_last_check(300000); + _svc->set_perf_data("metric=90MT;25;60;0;100"); + + _ad->set_current_state(engine::service::state_ok); + _ad->set_last_hard_state(engine::service::state_ok); + _ad->set_last_hard_state_change(300000); + _ad->set_state_type(checkable::hard); + _ad->set_current_attempt(1); + _ad->set_last_check(300000); + + int check_options = 0; + int latency = 0; + bool time_is_valid; + time_t preferred_time; + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_state_change(), 300000); + ASSERT_EQ(_ad->get_current_attempt(), 1); + ASSERT_EQ(_ad->get_plugin_output(), + "The thresholds file is too old compared to the check timestamp " + "300000 for metric metric"); + ASSERT_EQ(_ad->get_perf_data(), "metric=90MT;25;60;0;100"); + + set_time(301000); + // _ad is not OK so _ad will do the check + _ad->get_check_command_ptr()->set_command_line( + "echo 'output| metric=70%;50;75'"); + + _ad->run_async_check(check_options, latency, true, true, &time_is_valid, + &preferred_time); + checks::checker::instance().wait_completion(); + checks::checker::instance().reap(); + ASSERT_EQ(_ad->get_state_type(), checkable::soft); + ASSERT_EQ(_ad->get_current_state(), engine::service::state_unknown); + ASSERT_EQ(_ad->get_last_state_change(), 300000); + ASSERT_EQ(_ad->get_current_attempt(), 2); + ASSERT_EQ(_ad->get_plugin_output(), + "The thresholds file is too old compared to the check timestamp " + "301000 for metric metric"); + 
ASSERT_EQ(_ad->get_perf_data(), "metric=70%;50;75"); + + ::unlink("/tmp/thresholds_status_change.json"); } diff --git a/engine/tests/commands/connector.cc b/engine/tests/commands/connector.cc index 506a4028cfc..a82e0f399f2 100644 --- a/engine/tests/commands/connector.cc +++ b/engine/tests/commands/connector.cc @@ -105,7 +105,7 @@ TEST_F(Connector, RunConnectorAsync) { nagios_macros macros = nagios_macros(); connector cmd_connector("RunConnectorAsync", "tests/bin_connector_test_run"); cmd_connector.set_listener(lstnr.get()); - cmd_connector.run("commande", macros, 1); + cmd_connector.run("commande", macros, 1, std::make_shared()); int timeout = 0; int max_timeout{15}; @@ -123,10 +123,11 @@ TEST_F(Connector, RunWithConnectorSwitchedOff) { connector cmd_connector("RunWithConnectorSwitchedOff", "tests/bin_connector_test_run"); { - std::unique_ptr lstnr(new my_listener); + std::unique_ptr lstnr(std::make_unique()); nagios_macros macros = nagios_macros(); cmd_connector.set_listener(lstnr.get()); - cmd_connector.run("commande --kill=1", macros, 1); + cmd_connector.run("commande --kill=1", macros, 1, + std::make_shared()); int timeout = 0; int max_timeout{15}; @@ -146,7 +147,7 @@ TEST_F(Connector, RunConnectorSetCommandLine) { nagios_macros macros = nagios_macros(); connector cmd_connector("SetCommandLine", "tests/bin_connector_test_run"); cmd_connector.set_listener(&lstnr); - cmd_connector.run("commande1", macros, 1); + cmd_connector.run("commande1", macros, 1, std::make_shared()); int timeout = 0; int max_timeout{15}; @@ -161,7 +162,7 @@ TEST_F(Connector, RunConnectorSetCommandLine) { lstnr.clear(); cmd_connector.set_command_line("tests/bin_connector_test_run"); - cmd_connector.run("commande2", macros, 1); + cmd_connector.run("commande2", macros, 1, std::make_shared()); timeout = 0; max_timeout = 15; diff --git a/engine/tests/commands/simple-command.cc b/engine/tests/commands/simple-command.cc index 3657ba82117..a33306efd60 100755 --- 
a/engine/tests/commands/simple-command.cc +++ b/engine/tests/commands/simple-command.cc @@ -19,6 +19,7 @@ #include #include +#include "com/centreon/engine/log_v2.hh" #include "../timeperiod/utils.hh" #include "com/centreon/engine/commands/raw.hh" @@ -28,11 +29,17 @@ using namespace com::centreon; using namespace com::centreon::engine; using namespace com::centreon::engine::commands; +void CreateFile(std::string const& filename, std::string const& content) { + std::ofstream oss(filename); + oss << content; +} + class SimpleCommand : public ::testing::Test { public: void SetUp() override { set_time(-1); init_config_state(); + config->interval_length(1); } void TearDown() override { deinit_config_state(); } @@ -40,7 +47,7 @@ class SimpleCommand : public ::testing::Test { class my_listener : public commands::command_listener { public: - result const& get_result() const { + result& get_result() { std::lock_guard guard(_mutex); return _res; } @@ -107,7 +114,7 @@ TEST_F(SimpleCommand, NewCommandAsync) { nagios_macros* mac(get_global_macros()); std::string cc(cmd->process_cmd(mac)); ASSERT_EQ(cc, "/bin/echo bonjour"); - cmd->run(cc, *mac, 2); + cmd->run(cc, *mac, 2, std::make_shared()); int timeout{0}; int max_timeout{3000}; while (timeout < max_timeout && lstnr->get_result().output == "") { @@ -130,7 +137,7 @@ TEST_F(SimpleCommand, LongCommandAsync) { // We force the time to be coherent with now because the function gettimeofday // that is not simulated. 
set_time(std::time(nullptr)); - cmd->run(cc, *mac, 2); + cmd->run(cc, *mac, 2, std::make_shared()); int timeout{0}; int max_timeout{15}; while (timeout < max_timeout && lstnr->get_result().output == "") { @@ -140,3 +147,89 @@ TEST_F(SimpleCommand, LongCommandAsync) { } ASSERT_EQ(lstnr->get_result().output, "(Process Timeout)"); } + +TEST_F(SimpleCommand, TooRecentDoubleCommand) { + log_v2::commands()->set_level(spdlog::level::trace); + CreateFile("/tmp/TooRecentDoubleCommand.sh", + "echo -n tutu | tee -a /tmp/TooRecentDoubleCommand;"); + + const char* path = "/tmp/TooRecentDoubleCommand"; + ::unlink(path); + std::unique_ptr lstnr(std::make_unique()); + std::unique_ptr cmd{std::make_unique( + "test", "/bin/sh /tmp/TooRecentDoubleCommand.sh")}; + cmd->set_listener(lstnr.get()); + const void* caller[] = {nullptr, path}; + cmd->add_caller_group(caller, caller + 2); + nagios_macros* mac(get_global_macros()); + std::string cc(cmd->process_cmd(mac)); + time_t now = 10000; + set_time(now); + cmd->run(cc, *mac, 2, std::make_shared(), caller[0]); + for (int wait_ind = 0; wait_ind != 50 && lstnr->get_result().output == ""; + ++wait_ind) { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + } + std::this_thread::sleep_for(std::chrono::seconds(1)); + ASSERT_EQ(lstnr->get_result().exit_code, 0); + ASSERT_EQ(lstnr->get_result().exit_status, process::status::normal); + ASSERT_EQ(lstnr->get_result().output, "tutu"); + struct stat file_stat; + ASSERT_EQ(stat(path, &file_stat), 0); + ASSERT_EQ(file_stat.st_size, 4); + ++now; + cmd->run(cc, *mac, 2, std::make_shared(), caller[1]); + for (int wait_ind = 0; wait_ind != 50 && lstnr->get_result().output == ""; + ++wait_ind) { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + } + ASSERT_EQ(lstnr->get_result().exit_code, 0); + ASSERT_EQ(lstnr->get_result().exit_status, process::status::normal); + ASSERT_EQ(lstnr->get_result().output, "tutu"); + ASSERT_EQ(stat(path, &file_stat), 0); + ASSERT_EQ(file_stat.st_size, 
4); +} + +TEST_F(SimpleCommand, SufficientOldDoubleCommand) { + log_v2::commands()->set_level(spdlog::level::trace); + CreateFile("/tmp/TooRecentDoubleCommand.sh", + "echo -n tutu | tee -a /tmp/TooRecentDoubleCommand;"); + + const char* path = "/tmp/TooRecentDoubleCommand"; + ::unlink(path); + std::unique_ptr lstnr(new my_listener); + std::unique_ptr cmd{ + new commands::raw("test", "/bin/sh /tmp/TooRecentDoubleCommand.sh")}; + cmd->set_listener(lstnr.get()); + const void* caller[] = {nullptr, path}; + cmd->add_caller_group(caller, caller + 2); + nagios_macros* mac(get_global_macros()); + std::string cc(cmd->process_cmd(mac)); + time_t now = 10000; + set_time(now); + cmd->run(cc, *mac, 2, std::make_shared(), caller[0]); + for (int wait_ind = 0; wait_ind != 50 && lstnr->get_result().output == ""; + ++wait_ind) { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + } + std::this_thread::sleep_for(std::chrono::seconds(1)); + ASSERT_EQ(lstnr->get_result().exit_code, 0); + ASSERT_EQ(lstnr->get_result().exit_status, process::status::normal); + ASSERT_EQ(lstnr->get_result().output, "tutu"); + struct stat file_stat; + ASSERT_EQ(stat(path, &file_stat), 0); + ASSERT_EQ(file_stat.st_size, 4); + now += 10; + set_time(now); + lstnr->get_result().output = ""; + cmd->run(cc, *mac, 2, std::make_shared(), caller[1]); + for (int wait_ind = 0; wait_ind != 50 && lstnr->get_result().output == ""; + ++wait_ind) { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + } + ASSERT_EQ(lstnr->get_result().exit_code, 0); + ASSERT_EQ(lstnr->get_result().exit_status, process::status::normal); + ASSERT_EQ(lstnr->get_result().output, "tutu"); + ASSERT_EQ(stat(path, &file_stat), 0); + ASSERT_EQ(file_stat.st_size, 8); +} diff --git a/engine/tests/configuration/applier/applier-global.cc b/engine/tests/configuration/applier/applier-global.cc index 2cd91df0d37..8df5466a0c8 100644 --- a/engine/tests/configuration/applier/applier-global.cc +++ 
b/engine/tests/configuration/applier/applier-global.cc @@ -88,3 +88,39 @@ TEST_F(ApplierGlobal, RpcPort) { ASSERT_EQ(st.rpc_port(), 42u); } + +TEST_F(ApplierGlobal, RpcListenAddress) { + configuration::parser parser; + configuration::state st; + + ASSERT_EQ(st.rpc_port(), 0u); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "rpc_listen_address=10.11.12.13" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", st); + std::remove("/tmp/test-config.cfg"); + + ASSERT_EQ(st.rpc_listen_address(), "10.11.12.13"); +} + +TEST_F(ApplierGlobal, NotDefinedRpcListenAddress) { + configuration::parser parser; + configuration::state st; + + ASSERT_EQ(st.rpc_port(), 0u); + + std::remove("/tmp/test-config.cfg"); + + std::ofstream ofs("/tmp/test-config.cfg"); + ofs << "rpc_port=42" << std::endl; + ofs.close(); + + parser.parse("/tmp/test-config.cfg", st); + std::remove("/tmp/test-config.cfg"); + + ASSERT_EQ(st.rpc_listen_address(), "localhost"); +} diff --git a/engine/tests/configuration/applier/applier-host.cc b/engine/tests/configuration/applier/applier-host.cc index 0712cbe56ae..c42ac5ab766 100644 --- a/engine/tests/configuration/applier/applier-host.cc +++ b/engine/tests/configuration/applier/applier-host.cc @@ -67,14 +67,14 @@ TEST_F(ApplierHost, HostRenamed) { host_map const& hm(engine::host::hosts); ASSERT_EQ(hm.size(), 1u); std::shared_ptr h1(hm.begin()->second); - ASSERT_TRUE(h1->get_name() == "test_host"); + ASSERT_TRUE(h1->name() == "test_host"); ASSERT_TRUE(hst.parse("host_name", "test_host1")); hst_aply.modify_object(hst); ASSERT_EQ(hm.size(), 1u); h1 = hm.begin()->second; - ASSERT_TRUE(h1->get_name() == "test_host1"); - ASSERT_EQ(get_host_id(h1->get_name()), 12u); + ASSERT_TRUE(h1->name() == "test_host1"); + ASSERT_EQ(get_host_id(h1->name()), 12u); } TEST_F(ApplierHost, HostRemoved) { @@ -87,7 +87,7 @@ TEST_F(ApplierHost, HostRemoved) { host_map const& hm(engine::host::hosts); ASSERT_EQ(hm.size(), 1u); 
std::shared_ptr h1(hm.begin()->second); - ASSERT_TRUE(h1->get_name() == "test_host"); + ASSERT_TRUE(h1->name() == "test_host"); ASSERT_TRUE(hst.parse("host_name", "test_host1")); hst_aply.remove_object(hst); @@ -97,8 +97,8 @@ TEST_F(ApplierHost, HostRemoved) { hst_aply.add_object(hst); h1 = hm.begin()->second; ASSERT_EQ(hm.size(), 1u); - ASSERT_TRUE(h1->get_name() == "test_host1"); - ASSERT_EQ(get_host_id(h1->get_name()), 12u); + ASSERT_TRUE(h1->name() == "test_host1"); + ASSERT_EQ(get_host_id(h1->name()), 12u); } TEST_F(ApplierHost, HostParentChildUnreachable) { diff --git a/engine/tests/helper.cc b/engine/tests/helper.cc index e0a1723ff15..fb1133a5e48 100644 --- a/engine/tests/helper.cc +++ b/engine/tests/helper.cc @@ -22,6 +22,7 @@ #include #include #include +#include "com/centreon/engine/log_v2.hh" using namespace com::centreon::engine; @@ -31,9 +32,14 @@ void init_config_state(void) { if (config == nullptr) config = new configuration::state; + config->log_file_line(true); + config->log_file(""); + // Hack to instanciate the logger. 
- configuration::applier::logging::instance(); - checks::checker::init(); + configuration::applier::logging::instance().apply(*config); + log_v2::instance().apply(*config); + + checks::checker::init(true); } void deinit_config_state(void) { diff --git a/engine/tests/macros/macro_hostname.cc b/engine/tests/macros/macro_hostname.cc index 18448704909..becb875df86 100644 --- a/engine/tests/macros/macro_hostname.cc +++ b/engine/tests/macros/macro_hostname.cc @@ -121,8 +121,8 @@ TEST_F(MacroHostname, HostProblemId) { CHECK_OPTION_NONE, 0, true, 0); } - process_macros_r(mac, fmt::format("$HOSTPROBLEMID:{}$", hst->get_name()), - out, 0); + process_macros_r(mac, fmt::format("$HOSTPROBLEMID:{}$", hst->name()), out, + 0); ASSERT_EQ(out, firstcheck); for (int i = 0; i < 2; i++) { @@ -1582,4 +1582,4 @@ TEST_F(MacroHostname, HostTimeZone) { host::hosts["test_host"]->set_has_been_checked(true); process_macros_r(mac, "$HOSTTIMEZONE:test_host$", out, 0); ASSERT_EQ(out, "test_timezone"); -} \ No newline at end of file +} diff --git a/engine/tests/notifications/host_normal_notification.cc b/engine/tests/notifications/host_normal_notification.cc index d5b25eced7c..8768de6f6cc 100644 --- a/engine/tests/notifications/host_normal_notification.cc +++ b/engine/tests/notifications/host_normal_notification.cc @@ -38,6 +38,7 @@ #include "com/centreon/engine/configuration/state.hh" #include "com/centreon/engine/downtimes/downtime_manager.hh" #include "com/centreon/engine/exceptions/error.hh" +#include "com/centreon/engine/log_v2.hh" #include "com/centreon/engine/retention/dump.hh" #include "com/centreon/engine/timezone_manager.hh" @@ -53,6 +54,8 @@ class HostNotification : public TestEngine { void SetUp() override { init_config_state(); + log_v2::events()->set_level(spdlog::level::off); + configuration::applier::contact ct_aply; configuration::contact ctct{new_configuration_contact("admin", true)}; ct_aply.add_object(ctct); diff --git a/packaging/rpm/centreon-collect.spec 
b/packaging/rpm/centreon-collect.spec index 59b6cb5be2c..e4d1863c0cd 100644 --- a/packaging/rpm/centreon-collect.spec +++ b/packaging/rpm/centreon-collect.spec @@ -226,6 +226,17 @@ Group: Applications/Communications Include files needed to develop a module Centreon Broker. +%package -n centreon-collect-client +Summary: Centreon Collect gRPC Client. It can be used to exchange with cbd or centengine +Group: Applications/Communications +Requires: centreon-broker-core = %{version}-%{release} +Requires: centreon-engine = %{version}-%{release} + +%description -n centreon-collect-client +This software is a gRPC client designed to easily send commands to cbd or +centengine. + + %prep %setup -q -n %{name}-%{version} @@ -452,6 +463,10 @@ fi %doc broker/LICENSE %{_includedir}/centreon-broker +%files -n centreon-collect-client +%defattr(-,root,root,-) +%{_bindir}/ccc + %files %{_exec_prefix}/lib/systemd/system/cbd.service %{_exec_prefix}/lib/systemd/system/centengine.service diff --git a/sonar-project.properties b/sonar-project.properties index 854c01872a6..c9cbd7b2d7f 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -13,7 +13,7 @@ sonar.exclusions=broker/rrd/test/**,broker/lua/test/**,broker/bam/test/**,broker broker/script/test/**,broker/simu/test/**,broker/sql/test/**,broker/stats/test/**, \ broker/storage/test/**,broker/tcp/test/**,broker/test/**,broker/tls/test/**, \ broker/tls2/test/**,broker/unified_sql/test/**,broker/watchdog/test/**, \ - clib/test/**,engine/tests/** + clib/test/**,engine/tests/**,connectors/ssh/test/**,connectors/perl/test/** sonar.tests=tests, grpc diff --git a/tests/.gitignore b/tests/.gitignore deleted file mode 100644 index 896a6884310..00000000000 --- a/tests/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -### -apps/ -build/* - -### node ### -node_modules/ - -### vscode ### -.vscode/ - -### temp build files ### -logs/* - -workspace.* - -# ingore local env files -.env.local - -src/config/centreon-engine/*.cfg 
-src/config/centreon-engine/config*/* -tags -.ctags - -*.pem diff --git a/tests/README.md b/tests/README.md index e6cc0804547..365ba26f02a 100644 --- a/tests/README.md +++ b/tests/README.md @@ -55,11 +55,27 @@ Here is the list of the currently implemented tests: ### Bam - [x] **BEBAMIDT1**: A BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. Then we set a downtime on this last one. An inherited downtime is set to the BA. The downtime is removed from the service, the inherited downtime is then deleted. - [x] **BEBAMIDT2**: A BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. Then we set a downtime on this last one. An inherited downtime is set to the BA. Engine is restarted. Broker is restarted. The two downtimes are still there with no duplicates. The downtime is removed from the service, the inherited downtime is then deleted. +- [x] **BEBAMIGNDT1**: A BA of type 'worst' with two services is configured. The downtime policy on this ba is "Ignore the indicator in the calculation". The BA is in critical state, because of the second critical service. Then we apply two downtimes on this last one. The BA state is ok because of the policy on indicators. A first downtime is cancelled, the BA is still OK, but when the second downtime is cancelled, the BA should be CRITICAL. +- [x] **BEBAMIGNDT2**: A BA of type 'worst' with two services is configured. The downtime policy on this ba is "Ignore the indicator in the calculation". The BA is in critical state, because of the second critical service. Then we apply two downtimes on this last one. The BA state is ok because of the policy on indicators. The first downtime reaches its end, the BA is still OK, but when the second downtime reaches its end, the BA should be CRITICAL. 
### Broker +- [x] **BLDIS1**: Start broker with core logs 'disabled' +- [x] **BCL1**: Starting broker with option '-s foobar' should return an error +- [x] **BCL2**: Starting broker with option '-s5' should work +- [x] **BCL3**: Starting broker with options '-D' should work and activate diagnose mode +- [x] **BCL4**: Starting broker with options '-s2' and '-D' should work. - [x] **BFC1**: Start broker with invalid filters but one filter ok - [x] **BFC2**: Start broker with only invalid filters on an output -- [x] **BLDIS1**: Start broker with core logs 'disabled' +- [x] **BGRPCSS1**: Start-Stop two instances of broker configured with grpc stream and no coredump +- [x] **BGRPCSS2**: Start/Stop 10 times broker configured with grpc stream with 300ms interval and no coredump +- [x] **BGRPCSS3**: Start-Stop one instance of broker configured with grpc stream and no coredump +- [x] **BGRPCSS4**: Start/Stop 10 times broker configured with grpc stream with 1sec interval and no coredump +- [x] **BGRPCSS5**: Start-Stop with reversed connection on grpc acceptor with only one instance and no deadlock +- [x] **BGRPCSSU1**: Start-Stop with unified_sql two instances of broker with grpc stream and no coredump +- [x] **BGRPCSSU2**: Start/Stop with unified_sql 10 times broker configured with grpc stream with 300ms interval and no coredump +- [x] **BGRPCSSU3**: Start-Stop with unified_sql one instance of broker configured with grpc and no coredump +- [x] **BGRPCSSU4**: Start/Stop with unified_sql 10 times broker configured with grpc stream with 1sec interval and no coredump +- [x] **BGRPCSSU5**: Start-Stop with unified_sql with reversed connection on grpc acceptor with only one instance and no deadlock - [x] **BSS1**: Start-Stop two instances of broker and no coredump - [x] **BSS2**: Start/Stop 10 times broker with 300ms interval and no coredump - [x] **BSS3**: Start-Stop one instance of broker and no coredump @@ -90,18 +106,6 @@ Here is the list of the currently implemented tests: - 
[x] **BDBU7**: Access denied when database user password is wrong for unified sql - [x] **BDBU10**: Connection should be established when user password is good for unified sql - [x] **BDBMU1**: start broker/engine with unified sql and then start MariaDB => connection is established -- [x] **BGRPCSS1**: Start-Stop two instances of broker configured with grpc stream and no coredump -- [x] **BGRPCSS2**: Start/Stop 10 times broker configured with grpc stream with 300ms interval and no coredump -- [x] **BGRPCSS3**: Start-Stop one instance of broker configured with grpc stream and no coredump -- [x] **BGRPCSS4**: Start/Stop 10 times broker configured with grpc stream with 1sec interval and no coredump -- [x] **BGRPCSS5**: Start-Stop with reversed connection on grpc acceptor with only one instance and no deadlock -- [x] **BGRPCSSU1**: Start-Stop with unified_sql two instances of broker with grpc stream and no coredump -- [x] **BGRPCSSU2**: Start/Stop with unified_sql 10 times broker configured with grpc stream with 300ms interval and no coredump -- [x] **BGRPCSSU3**: Start-Stop with unified_sql one instance of broker configured with grpc and no coredump -- [x] **BGRPCSSU4**: Start/Stop with unified_sql 10 times broker configured with grpc stream with 1sec interval and no coredump -- [x] **BGRPCSSU5**: Start-Stop with unified_sql with reversed connection on grpc acceptor with only one instance and no deadlock -- [x] **BCL1**: Starting broker with option '-s foobar' should return an error -- [x] **BCL2**: Starting broker with option '-s 5' should work ### Broker/database - [x] **NetworkDbFail1**: network failure test between broker and database (shutting down connection for 100ms) @@ -115,18 +119,15 @@ Here is the list of the currently implemented tests: - [x] **NetworkDBFailU7**: network failure test between broker and database: we wait for the connection to be established and then we shut down the connection for 60s (with unified_sql) ### Broker/engine +- [x] 
**BRRDDMDB1**: RRD metrics deletion from metric ids with a query in centreon_storage. +- [x] **BRRDDIDDB1**: RRD metrics deletion from index ids with a query in centreon_storage. +- [x] **BRRDRBDB1**: RRD metric rebuild with a query in centreon_storage and unified sql +- [x] **BRRDRBUDB1**: RRD metric rebuild with a query in centreon_storage and unified sql - [x] **BEPBBEE1**: central-module configured with bbdo_version 3.0 but not others. Unable to establish connection. - [x] **BEPBBEE2**: bbdo_version 3 not compatible with sql/storage - [x] **BEPBBEE3**: bbdo_version 3 generates new bbdo protobuf service status messages. - [x] **BEPBBEE4**: bbdo_version 3 generates new bbdo protobuf host status messages. - [x] **BEPBBEE5**: bbdo_version 3 generates new bbdo protobuf service messages. -- [x] **BECC1**: Broker/Engine communication with compression between central and poller -- [x] **EBNHG1**: New host group with several pollers and connections to DB -- [x] **EBNHGU1**: New host group with several pollers and connections to DB with broker configured with unified_sql -- [x] **EBNHGU2**: New host group with several pollers and connections to DB with broker configured with unified_sql -- [x] **EBNHGU3**: New host group with several pollers and connections to DB with broker configured with unified_sql -- [x] **EBNHG4**: New host group with several pollers and connections to DB with broker and rename this hostgroup -- [x] **EBNHGU4**: New host group with several pollers and connections to DB with broker and rename this hostgroup - [x] **LOGV2EB1**: log-v2 enabled old log disabled check broker sink - [x] **LOGV2DB1**: log-v2 disabled old log enabled check broker sink - [x] **LOGV2DB2**: log-v2 disabled old log disabled check broker sink @@ -137,11 +138,15 @@ Here is the list of the currently implemented tests: - [x] **LOGV2EF2**: log-v2 enabled old log enabled check logfile sink - [x] **LOGV2BE2**: log-v2 enabled old log enabled check broker sink is equal - [x] 
**LOGV2FE2**: log-v2 enabled old log enabled check logfile sink -- [x] **BERES1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) -- [x] **BEHS1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) -- [x] **BRGC1**: Broker good reverse connection -- [x] **BRCTS1**: Broker reverse connection too slow -- [x] **BRCS1**: Broker reverse connection stopped +- [x] **BERD1**: Starting/stopping Broker does not create duplicated events. +- [x] **BERD2**: Starting/stopping Engine does not create duplicated events. +- [x] **BERDUC1**: Starting/stopping Broker does not create duplicated events in usual cases +- [x] **BERDUCU1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql +- [x] **BERDUC2**: Starting/stopping Engine does not create duplicated events in usual cases +- [x] **BERDUCU2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql +- [x] **BERDUC3U1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql and BBDO 3.0 +- [x] **BERDUC3U2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql and BBDO 3.0 +- [x] **ENRSCHE1**: check next check of reschedule is last_check+interval_check - [x] **BRRDDM1**: RRD metrics deletion from metric ids. - [x] **BRRDDID1**: RRD metrics deletion from index ids. 
- [x] **BRRDDMID1**: RRD deletion of non existing metrics and indexes @@ -150,21 +155,7 @@ Here is the list of the currently implemented tests: - [x] **BRRDDMIDU1**: RRD deletion of non existing metrics and indexes - [x] **BRRDRM1**: RRD metric rebuild with gRPC API and unified sql - [x] **BRRDRMU1**: RRD metric rebuild with gRPC API and unified sql -- [x] **ENRSCHE1**: check next check of reschedule is last_check+interval_check -- [x] **EBNSG1**: New service group with several pollers and connections to DB -- [x] **EBNSGU1**: New service group with several pollers and connections to DB with broker configured with unified_sql -- [x] **EBNSGU2**: New service group with several pollers and connections to DB with broker configured with unified_sql - [x] **EBNSVC1**: New services with several pollers -- [x] **BERD1**: Starting/stopping Broker does not create duplicated events. -- [x] **BERD2**: Starting/stopping Engine does not create duplicated events. -- [x] **BERDUC1**: Starting/stopping Broker does not create duplicated events in usual cases -- [x] **BERDUCU1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql -- [x] **BERDUC2**: Starting/stopping Engine does not create duplicated events in usual cases -- [x] **BERDUCU2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql -- [x] **BERDUC3U1**: Starting/stopping Broker does not create duplicated events in usual cases with unified_sql and BBDO 3.0 -- [x] **BERDUC3U2**: Starting/stopping Engine does not create duplicated events in usual cases with unified_sql and BBDO 3.0 -- [x] **BEDTMASS1**: New services with several pollers -- [x] **BEDTMASS2**: New services with several pollers - [x] **BESS1**: Start-Stop Broker/Engine - Broker started first - Broker stopped first - [x] **BESS2**: Start-Stop Broker/Engine - Broker started first - Engine stopped first - [x] **BESS3**: Start-Stop Broker/Engine - Engine started first - Engine stopped 
first @@ -176,6 +167,22 @@ Here is the list of the currently implemented tests: - [x] **BESS_GRPC4**: Start-Stop grpc version Broker/Engine - Engine started first - Broker stopped first - [x] **BESS_GRPC5**: Start-Stop grpc version Broker/engine - Engine debug level is set to all, it should not hang - [x] **BESS_GRPC_COMPRESS1**: Start-Stop grpc version Broker/Engine - Broker started first - Broker stopped first compression activated +- [x] **BESS_CRYPTED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured +- [x] **BESS_CRYPTED_GRPC2**: Start-Stop grpc version Broker/Engine only server crypted +- [x] **BESS_CRYPTED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted +- [x] **BESS_CRYPTED_REVERSED_GRPC1**: Start-Stop grpc version Broker/Engine - well configured +- [x] **BESS_CRYPTED_REVERSED_GRPC2**: Start-Stop grpc version Broker/Engine only engine server crypted +- [x] **BESS_CRYPTED_REVERSED_GRPC3**: Start-Stop grpc version Broker/Engine only engine crypted +- [x] **BEDTMASS1**: New services with several pollers +- [x] **BEDTMASS2**: New services with several pollers +- [x] **EBSNU1**: New services with notes_url with more than 2000 characters +- [x] **EBSAU2**: New services with action_url with more than 2000 characters +- [x] **EBSN3**: New services with notes with more than 500 characters +- [x] **BERES1**: store_in_resources is enabled and store_in_hosts_services is not. Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) +- [x] **BEHS1**: store_in_resources is enabled and store_in_hosts_services is not. 
Only writes into resources should be done (except hosts/services events that continue to be written in hosts/services tables) +- [x] **EBNSG1**: New service group with several pollers and connections to DB +- [x] **EBNSGU1**: New service group with several pollers and connections to DB with broker configured with unified_sql +- [x] **EBNSGU2**: New service group with several pollers and connections to DB with broker configured with unified_sql - [x] **BETAG1**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Broker is started before. - [x] **BETAG2**: Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. - [x] **BEUTAG1**: Engine is configured with some tags. When broker receives them through unified_sql stream, it stores them in the centreon_storage.tags table. Broker is started before. @@ -198,9 +205,16 @@ Here is the list of the currently implemented tests: - [x] **BECT_GRPC2**: Broker/Engine communication with TLS between central and poller with key/cert - [x] **BECT_GRPC3**: Broker/Engine communication with anonymous TLS and ca certificate - [x] **BECT_GRPC4**: Broker/Engine communication with TLS between central and poller with key/cert and hostname forced -- [x] **EBSNU1**: New services with notes_url with more than 2000 characters -- [x] **EBSAU2**: New services with action_url with more than 2000 characters -- [x] **EBSN3**: New services with notes with more than 500 characters +- [x] **BECC1**: Broker/Engine communication with compression between central and poller +- [x] **EBNHG1**: New host group with several pollers and connections to DB +- [x] **EBNHGU1**: New host group with several pollers and connections to DB with broker configured with unified_sql +- [x] **EBNHGU2**: New host group with several pollers and connections to DB with broker configured with unified_sql +- [x] **EBNHGU3**: New host 
group with several pollers and connections to DB with broker configured with unified_sql +- [x] **EBNHG4**: New host group with several pollers and connections to DB with broker and rename this hostgroup +- [x] **EBNHGU4**: New host group with several pollers and connections to DB with broker and rename this hostgroup +- [x] **BRGC1**: Broker good reverse connection +- [x] **BRCTS1**: Broker reverse connection too slow +- [x] **BRCS1**: Broker reverse connection stopped - [x] **BEEXTCMD1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 - [x] **BEEXTCMD2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 - [x] **BEEXTCMD3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 @@ -247,10 +261,27 @@ Here is the list of the currently implemented tests: - [x] **BEEXTCMD_GRPC2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 and grpc - [x] **BEEXTCMD_GRPC3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 and grpc - [x] **BEEXTCMD_GRPC4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 and grpc -- [x] **BEEXTCMD_REVERSE_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and grpc reversed +- [x] **BEEXTCMD_REVERSE_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and reversed gRPC - [x] **BEEXTCMD_REVERSE_GRPC2**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo2.0 and grpc reversed - [x] **BEEXTCMD_REVERSE_GRPC3**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo3.0 and grpc reversed - [x] **BEEXTCMD_REVERSE_GRPC4**: external command CHANGE_NORMAL_HOST_CHECK_INTERVAL on bbdo2.0 and grpc reversed +- [x] **BEEXTCMD_COMPRESS_GRPC1**: external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and compressed grpc +- [x] **BEATOI11**: external command SEND_CUSTOM_HOST_NOTIFICATION with option_number=1 should work +- [x] **BEATOI12**: external command SEND_CUSTOM_HOST_NOTIFICATION with option_number>7 should fail +- [x] **BEATOI13**: 
external command SCHEDULE SERVICE DOWNTIME with duration<0 should fail +- [x] **BEATOI21**: external command ADD_HOST_COMMENT and DEL_HOST_COMMENT should work +- [x] **BEATOI22**: external command DEL_HOST_COMMENT with comment_id<0 should fail +- [x] **BEATOI23**: external command ADD_SVC_COMMENT with persistent=0 should work + +### Ccc +- [x] **BECCC1**: ccc without port fails with an error message +- [x] **BECCC2**: ccc with -p 51001 connects to central cbd gRPC server. +- [x] **BECCC3**: ccc with -p 50001 connects to centengine gRPC server. +- [x] **BECCC4**: ccc with -p 51001 -l returns the available functions from Broker gRPC server +- [x] **BECCC5**: ccc with -p 51001 -l GetVersion returns an error because we can't execute a command with -l. +- [x] **BECCC6**: ccc with -p 51001 GetVersion{} calls the GetVersion command +- [x] **BECCC7**: ccc with -p 51001 GetVersion{"idx":1} returns an error because the input message is wrong. +- [x] **BECCC8**: ccc with -p 50001 EnableServiceNotifications{"names":{"host_name": "host_1", "service_name": "service_1"}} works and returns an empty message. ### Connector perl - [x] **test use connector perl exist script**: test exist script @@ -263,14 +294,14 @@ Here is the list of the currently implemented tests: - [x] **Test6Hosts**: as 127.0.0.x point to the localhost address we will simulate check on 6 hosts ### Engine -- [x] **EFHC1**: Engine is configured with hosts and we force checks on one 5 times on bbdo2 -- [x] **EFHC2**: Engine is configured with hosts and we force checks on one 5 times on bbdo2 -- [x] **EFHCU1**: Engine is configured with hosts and we force checks on one 5 times on bbdo3. Bbdo3 has no impact on this behavior. resources table is cleared before starting broker. -- [x] **EFHCU2**: Engine is configured with hosts and we force checks on one 5 times on bbdo3. Bbdo3 has no impact on this behavior. 
- [x] **ESS1**: Start-Stop (0s between start/stop) 5 times one instance of engine and no coredump - [x] **ESS2**: Start-Stop (300ms between start/stop) 5 times one instance of engine and no coredump - [x] **ESS3**: Start-Stop (0s between start/stop) 5 times three instances of engine and no coredump - [x] **ESS4**: Start-Stop (300ms between start/stop) 5 times three instances of engine and no coredump +- [x] **EFHC1**: Engine is configured with hosts and we force checks on one 5 times on bbdo2 +- [x] **EFHC2**: Engine is configured with hosts and we force checks on one 5 times on bbdo2 +- [x] **EFHCU1**: Engine is configured with hosts and we force checks on one 5 times on bbdo3. Bbdo3 has no impact on this behavior. resources table is cleared before starting broker. +- [x] **EFHCU2**: Engine is configured with hosts and we force checks on one 5 times on bbdo3. Bbdo3 has no impact on this behavior. - [x] **EPC1**: Check with perl connector ### Migration diff --git a/tests/bam/inherited_downtime.robot b/tests/bam/inherited_downtime.robot index e507c6c22de..63e5faf6f33 100644 --- a/tests/bam/inherited_downtime.robot +++ b/tests/bam/inherited_downtime.robot @@ -2,10 +2,13 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite -Test Setup Stop Processes +Test Setup BAM Setup +Test Teardown Save logs If Failed Documentation Centreon Broker and BAM Library Process +Library DatabaseLibrary +Library DateTime Library OperatingSystem Library ../resources/Broker.py Library ../resources/Engine.py @@ -60,7 +63,7 @@ BEBAMIDT1 Should Be True ${result} msg=The BA ba_1 is in downtime as it should not Stop Engine - Stop Broker + Kindly Stop Broker BEBAMIDT2 [Documentation] A BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. Then we set a downtime on this last one. An inherited downtime is set to the BA. Engine is restarted. Broker is restarted. 
The two downtimes are still there with no duplicates. The downtime is removed from the service, the inherited downtime is then deleted. @@ -105,7 +108,7 @@ BEBAMIDT2 Start Engine Sleep 3s # Broker is restarted - Stop Broker + Kindly Stop Broker Start Broker END @@ -126,4 +129,177 @@ BEBAMIDT2 Should Be True ${result} msg=We should have no more downtime Stop Engine - Stop Broker + Kindly Stop Broker + +BEBAMIGNDT1 + [Documentation] A BA of type 'worst' with two services is configured. The downtime policy on this ba is "Ignore the indicator in the calculation". The BA is in critical state, because of the second critical service. Then we apply two downtimes on this last one. The BA state is ok because of the policy on indicators. A first downtime is cancelled, the BA is still OK, but when the second downtime is cancelled, the BA should be CRITICAL. + [Tags] broker downtime engine bam + Clear Commands Status + Config Broker module + Config Broker central + Broker Config Log central bam trace + Config Broker rrd + Config Engine ${1} + + Clone Engine Config To DB + Add Bam Config To Engine + + @{svc}= Set Variable ${{ [("host_16", "service_313"), ("host_16", "service_314")] }} + Create BA With Services test worst ${svc} ignore + Add Bam Config To Broker central + # Command of service_314 is set to critical + ${cmd_1}= Get Command Id 313 + Log To Console service_313 has command id ${cmd_1} + Set Command Status ${cmd_1} 0 + ${cmd_2}= Get Command Id 314 + Log To Console service_314 has command id ${cmd_2} + Set Command Status ${cmd_2} 2 + Start Broker + Start Engine + Sleep 5s + + # KPI set to ok + Repeat Keyword 3 times Process Service Check Result host_16 service_313 0 output critical for 313 + ${result}= Check Service Status With Timeout host_16 service_313 0 60 + Should Be True ${result} msg=The service (host_16,service_313) is not OK as expected + + # KPI set to critical + Repeat Keyword 3 times Process Service Check Result host_16 service_314 2 output critical for 314
+ ${result}= Check Service Status With Timeout host_16 service_314 2 60 + Should Be True ${result} msg=The service (host_16,service_314) is not CRITICAL as expected + + # The BA should become critical + ${result}= Check Ba Status With Timeout test 2 60 + Should Be True ${result} msg=The BA ba_1 is not CRITICAL as expected + Log To console The BA is critical. + + # Two downtimes are applied on service_314 + Schedule Service Downtime host_16 service_314 3600 + ${result}= Check Service Downtime With Timeout host_16 service_314 1 60 + Should Be True ${result} msg=The service (host_16, service_314) is not in downtime as it should be + Log to console One downtime applied to service_314. + + Schedule Service Downtime host_16 service_314 1800 + ${result}= Check Service Downtime With Timeout host_16 service_314 2 60 + Should Be True ${result} msg=The service (host_16, service_314) is not in downtime as it should be + Log to console Two downtimes applied to service_314. + + ${result}= Check Service Downtime With Timeout _Module_BAM_1 ba_1 0 60 + Should Be True ${result} msg=The BA ba_1 is in downtime but should not + Log to console The BA is configured to ignore kpis in downtime + + ${result}= Check Ba Status With Timeout test 0 60 + Should Be True ${result} msg=The service in downtime should be ignored while computing the state of this BA. + Log to console The BA is OK, since the critical service is in downtime. + + # The first downtime is deleted + Delete Service Downtime host_16 service_314 + + ${result}= Check Service Downtime With Timeout host_16 service_314 1 60 + Should Be True ${result} msg=The service (host_16, service_314) does not contain 1 downtime as it should + Log to console Still one downtime applied to service_314. + + ${result}= Check Ba Status With Timeout test 0 60 + Should Be True ${result} msg=The BA is not OK whereas the service_314 is still in downtime. 
+ Log to console The BA is still OK + + # The second downtime is deleted + Delete Service Downtime host_16 service_314 + ${result}= Check Ba Status With Timeout test 2 60 + Should Be True ${result} msg=The critical service is no more in downtime, the BA should be critical. + Log to console The BA is now critical (no more downtime) + + Stop Engine + Kindly Stop Broker + +BEBAMIGNDT2 + [Documentation] A BA of type 'worst' with two services is configured. The downtime policy on this ba is "Ignore the indicator in the calculation". The BA is in critical state, because of the second critical service. Then we apply two downtimes on this last one. The BA state is ok because of the policy on indicators. The first downtime reaches its end, the BA is still OK, but when the second downtime reaches its end, the BA should be CRITICAL. + [Tags] broker downtime engine bam + Clear Commands Status + Config Broker module + Config Broker central + Broker Config Log central core error + Broker Config Log central bam trace + Config Broker rrd + Config Engine ${1} + + Clone Engine Config To DB + Add Bam Config To Engine + + @{svc}= Set Variable ${{ [("host_16", "service_313"), ("host_16", "service_314")] }} + Create BA With Services test worst ${svc} ignore + Add Bam Config To Broker central + # Command of service_314 is set to critical + ${cmd_1}= Get Command Id 313 + Log To Console service_313 has command id ${cmd_1} + Set Command Status ${cmd_1} 0 + ${cmd_2}= Get Command Id 314 + Log To Console service_314 has command id ${cmd_2} + Set Command Status ${cmd_2} 2 + Start Broker + Start Engine + Sleep 5s + + # KPI set to ok + Repeat Keyword 3 times Process Service Check Result host_16 service_313 0 output critical for 313 + ${result}= Check Service Status With Timeout host_16 service_313 0 60 + Should Be True ${result} msg=The service (host_16,service_313) is not OK as expected + + # KPI set to critical + Repeat Keyword 3 times Process Service Check Result host_16 service_314 2 output
critical for 314 + ${result}= Check Service Status With Timeout host_16 service_314 2 60 + Should Be True ${result} msg=The service (host_16,service_314) is not CRITICAL as expected + + # The BA should become critical + ${result}= Check Ba Status With Timeout test 2 60 + Should Be True ${result} msg=The BA ba_1 is not CRITICAL as expected + Log To console The BA is critical. + + # Two downtimes are applied on service_314 + Schedule Service Downtime host_16 service_314 60 + ${result}= Check Service Downtime With Timeout host_16 service_314 1 60 + Should Be True ${result} msg=The service (host_16, service_314) is not in downtime as it should be + Log to console One downtime applied to service_314. + + Schedule Service Downtime host_16 service_314 30 + ${result}= Check Service Downtime With Timeout host_16 service_314 2 60 + Should Be True ${result} msg=The service (host_16, service_314) is not in downtime as it should be + Log to console Two downtimes applied to service_314. + + ${result}= Check Service Downtime With Timeout _Module_BAM_1 ba_1 0 60 + Should Be True ${result} msg=The BA ba_1 is in downtime but should not + Log to console The BA is configured to ignore kpis in downtime + + ${result}= Check Ba Status With Timeout test 0 60 + Should Be True ${result} msg=The service in downtime should be ignored while computing the state of this BA. + Log to console The BA is OK, since the critical service is in downtime. + + # The first downtime should reach its end + + Log to console After 30s, the first downtime should be finished. + ${result}= Check Service Downtime With Timeout host_16 service_314 1 60 + Should Be True ${result} msg=The service (host_16, service_314) does not contain 1 downtime as it should + Log to console Still one downtime applied to service_314. + + Log to console After 30s, the second downtime should be finished. 
+ ${result}= Check Ba Status With Timeout test 0 60 + Should Be True ${result} msg=The BA is not OK whereas the service_314 is still in downtime. + Log to console The BA is still OK + + # The second downtime finishes + ${result}= Check Ba Status With Timeout test 2 60 + Should Be True ${result} msg=The critical service is no more in downtime, the BA should be critical. + Log to console The BA is now critical (no more downtime) + + Stop Engine + Kindly Stop Broker + +*** Keywords *** +BAM Setup + Stop Processes + Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} + ${date}= Get Current Date result_format=epoch + log to console date=${date} + Query UPDATE downtimes SET deletion_time=${date}, actual_end_time=${date} WHERE actual_end_time is null + + diff --git a/tests/broker-engine/anomaly-detection.robot b/tests/broker-engine/anomaly-detection.robot new file mode 100644 index 00000000000..2c1ac5f7f26 --- /dev/null +++ b/tests/broker-engine/anomaly-detection.robot @@ -0,0 +1,90 @@ +*** Settings *** +Resource ../resources/resources.robot +Suite Setup Clean Before Suite +Suite Teardown Clean After Suite +Test Setup Stop Processes +Test Teardown Save logs If Failed + +Documentation Centreon Broker and Engine anomaly detection + + +Library DateTime +Library Process +Library OperatingSystem +Library ../resources/Engine.py +Library ../resources/Broker.py +Library ../resources/Common.py + + +*** Test Cases *** +ANO_NOFILE + [Documentation] an anomaly detection without threshold file must be in unknown state + [Tags] Broker Engine Anomaly + Config Engine ${1} ${50} ${20} + Config Broker central + Config Broker module ${1} + Broker Config Log central sql debug + Config Broker Sql Output central unified_sql + ${serv_id}= Create Anomaly Detection ${0} ${1} ${1} metric + Remove File /tmp/anomaly_threshold.json + Clear Retention + Clear Db services + Start Broker + Start Engine + Process Service Check result host_1 anomaly_${serv_id} 2 taratata + Check 
Service Status With Timeout host_1 anomaly_${serv_id} 3 30 + +ANO_TOO_OLD_FILE + [Documentation] an anomaly detection with an oldest threshold file must be in unknown state + [Tags] Broker Engine Anomaly + Config Engine ${1} ${50} ${20} + Config Broker central + Config Broker module ${1} + Broker Config Log central sql debug + Config Broker Sql Output central unified_sql + ${serv_id}= Create Anomaly Detection ${0} ${1} ${1} metric + ${predict_data} = Evaluate [[0,0,2],[1648812678,0,3]] + Create Anomaly Threshold File /tmp/anomaly_threshold.json ${1} ${serv_id} metric ${predict_data} + Clear Retention + Clear Db services + Start Broker + Start Engine + Process Service Check result host_1 anomaly_${serv_id} 2 taratata|metric=70%;50;75 + Check Service Status With Timeout host_1 anomaly_${serv_id} 3 30 + + +ANO_OUT_LOWER_THAN_LIMIT + [Documentation] an anomaly detection with a perfdata lower than lower limit make a critical state + [Tags] Broker Engine Anomaly + Config Engine ${1} ${50} ${20} + Config Broker central + Config Broker module ${1} + Broker Config Log central sql debug + Config Broker Sql Output central unified_sql + ${serv_id}= Create Anomaly Detection ${0} ${1} ${1} metric + ${predict_data} = Evaluate [[0,50,52],[2648812678,50,63]] + Create Anomaly Threshold File /tmp/anomaly_threshold.json ${1} ${serv_id} metric ${predict_data} + Clear Retention + Clear Db services + Start Broker + Start Engine + Process Service Check result host_1 anomaly_${serv_id} 2 taratata|metric=20%;50;75 + Check Service Status With Timeout host_1 anomaly_${serv_id} 2 30 + +ANO_OUT_UPPER_THAN_LIMIT + [Documentation] an anomaly detection with a perfdata upper than upper limit make a critical state + [Tags] Broker Engine Anomaly + Config Engine ${1} ${50} ${20} + Config Broker central + Config Broker module ${1} + Broker Config Log central sql debug + Config Broker Sql Output central unified_sql + ${serv_id}= Create Anomaly Detection ${0} ${1} ${1} metric + ${predict_data} = Evaluate 
[[0,50,52],[2648812678,50,63]] + Create Anomaly Threshold File /tmp/anomaly_threshold.json ${1} ${serv_id} metric ${predict_data} + Clear Retention + Clear Db services + Start Broker + Start Engine + Process Service Check result host_1 anomaly_${serv_id} 2 taratata|metric=80%;50;75 + Check Service Status With Timeout host_1 anomaly_${serv_id} 2 30 diff --git a/tests/broker-engine/bbdo-protobuf.robot b/tests/broker-engine/bbdo-protobuf.robot index c3e8e6cf806..af38670e439 100644 --- a/tests/broker-engine/bbdo-protobuf.robot +++ b/tests/broker-engine/bbdo-protobuf.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Engine/Broker tests on bbdo_version 3.0.0 and protobuf bbdo embedded events. Library Process @@ -32,7 +33,7 @@ BEPBBEE1 ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 30 Should Be True ${result} msg=Message about not matching bbdo versions not available Stop Engine - Stop Broker + Kindly Stop Broker BEPBBEE2 [Documentation] bbdo_version 3 not compatible with sql/storage @@ -45,6 +46,7 @@ BEPBBEE2 Broker Config Add Item central bbdo_version 3.0.0 Broker Config Add Item rrd bbdo_version 3.0.0 Broker Config Log central sql debug + Broker Config Flush Log central 0 Clear Retention ${start}= Get Current Date Start Broker @@ -74,7 +76,7 @@ BEPBBEE3 Start Engine Wait Until Created /tmp/pbservicestatus.log 1m Stop Engine - Stop Broker + Kindly Stop Broker BEPBBEE4 [Documentation] bbdo_version 3 generates new bbdo protobuf host status messages. @@ -96,7 +98,7 @@ BEPBBEE4 Start Engine Wait Until Created /tmp/pbhoststatus.log 1m Stop Engine - Stop Broker + Kindly Stop Broker BEPBBEE5 [Documentation] bbdo_version 3 generates new bbdo protobuf service messages. 
@@ -118,4 +120,4 @@ BEPBBEE5 Start Engine Wait Until Created /tmp/pbservice.log 1m Stop Engine - Stop Broker + Kindly Stop Broker diff --git a/tests/broker-engine/compression.robot b/tests/broker-engine/compression.robot index 709cee658ec..a02757c4174 100644 --- a/tests/broker-engine/compression.robot +++ b/tests/broker-engine/compression.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine communication with or without compression Library Process @@ -33,7 +34,7 @@ BECC1 Start Engine ${result}= Check Connections Should Be True ${result} msg=Engine and Broker not connected - Stop Broker + Kindly Stop Broker Stop Engine ${content1}= Create List we have extensions '${ext["${comp1}"]}' and peer has '${ext["${comp2}"]}' ${content2}= Create List we have extensions '${ext["${comp2}"]}' and peer has '${ext["${comp1}"]}' diff --git a/tests/broker-engine/downtimes.robot b/tests/broker-engine/downtimes.robot index df47017d827..2703665a155 100644 --- a/tests/broker-engine/downtimes.robot +++ b/tests/broker-engine/downtimes.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine progressively add services Library Process diff --git a/tests/broker-engine/external-commands.robot b/tests/broker-engine/external-commands.robot index 47f9400e010..05442d94391 100644 --- a/tests/broker-engine/external-commands.robot +++ b/tests/broker-engine/external-commands.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine progressively add services Library DatabaseLibrary @@ -33,7 +34,7 @@ 
BEEXTCMD1 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Normal Svc Check Interval ${use_grpc} host_1 service_1 10 @@ -41,8 +42,8 @@ BEEXTCMD1 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 300 - Log To Console select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' + ${output}= Query SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10.0,),)" @@ -65,7 +66,7 @@ BEEXTCMD2 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Normal Svc Check Interval ${use_grpc} host_1 service_1 15 @@ -73,8 +74,8 @@ BEEXTCMD2 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' + ${output}= Query SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15.0,),)" @@ -104,7 +105,7 @@ BEEXTCMD3 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Normal Host Check Interval ${use_grpc} host_1 10 @@ -112,8 +113,8 @@ BEEXTCMD3 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select check_interval from hosts where name='host_1' - ${output}= Query select check_interval from hosts where name='host_1' + Log To Console SELECT check_interval FROM hosts WHERE name='host_1' + ${output}= Query SELECT check_interval FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10.0,),)" @@ -138,7 +139,7 @@ BEEXTCMD4 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Normal Host Check Interval ${use_grpc} host_1 15 @@ -146,8 +147,8 @@ BEEXTCMD4 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select check_interval from hosts where name='host_1' - ${output}= Query select check_interval from hosts where name='host_1' + Log To Console SELECT check_interval FROM hosts WHERE name='host_1' + ${output}= Query SELECT check_interval FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15.0,),)" @@ -177,7 +178,7 @@ BEEXTCMD5 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Retry Svc Check Interval ${use_grpc} host_1 service_1 15 @@ -185,8 +186,8 @@ BEEXTCMD5 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.retry_interval from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.retry_interval from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.retry_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' + ${output}= Query SELECT s.retry_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15.0,),)" @@ -210,7 +211,7 @@ BEEXTCMD6 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Retry Svc Check Interval ${use_grpc} host_1 service_1 10 @@ -218,8 +219,8 @@ BEEXTCMD6 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.retry_interval from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.retry_interval from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.retry_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' + ${output}= Query SELECT s.retry_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10.0,),)" @@ -249,7 +250,7 @@ BEEXTCMD7 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Retry Host Check Interval ${use_grpc} host_1 15 @@ -257,8 +258,8 @@ BEEXTCMD7 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select retry_interval from hosts where name='host_1' - ${output}= Query select retry_interval from hosts where name='host_1' + Log To Console SELECT retry_interval FROM hosts WHERE name='host_1' + ${output}= Query SELECT retry_interval FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15.0,),)" @@ -283,7 +284,7 @@ BEEXTCMD8 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Retry Host Check Interval ${use_grpc} host_1 10 @@ -291,8 +292,8 @@ BEEXTCMD8 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select retry_interval from hosts where name='host_1' - ${output}= Query select retry_interval from hosts where name='host_1' + Log To Console SELECT retry_interval FROM hosts WHERE name='host_1' + ${output}= Query SELECT retry_interval FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10.0,),)" @@ -320,7 +321,7 @@ BEEXTCMD9 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Max Svc Check Attempts ${use_grpc} host_1 service_1 15 @@ -328,8 +329,8 @@ BEEXTCMD9 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.max_check_attempts from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.max_check_attempts from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.max_check_attempts FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' + ${output}= Query SELECT s.max_check_attempts FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15,),)" @@ -337,8 +338,8 @@ BEEXTCMD9 Should Be Equal As Strings ${output} ((15,),) FOR ${index} IN RANGE 30 - Log To Console select max_check_attempts from resources where name='service_1' and parent_name='host_1' - ${output}= Query select max_check_attempts from resources where name='service_1' and parent_name='host_1' + Log To Console SELECT max_check_attempts FROM resources WHERE name='service_1' AND parent_name='host_1' + ${output}= Query SELECT max_check_attempts FROM resources WHERE name='service_1' AND parent_name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15,),)" @@ -362,7 +363,7 @@ BEEXTCMD10 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Max Svc Check Attempts ${use_grpc} host_1 service_1 10 @@ -370,8 +371,8 @@ BEEXTCMD10 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.max_check_attempts from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.max_check_attempts from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.max_check_attempts FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' + ${output}= Query SELECT s.max_check_attempts FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10,),)" @@ -401,7 +402,7 @@ BEEXTCMD11 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Max Host Check Attempts ${use_grpc} host_1 15 @@ -409,8 +410,8 @@ BEEXTCMD11 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select max_check_attempts from hosts where name='host_1' - ${output}= Query select max_check_attempts from hosts where name='host_1' + Log To Console SELECT max_check_attempts FROM hosts WHERE name='host_1' + ${output}= Query SELECT max_check_attempts FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15,),)" @@ -418,8 +419,8 @@ BEEXTCMD11 Should Be Equal As Strings ${output} ((15,),) FOR ${index} IN RANGE 30 - Log To Console select max_check_attempts from resources where name='host_1' - ${output}= Query select max_check_attempts from resources where name='host_1' + Log To Console SELECT max_check_attempts FROM resources WHERE name='host_1' + ${output}= Query SELECT max_check_attempts FROM resources WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15,),)" @@ -444,7 +445,7 @@ BEEXTCMD12 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Max Host Check Attempts ${use_grpc} host_1 10 @@ -452,8 +453,8 @@ BEEXTCMD12 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select max_check_attempts from hosts where name='host_1' - ${output}= Query select max_check_attempts from hosts where name='host_1' + Log To Console SELECT max_check_attempts FROM hosts WHERE name='host_1' + ${output}= Query SELECT max_check_attempts FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10,),)" @@ -484,7 +485,7 @@ BEEXTCMD13 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Host Check Timeperiod ${use_grpc} host_1 24x6 @@ -492,8 +493,8 @@ BEEXTCMD13 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select check_period from hosts where name='host_1' - ${output}= Query select check_period from hosts where name='host_1' + Log To Console SELECT check_period FROM hosts WHERE name='host_1' + ${output}= Query SELECT check_period FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "(('24x6',),)" @@ -519,7 +520,7 @@ BEEXTCMD14 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Host Check Timeperiod ${use_grpc} host_1 24x6 @@ -527,8 +528,8 @@ BEEXTCMD14 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select check_period from hosts where name='host_1' - ${output}= Query select check_period from hosts where name='host_1' + Log To Console SELECT check_period FROM hosts WHERE name='host_1' + ${output}= Query SELECT check_period FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "(('24x6',),)" @@ -559,7 +560,7 @@ BEEXTCMD15 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Host Notification Timeperiod ${use_grpc} host_1 24x7 @@ -567,8 +568,8 @@ BEEXTCMD15 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select notification_period from hosts where name='host_1' - ${output}= Query select notification_period from hosts where name='host_1' + Log To Console SELECT notification_period FROM hosts WHERE name='host_1' + ${output}= Query SELECT notification_period FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "(('24x7',),)" @@ -594,7 +595,7 @@ BEEXTCMD16 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Host Notification Timeperiod ${use_grpc} host_1 24x6 @@ -602,8 +603,8 @@ BEEXTCMD16 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select notification_period from hosts where name='host_1' - ${output}= Query select notification_period from hosts where name='host_1' + Log To Console SELECT notification_period FROM hosts WHERE name='host_1' + ${output}= Query SELECT notification_period FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "(('24x6',),)" @@ -634,7 +635,7 @@ BEEXTCMD17 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Svc Check Timeperiod ${use_grpc} host_1 service_1 24x6 @@ -642,8 +643,8 @@ BEEXTCMD17 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.check_period from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.check_period from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.check_period FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.check_period FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "(('24x6',),)" @@ -669,7 +670,7 @@ BEEXTCMD18 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE 
STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Svc Check Timeperiod ${use_grpc} host_1 service_1 24x7 @@ -677,8 +678,8 @@ BEEXTCMD18 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.check_period from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.check_period from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.check_period FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.check_period FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "(('24x7',),)" @@ -709,7 +710,7 @@ BEEXTCMD19 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Svc Notification Timeperiod ${use_grpc} host_1 service_1 24x7 @@ -717,8 +718,8 @@ BEEXTCMD19 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.notification_period from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.notification_period from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.notification_period FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.notification_period FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "(('24x7',),)" @@ -744,7 +745,7 @@ BEEXTCMD20 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Svc Notification Timeperiod ${use_grpc} host_1 service_1 24x6 @@ -752,8 +753,8 @@ BEEXTCMD20 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.notification_period from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.notification_period from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.notification_period FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.notification_period FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "(('24x6',),)" @@ -784,7 +785,7 @@ BEEXTCMD21 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host And child Notifications ${use_grpc} host_1 @@ -792,8 +793,8 @@ BEEXTCMD21 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select notify from hosts where name='host_1' - ${output}= Query select notify from hosts where name='host_1' + Log To Console SELECT notify FROM hosts WHERE name='host_1' + ${output}= Query SELECT notify FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -801,8 +802,8 @@ BEEXTCMD21 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select notifications_enabled from resources where name='host_1' - ${output}= Query select notifications_enabled from resources where name='host_1' + Log To Console SELECT notifications_enabled FROM resources WHERE name='host_1' + ${output}= Query SELECT notifications_enabled FROM resources WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -812,8 +813,8 @@ BEEXTCMD21 Enable Host And child Notifications ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select notify from hosts where name='host_1' - ${output}= Query select notify from hosts where name='host_1' + Log To Console SELECT notify FROM hosts WHERE name='host_1' + ${output}= Query SELECT notify FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -821,8 +822,8 @@ BEEXTCMD21 Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select notifications_enabled from resources where name='host_1' - ${output}= Query select notifications_enabled from resources where name='host_1' + Log To Console SELECT notifications_enabled FROM resources WHERE name='host_1' + ${output}= Query SELECT notifications_enabled FROM resources WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -848,7 +849,7 @@ BEEXTCMD22 
${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Disable Host And child Notifications ${use_grpc} host_1 @@ -856,8 +857,8 @@ BEEXTCMD22 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select notify from hosts where name='host_1' - ${output}= Query select notify from hosts where name='host_1' + Log To Console SELECT notify FROM hosts WHERE name='host_1' + ${output}= Query SELECT notify FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -867,8 +868,8 @@ BEEXTCMD22 Enable Host And child Notifications ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select notify from hosts where name='host_1' - ${output}= Query select notify from hosts where name='host_1' + Log To Console SELECT notify FROM hosts WHERE name='host_1' + ${output}= Query SELECT notify FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -898,7 +899,7 @@ BEEXTCMD23 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host Check ${use_grpc} host_1 @@ -906,8 +907,8 @@ BEEXTCMD23 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select active_checks from hosts where name='host_1' - ${output}= Query select active_checks from hosts where name='host_1' + Log To Console SELECT active_checks FROM hosts WHERE name='host_1' + ${output}= Query SELECT active_checks FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -915,8 +916,8 @@ BEEXTCMD23 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select active_checks_enabled from resources where name='host_1' - ${output}= Query select active_checks_enabled from resources where name='host_1' + Log To Console SELECT active_checks_enabled FROM resources WHERE name='host_1' + ${output}= Query SELECT active_checks_enabled FROM resources WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -924,8 +925,8 @@ BEEXTCMD23 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select should_be_scheduled from hosts where name='host_1' - ${output}= Query select should_be_scheduled from hosts where name='host_1' + Log To Console SELECT should_be_scheduled FROM hosts WHERE name='host_1' + ${output}= Query SELECT should_be_scheduled FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -935,8 +936,8 @@ BEEXTCMD23 Enable Host Check ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select active_checks from hosts where name='host_1' - ${output}= Query select active_checks from hosts where name='host_1' + Log To Console SELECT active_checks FROM hosts WHERE name='host_1' + ${output}= Query SELECT active_checks FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -944,8 +945,8 @@ BEEXTCMD23 
Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select active_checks_enabled from resources where name='host_1' - ${output}= Query select active_checks_enabled from resources where name='host_1' + Log To Console SELECT active_checks_enabled FROM resources WHERE name='host_1' + ${output}= Query SELECT active_checks_enabled FROM resources WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -953,8 +954,8 @@ BEEXTCMD23 Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select should_be_scheduled from hosts where name='host_1' - ${output}= Query select should_be_scheduled from hosts where name='host_1' + Log To Console SELECT should_be_scheduled FROM hosts WHERE name='host_1' + ${output}= Query SELECT should_be_scheduled FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -978,7 +979,7 @@ BEEXTCMD24 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host Check ${use_grpc} host_1 @@ -986,8 +987,8 @@ BEEXTCMD24 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select active_checks from hosts where name='host_1' - ${output}= Query select active_checks from hosts where name='host_1' + Log To Console SELECT active_checks FROM hosts WHERE name='host_1' + ${output}= Query SELECT active_checks FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -995,8 +996,8 @@ BEEXTCMD24 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select should_be_scheduled from hosts where name='host_1' - ${output}= Query select should_be_scheduled from hosts where name='host_1' + Log To Console SELECT should_be_scheduled FROM hosts WHERE name='host_1' + ${output}= Query SELECT should_be_scheduled FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1006,8 +1007,8 @@ BEEXTCMD24 Enable Host Check ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select active_checks from hosts where name='host_1' - ${output}= Query select active_checks from hosts where name='host_1' + Log To Console SELECT active_checks FROM hosts WHERE name='host_1' + ${output}= Query SELECT active_checks FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1015,8 +1016,8 @@ BEEXTCMD24 Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select should_be_scheduled from hosts where name='host_1' - ${output}= Query select should_be_scheduled from hosts where name='host_1' + Log To Console SELECT should_be_scheduled FROM hosts WHERE name='host_1' + ${output}= Query SELECT should_be_scheduled FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1046,7 +1047,7 @@ BEEXTCMD25 ${start}= Get Current 
Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Disable Host Event Handler ${use_grpc} host_1 @@ -1054,8 +1055,8 @@ BEEXTCMD25 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select event_handler_enabled from hosts where name='host_1' - ${output}= Query select event_handler_enabled from hosts where name='host_1' + Log To Console SELECT event_handler_enabled FROM hosts WHERE name='host_1' + ${output}= Query SELECT event_handler_enabled FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1063,10 +1064,10 @@ BEEXTCMD25 Should Be Equal As Strings ${output} ((0,),) Enable Host Event Handler ${use_grpc} host_1 - + FOR ${index} IN RANGE 30 - Log To Console select event_handler_enabled from hosts where name='host_1' - ${output}= Query select event_handler_enabled from hosts where name='host_1' + Log To Console SELECT event_handler_enabled FROM hosts WHERE name='host_1' + ${output}= Query SELECT event_handler_enabled FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1091,7 +1092,7 @@ BEEXTCMD26 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host Event Handler ${use_grpc} host_1 @@ -1099,19 +1100,19 @@ BEEXTCMD26 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select event_handler_enabled from hosts where name='host_1' - ${output}= Query select event_handler_enabled from hosts where name='host_1' + Log To Console SELECT event_handler_enabled FROM hosts WHERE name='host_1' + ${output}= Query SELECT event_handler_enabled FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" END Should Be Equal As Strings ${output} ((0,),) - + Enable Host Event Handler ${use_grpc} host_1 - + FOR ${index} IN RANGE 30 - Log To Console select event_handler_enabled from hosts where name='host_1' - ${output}= Query select event_handler_enabled from hosts where name='host_1' + Log To Console SELECT event_handler_enabled FROM hosts WHERE name='host_1' + ${output}= Query SELECT event_handler_enabled FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1141,7 +1142,7 @@ BEEXTCMD27 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host Flap Detection ${use_grpc} host_1 @@ -1149,8 +1150,8 @@ BEEXTCMD27 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select flap_detection from hosts where name='host_1' - ${output}= Query select flap_detection from hosts where name='host_1' + Log To Console SELECT flap_detection FROM hosts WHERE name='host_1' + ${output}= Query SELECT flap_detection FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1160,8 +1161,8 @@ BEEXTCMD27 Enable Host Flap Detection ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select flap_detection from hosts where name='host_1' - ${output}= Query select flap_detection from hosts where name='host_1' + Log To Console SELECT flap_detection FROM hosts WHERE name='host_1' + ${output}= Query SELECT flap_detection FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1186,7 +1187,7 @@ BEEXTCMD28 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host Flap detection ${use_grpc} host_1 @@ -1194,8 +1195,8 @@ BEEXTCMD28 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select flap_detection from hosts where name='host_1' - ${output}= Query select flap_detection from hosts where name='host_1' + Log To Console SELECT flap_detection FROM hosts WHERE name='host_1' + ${output}= Query SELECT flap_detection FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1205,8 +1206,8 @@ BEEXTCMD28 Enable Host Flap Detection ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select flap_detection from hosts where name='host_1' - ${output}= Query select flap_detection from hosts where name='host_1' + Log To Console SELECT flap_detection FROM hosts WHERE name='host_1' + ${output}= Query SELECT flap_detection FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1237,7 +1238,7 @@ BEEXTCMD29 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host Notifications ${use_grpc} host_1 @@ -1245,8 +1246,8 @@ BEEXTCMD29 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select notify from hosts where name='host_1' - ${output}= Query select notify from hosts where name='host_1' + Log To Console SELECT notify FROM hosts WHERE name='host_1' + ${output}= Query SELECT notify FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1254,8 +1255,8 @@ BEEXTCMD29 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select notifications_enabled from resources where name='host_1' - ${output}= Query select notifications_enabled from resources where name='host_1' + Log To Console SELECT notifications_enabled FROM resources WHERE name='host_1' + ${output}= Query SELECT notifications_enabled FROM resources WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1265,8 +1266,8 @@ BEEXTCMD29 Enable Host Notifications ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select notify from hosts where name='host_1' - ${output}= Query select notify from hosts where name='host_1' + Log To Console SELECT notify FROM hosts WHERE name='host_1' + ${output}= Query SELECT notify FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1274,8 +1275,8 @@ BEEXTCMD29 Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select notifications_enabled from resources where name='host_1' - ${output}= Query select notifications_enabled from resources where name='host_1' + Log To Console SELECT notifications_enabled FROM resources WHERE name='host_1' + ${output}= Query SELECT notifications_enabled FROM resources WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1301,7 +1302,7 @@ BEEXTCMD30 ${start}= 
Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Disable Host Notifications ${use_grpc} host_1 @@ -1309,8 +1310,8 @@ BEEXTCMD30 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select notify from hosts where name='host_1' - ${output}= Query select notify from hosts where name='host_1' + Log To Console SELECT notify FROM hosts WHERE name='host_1' + ${output}= Query SELECT notify FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1320,8 +1321,8 @@ BEEXTCMD30 Enable Host Notifications ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select notify from hosts where name='host_1' - ${output}= Query select notify from hosts where name='host_1' + Log To Console SELECT notify FROM hosts WHERE name='host_1' + ${output}= Query SELECT notify FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1351,7 +1352,7 @@ BEEXTCMD31 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host Svc Checks ${use_grpc} host_1 @@ -1359,8 +1360,8 @@ BEEXTCMD31 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.active_checks from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.active_checks from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.active_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.active_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1368,8 +1369,8 @@ BEEXTCMD31 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select active_checks_enabled from resources where name='service_1' - ${output}= Query select active_checks_enabled from resources where name='service_1' + Log To Console SELECT active_checks_enabled FROM resources WHERE name='service_1' + ${output}= Query SELECT active_checks_enabled FROM resources WHERE name='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1377,8 +1378,8 @@ BEEXTCMD31 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select s.should_be_scheduled from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.should_be_scheduled from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.should_be_scheduled FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.should_be_scheduled FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 
1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1388,8 +1389,8 @@ BEEXTCMD31 Enable Host Svc Checks ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select s.active_checks from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.active_checks from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.active_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.active_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1397,8 +1398,8 @@ BEEXTCMD31 Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select active_checks_enabled from resources where name='service_1' - ${output}= Query select active_checks_enabled from resources where name='service_1' + Log To Console SELECT active_checks_enabled FROM resources WHERE name='service_1' + ${output}= Query SELECT active_checks_enabled FROM resources WHERE name='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1406,8 +1407,8 @@ BEEXTCMD31 Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select s.should_be_scheduled from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.should_be_scheduled from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.should_be_scheduled FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.should_be_scheduled FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" 
== "((1,),)" @@ -1431,7 +1432,7 @@ BEEXTCMD32 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Disable Host Svc Checks ${use_grpc} host_1 @@ -1439,8 +1440,8 @@ BEEXTCMD32 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.s.active_checks from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.active_checks from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.s.active_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.active_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1448,8 +1449,8 @@ BEEXTCMD32 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select s.should_be_scheduled from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.should_be_scheduled from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.should_be_scheduled FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.should_be_scheduled FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1459,8 +1460,8 @@ BEEXTCMD32 
Enable Host Svc Checks ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select s.active_checks from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.active_checks from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.active_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.active_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1468,8 +1469,8 @@ BEEXTCMD32 Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select s.should_be_scheduled from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.should_be_scheduled from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.should_be_scheduled FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.should_be_scheduled FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1499,7 +1500,7 @@ BEEXTCMD33 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host Svc Notifications ${use_grpc} host_1 @@ -1507,8 +1508,8 @@ BEEXTCMD33 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.notify from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.notify from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.notify FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.notify FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1516,10 +1517,10 @@ BEEXTCMD33 Should Be Equal As Strings ${output} ((0,),) Enable Host Svc Notifications ${use_grpc} host_1 - + FOR ${index} IN RANGE 30 - Log To Console select s.notify from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.notify from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.notify FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.notify FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1544,7 +1545,7 @@ BEEXTCMD34 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Host Svc Notifications ${use_grpc} host_1 @@ -1552,19 +1553,19 @@ BEEXTCMD34 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.notify from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.notify from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.notify FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.notify FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" END Should Be Equal As Strings ${output} ((0,),) - + Enable Host Svc Notifications ${use_grpc} host_1 - + FOR ${index} IN RANGE 30 - Log To Console select s.notify from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.notify from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.notify FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.notify FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1594,7 +1595,7 @@ BEEXTCMD35 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Passive Host Checks ${use_grpc} host_1 @@ -1602,8 +1603,8 @@ BEEXTCMD35 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select passive_checks from hosts where name='host_1' - ${output}= Query select passive_checks from hosts where name='host_1' + Log To Console SELECT passive_checks FROM hosts WHERE name='host_1' + ${output}= Query SELECT passive_checks FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1611,8 +1612,8 @@ BEEXTCMD35 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select passive_checks_enabled from resources where name='host_1' - ${output}= Query select passive_checks_enabled from resources where name='host_1' + Log To Console SELECT passive_checks_enabled FROM resources WHERE name='host_1' + ${output}= Query SELECT passive_checks_enabled FROM resources WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1622,8 +1623,8 @@ BEEXTCMD35 Enable Passive Host Checks ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select passive_checks from hosts where name='host_1' - ${output}= Query select passive_checks from hosts where name='host_1' + Log To Console SELECT passive_checks FROM hosts WHERE name='host_1' + ${output}= Query SELECT passive_checks FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1631,8 +1632,8 @@ BEEXTCMD35 Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select passive_checks_enabled from resources where name='host_1' - ${output}= Query select passive_checks_enabled from resources where name='host_1' + Log To Console SELECT passive_checks_enabled FROM resources WHERE name='host_1' + ${output}= Query SELECT passive_checks_enabled FROM resources WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR 
LOOP IF "${output}" == "((1,),)" @@ -1657,7 +1658,7 @@ BEEXTCMD36 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Disable Passive Host Checks ${use_grpc} host_1 @@ -1665,8 +1666,8 @@ BEEXTCMD36 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select passive_checks from hosts where name='host_1' - ${output}= Query select passive_checks from hosts where name='host_1' + Log To Console SELECT passive_checks FROM hosts WHERE name='host_1' + ${output}= Query SELECT passive_checks FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1676,8 +1677,8 @@ BEEXTCMD36 Enable Passive Host Checks ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select passive_checks from hosts where name='host_1' - ${output}= Query select passive_checks from hosts where name='host_1' + Log To Console SELECT passive_checks FROM hosts WHERE name='host_1' + ${output}= Query SELECT passive_checks FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1707,7 +1708,7 @@ BEEXTCMD37 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Passive Svc Checks ${use_grpc} host_1 service_1 @@ -1715,8 +1716,8 @@ BEEXTCMD37 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.passive_checks from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.passive_checks from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.passive_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.passive_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1724,8 +1725,8 @@ BEEXTCMD37 Should Be Equal As Strings ${output} ((0,),) FOR ${index} IN RANGE 30 - Log To Console select passive_checks_enabled from resources where name='service_1' - ${output}= Query select passive_checks_enabled from resources where name='service_1' + Log To Console SELECT passive_checks_enabled FROM resources WHERE name='service_1' + ${output}= Query SELECT passive_checks_enabled FROM resources WHERE name='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1735,8 +1736,8 @@ BEEXTCMD37 Enable Passive Svc Checks ${use_grpc} host_1 service_1 FOR ${index} IN RANGE 30 - Log To Console select s.passive_checks from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.passive_checks from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.passive_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.passive_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console 
${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1744,8 +1745,8 @@ BEEXTCMD37 Should Be Equal As Strings ${output} ((1,),) FOR ${index} IN RANGE 30 - Log To Console select passive_checks_enabled from resources where name='service_1' - ${output}= Query select passive_checks_enabled from resources where name='service_1' + Log To Console SELECT passive_checks_enabled FROM resources WHERE name='service_1' + ${output}= Query SELECT passive_checks_enabled FROM resources WHERE name='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1770,7 +1771,7 @@ BEEXTCMD38 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Disable Passive Svc Checks ${use_grpc} host_1 service_1 @@ -1778,8 +1779,8 @@ BEEXTCMD38 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.passive_checks from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.passive_checks from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.passive_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.passive_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1789,8 +1790,8 @@ BEEXTCMD38 Enable Passive Svc Checks ${use_grpc} host_1 service_1 FOR ${index} IN RANGE 30 - Log To Console select s.passive_checks from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.passive_checks from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.passive_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.passive_checks FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1820,7 +1821,7 @@ BEEXTCMD39 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Stop Obsessing Over Host ${use_grpc} host_1 @@ -1828,8 +1829,8 @@ BEEXTCMD39 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select obsess_over_host from hosts where name='host_1' - ${output}= Query select obsess_over_host from hosts where name='host_1' + Log To Console SELECT obsess_over_host FROM hosts WHERE name='host_1' + ${output}= Query SELECT obsess_over_host FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1839,8 +1840,8 @@ BEEXTCMD39 Start Obsessing Over Host ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select obsess_over_host from hosts where name='host_1' - ${output}= Query select obsess_over_host from hosts where name='host_1' + Log To Console SELECT obsess_over_host FROM hosts WHERE name='host_1' + ${output}= Query SELECT obsess_over_host FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1865,7 +1866,7 @@ BEEXTCMD40 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Stop Obsessing Over Host ${use_grpc} host_1 @@ -1873,8 +1874,8 @@ BEEXTCMD40 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select obsess_over_host from hosts where name='host_1' - ${output}= Query select obsess_over_host from hosts where name='host_1' + Log To Console SELECT obsess_over_host FROM hosts WHERE name='host_1' + ${output}= Query SELECT obsess_over_host FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1884,8 +1885,8 @@ BEEXTCMD40 Start Obsessing Over Host ${use_grpc} host_1 FOR ${index} IN RANGE 30 - Log To Console select obsess_over_host from hosts where name='host_1' - ${output}= Query select obsess_over_host from hosts where name='host_1' + Log To Console SELECT obsess_over_host FROM hosts WHERE name='host_1' + ${output}= Query SELECT obsess_over_host FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1915,7 +1916,7 @@ BEEXTCMD41 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Stop Obsessing Over Svc ${use_grpc} host_1 service_1 @@ -1923,8 +1924,8 @@ BEEXTCMD41 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.obsess_over_service from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.obsess_over_service from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.obsess_over_service FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.obsess_over_service FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1934,8 +1935,8 @@ BEEXTCMD41 Start Obsessing Over Svc ${use_grpc} host_1 service_1 FOR ${index} IN RANGE 30 - Log To Console select s.obsess_over_service from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.obsess_over_service from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.obsess_over_service FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.obsess_over_service FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -1960,7 +1961,7 @@ BEEXTCMD42 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Stop Obsessing Over Svc ${use_grpc} host_1 service_1 @@ -1968,8 +1969,8 @@ BEEXTCMD42 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.obsess_over_service from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.obsess_over_service from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.obsess_over_service FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.obsess_over_service FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((0,),)" @@ -1979,8 +1980,8 @@ BEEXTCMD42 Start Obsessing Over Svc ${use_grpc} host_1 service_1 FOR ${index} IN RANGE 30 - Log To Console select s.obsess_over_service from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.obsess_over_service from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.obsess_over_service FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.obsess_over_service FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((1,),)" @@ -2011,7 +2012,7 @@ BEEXTCMD_GRPC1 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Normal Svc Check Interval ${use_grpc} host_1 service_1 10 @@ -2019,8 +2020,8 @@ BEEXTCMD_GRPC1 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 300 - Log To Console select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10.0,),)" @@ -2046,7 +2047,7 @@ BEEXTCMD_GRPC2 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Normal Svc Check Interval ${use_grpc} host_1 service_1 15 @@ -2054,8 +2055,8 @@ BEEXTCMD_GRPC2 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15.0,),)" @@ -2087,7 +2088,7 @@ BEEXTCMD_GRPC3 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Normal Host Check Interval ${use_grpc} host_1 10 @@ -2095,8 +2096,8 @@ BEEXTCMD_GRPC3 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select check_interval from hosts where name='host_1' - ${output}= Query select check_interval from hosts where name='host_1' + Log To Console SELECT check_interval FROM hosts WHERE name='host_1' + ${output}= Query SELECT check_interval FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10.0,),)" @@ -2123,7 +2124,7 @@ BEEXTCMD_GRPC4 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Normal Host Check Interval ${use_grpc} host_1 15 @@ -2131,8 +2132,8 @@ BEEXTCMD_GRPC4 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select check_interval from hosts where name='host_1' - ${output}= Query select check_interval from hosts where name='host_1' + Log To Console SELECT check_interval FROM hosts WHERE name='host_1' + ${output}= Query SELECT check_interval FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15.0,),)" @@ -2143,8 +2144,8 @@ BEEXTCMD_GRPC4 END BEEXTCMD_REVERSE_GRPC1 - [Documentation] external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and grpc reversed - [Tags] Broker Engine services extcmd + [Documentation] external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and reversed gRPC + [Tags] Broker Engine services extcmd bbdo3 Config Engine ${1} ${50} ${20} Config Broker rrd Config Broker central @@ -2164,9 +2165,10 @@ BEEXTCMD_REVERSE_GRPC1 
Log To Console external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 use_grpc=${use_grpc} Clear Retention ${start}= Get Current Date + Sleep 1s Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Normal Svc Check Interval ${use_grpc} host_1 service_1 10 @@ -2174,8 +2176,8 @@ BEEXTCMD_REVERSE_GRPC1 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 300 - Log To Console select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' + ${output}= Query SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10.0,),)" @@ -2205,7 +2207,7 @@ BEEXTCMD_REVERSE_GRPC2 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Normal Svc Check Interval ${use_grpc} host_1 service_1 15 @@ -2213,8 +2215,8 @@ BEEXTCMD_REVERSE_GRPC2 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' - ${output}= Query select s.check_interval from services s,hosts h where s.description='service_1' and h.name='host_1' + Log To Console SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' + ${output}= Query SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE h.name='host_1' AND s.description='service_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15.0,),)" @@ -2250,7 +2252,7 @@ BEEXTCMD_REVERSE_GRPC3 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Change Normal Host Check Interval ${use_grpc} host_1 10 @@ -2258,8 +2260,8 @@ BEEXTCMD_REVERSE_GRPC3 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select check_interval from hosts where name='host_1' - ${output}= Query select check_interval from hosts where name='host_1' + Log To Console SELECT check_interval FROM hosts WHERE name='host_1' + ${output}= Query SELECT check_interval FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((10.0,),)" @@ -2290,7 +2292,7 @@ BEEXTCMD_REVERSE_GRPC4 ${start}= Get Current Date Start Broker Start Engine - ${content}= Create List INITIAL SERVICE STATE: host_1;service_1; + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Change Normal Host Check Interval ${use_grpc} host_1 15 @@ -2298,8 +2300,8 @@ BEEXTCMD_REVERSE_GRPC4 Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} FOR ${index} IN RANGE 30 - Log To Console select check_interval from hosts where name='host_1' - ${output}= Query select check_interval from hosts where name='host_1' + Log To Console SELECT check_interval FROM hosts WHERE name='host_1' + ${output}= Query SELECT check_interval FROM hosts WHERE name='host_1' Log To Console ${output} Sleep 1s EXIT FOR LOOP IF "${output}" == "((15.0,),)" @@ -2308,3 +2310,182 @@ BEEXTCMD_REVERSE_GRPC4 Stop Engine Kindly Stop Broker END + + +BEEXTCMD_COMPRESS_GRPC1 + [Documentation] external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 and compressed grpc + [Tags] Broker Engine services extcmd + Config Engine ${1} ${50} ${20} + Config Broker rrd + Config Broker central + Config Broker module ${1} + Change Broker tcp output to grpc module0 + Change Broker tcp input to grpc 
central + Change Broker Compression Output module0 central-module-master-output yes + Change Broker Compression Input central centreon-broker-master-input yes + Broker Config Add Item module0 bbdo_version 3.0.0 + Broker Config Add Item central bbdo_version 3.0.0 + Broker Config Add Item rrd bbdo_version 3.0.0 + Broker Config Log central sql debug + Config Broker Sql Output central unified_sql + FOR ${use_grpc} IN RANGE 0 2 + Log To Console external command CHANGE_NORMAL_SVC_CHECK_INTERVAL on bbdo3.0 use_grpc=${use_grpc} + Clear Retention + ${start}= Get Current Date + Start Broker + Start Engine + ${content}= Create List INITIAL SERVICE STATE: host_50;service_1000; + ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 + Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. + Change Normal Svc Check Interval ${use_grpc} host_1 service_1 10 + + Connect To Database pymysql ${DBName} ${DBUser} ${DBPass} ${DBHost} ${DBPort} + + FOR ${index} IN RANGE 300 + Log To Console SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + ${output}= Query SELECT s.check_interval FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE s.description='service_1' AND h.name='host_1' + Log To Console ${output} + Sleep 1s + EXIT FOR LOOP IF "${output}" == "((10.0,),)" + END + Should Be Equal As Strings ${output} ((10.0,),) + Stop Engine + Kindly Stop Broker + END + + +BEATOI11 + [Documentation] external command SEND_CUSTOM_HOST_NOTIFICATION with option_number=1 should work + [Tags] Broker Engine host extcmd Notification atoi + Config Engine ${1} ${50} ${20} + Config Broker rrd + Config Broker central + Config Broker module ${1} + Broker Config Log central core error + Broker Config Log central sql debug + ${start}= Get Current Date + Start Broker + Start Engine + SEND CUSTOM HOST NOTIFICATION host_1 1 admin foobar + 
${content}= Create List EXTERNAL COMMAND: SEND_CUSTOM_HOST_NOTIFICATION;host_1;1;admin;foobar + ${result}= Find In Log With Timeout ${logEngine0} ${start} ${content} 60 + Should Be True ${result} msg=command argument notification_option must be an integer between 0 and 7. + Stop Engine + Kindly Stop Broker + +BEATOI12 + [Documentation] external command SEND_CUSTOM_HOST_NOTIFICATION with option_number>7 should fail + [Tags] Broker Engine host extcmd Notification atoi + Config Engine ${1} ${50} ${20} + Config Broker rrd + Config Broker central + Config Broker module ${1} + Broker Config Log central core error + Broker Config Log central sql debug + ${start}= Get Current Date + Start Broker + Start Engine + SEND CUSTOM HOST NOTIFICATION host_1 8 admin foobar + ${content}= Create List Error: could not send custom host notification: '8' must be an integer between 0 and 7 + ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 + Should Be True ${result} msg=command argument notification_option must be an integer between 0 and 7. + Stop Engine + Kindly Stop Broker + + +BEATOI13 + [Documentation] external command SCHEDULE SERVICE DOWNTIME with duration<0 should fail + [Tags] Broker Engine host extcmd atoi + Config Engine ${1} ${50} ${20} + Config Broker rrd + Config Broker central + Config Broker module ${1} + Broker Config Log central core error + Broker Config Log central sql debug + ${start}= Get Current Date + Start Broker + Start Engine + SCHEDULE SERVICE DOWNTIME host_1 service_1 -1 + ${content}= Create List Error: could not schedule downtime : duration + ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 + Should Be True ${result} msg=command argument duration must be an integer >= 0. 
+ Stop Engine + Kindly Stop Broker + + +BEATOI21 + [Documentation] external command ADD_HOST_COMMENT and DEL_HOST_COMMENT should work + [Tags] Broker Engine host extcmd atoi + Config Engine ${1} ${50} ${20} + Config Broker rrd + Config Broker central + Config Broker module ${1} + Broker Config Log central core error + Broker Config Log central sql debug + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + ADD HOST COMMENT host_1 1 user comment + ${content}= Create List ADD_HOST_COMMENT + ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 + Should Be True ${result} msg=the comment with id:1 was not added. + ${com_id}= Find Internal Id ${start} True 30 + DEL HOST COMMENT ${com_id} + ${result}= Find Internal Id ${start} False 30 + Should Be True ${result} msg=the comment with id:${com_id} was not deleted. + Stop Engine + Kindly Stop Broker + + +BEATOI22 + [Documentation] external command DEL_HOST_COMMENT with comment_id<0 should fail + [Tags] Broker Engine host extcmd atoi + Config Engine ${1} ${50} ${20} + Config Broker rrd + Config Broker central + Config Broker module ${1} + Broker Config Log central core error + Broker Config Log central sql debug + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + ADD HOST COMMENT host_1 1 user comment + ${content}= Create List ADD_HOST_COMMENT + ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 + Should Be True ${result} msg=the comment with id:1 was not added. + ${com_id}= Find Internal Id ${start} True 30 + DEL HOST COMMENT -1 + ${content}= Create List Error: could not delete comment : comment_id + ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 + Should Be True ${result} msg=comment_id must be an unsigned integer. + ${result}= Find Internal Id ${start} True 30 + Should Be True ${result} msg=comment with id:-1 was deleted. 
+ Stop Engine + Kindly Stop Broker + + +BEATOI23 + [Documentation] external command ADD_SVC_COMMENT with persistent=0 should work + [Tags] Broker Engine host extcmd atoi + Config Engine ${1} ${50} ${20} + Config Broker rrd + Config Broker central + Config Broker module ${1} + Broker Config Log central core error + Broker Config Log central sql debug + ${start}= Get Current Date + Start Broker + Start Engine + ADD SVC COMMENT host_1 service_1 0 user comment + ${content}= Create List ADD_SVC_COMMENT + ${result}= Find In Log with Timeout ${logEngine0} ${start} ${content} 60 + Should Be True ${result} msg=command argument persistent_flag must be 0 or 1. + Stop Engine + Kindly Stop Broker + + + + + diff --git a/tests/broker-engine/hostgroups.robot b/tests/broker-engine/hostgroups.robot index 3b44a9a32e7..3650542d342 100644 --- a/tests/broker-engine/hostgroups.robot +++ b/tests/broker-engine/hostgroups.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine add Hostgroup Library DatabaseLibrary @@ -40,7 +41,7 @@ EBNHG1 ${result}= Find In Log With Timeout ${centralLog} ${start} ${content} 45 Should Be True ${result} msg=One of the new host groups not found in logs. 
Stop Engine - Stop Broker + Kindly Stop Broker EBNHGU1 [Documentation] New host group with several pollers and connections to DB with broker configured with unified_sql diff --git a/tests/broker-engine/hosts-with-notes-and-actions.robot b/tests/broker-engine/hosts-with-notes-and-actions.robot index 6c087d47529..9d112471aa8 100644 --- a/tests/broker-engine/hosts-with-notes-and-actions.robot +++ b/tests/broker-engine/hosts-with-notes-and-actions.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine Creation of hosts with long action_url, notes and notes_url. Library DatabaseLibrary diff --git a/tests/broker-engine/log-v2_engine.robot b/tests/broker-engine/log-v2_engine.robot index 94ff430fcf2..7248eea3ba9 100644 --- a/tests/broker-engine/log-v2_engine.robot +++ b/tests/broker-engine/log-v2_engine.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine log_v2 Library DatabaseLibrary @@ -55,7 +56,7 @@ LOGV2EB1 END Should Be Equal As Strings ${output} ((1,),) Stop Engine - Stop Broker + Kindly Stop Broker LOGV2DB1 [Documentation] log-v2 disabled old log enabled check broker sink @@ -99,7 +100,7 @@ LOGV2DB1 END Should Be Equal As Strings ${output} ((1,),) Stop Engine - Stop Broker + Kindly Stop Broker LOGV2DB2 [Documentation] log-v2 disabled old log disabled check broker sink @@ -142,7 +143,7 @@ LOGV2DB2 END Should Be Equal As Strings ${output} ((0,),) Stop Engine - Stop Broker + Kindly Stop Broker LOGV2EB2 [Documentation] log-v2 enabled old log enabled check broker sink @@ -187,7 +188,7 @@ LOGV2EB2 Should Be Equal As Strings ${output} ((2,),) Stop Engine - Stop Broker + Kindly Stop Broker LOGV2EF1 [Documentation] log-v2 enabled old 
log disabled check logfile sink @@ -212,7 +213,7 @@ LOGV2EF1 ${result1}= Find In Log With Timeout ${logEngine0} ${start} ${content_v2} 30 Should Be True ${result1} Stop Engine - Stop Broker + Kindly Stop Broker LOGV2DF1 [Documentation] log-v2 disabled old log enabled check logfile sink @@ -240,7 +241,7 @@ LOGV2DF1 Should Be True ${result1} Should Not Be True ${result2} Stop Engine - Stop Broker + Kindly Stop Broker LOGV2DF2 [Documentation] log-v2 disabled old log disabled check logfile sink @@ -268,7 +269,7 @@ LOGV2DF2 Should Not Be True ${result1} Should Not Be True ${result2} Stop Engine - Stop Broker + Kindly Stop Broker LOGV2EF2 [Documentation] log-v2 enabled old log enabled check logfile sink @@ -296,7 +297,7 @@ LOGV2EF2 Should Be True ${result1} Should Be True ${result2} Stop Engine - Stop Broker + Kindly Stop Broker LOGV2BE2 [Documentation] log-v2 enabled old log enabled check broker sink is equal @@ -329,7 +330,7 @@ LOGV2BE2 Should Be True ${res} msg=one or other log are not duplicate in tables logs Stop Engine - Stop Broker + Kindly Stop Broker LOGV2FE2 [Documentation] log-v2 enabled old log enabled check logfile sink @@ -359,4 +360,4 @@ LOGV2FE2 ${res}= check engine logs are duplicated ${logEngine0} ${start} Should Be True ${res} msg=one or other log are not duplicate in logsfile Stop Engine - Stop Broker + Kindly Stop Broker diff --git a/tests/broker-engine/output-tables.robot b/tests/broker-engine/output-tables.robot index 1a0621eba57..e47c7d8af95 100644 --- a/tests/broker-engine/output-tables.robot +++ b/tests/broker-engine/output-tables.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Engine/Broker tests on bbdo_version 3.0.0 and protobuf bbdo embedded events. 
Library Process diff --git a/tests/broker-engine/retention-duplicates.robot b/tests/broker-engine/retention-duplicates.robot index a028180cb74..db7aacddd03 100644 --- a/tests/broker-engine/retention-duplicates.robot +++ b/tests/broker-engine/retention-duplicates.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker tests on dublicated data that could come from retention when centengine or cbd are restarted Library Process @@ -40,13 +41,13 @@ BERD1 ${result}= Check Connections Should Be True ${result} msg=Engine and Broker not connected. Sleep 5s - Stop Broker + Kindly Stop Broker Sleep 5s Clear Cache Start Broker Sleep 25s Stop Engine - Stop Broker + Kindly Stop Broker ${result}= Files Contain Same Json /tmp/lua-engine.log /tmp/lua.log Should Be True ${result} msg=Contents of /tmp/lua.log and /tmp/lua-engine.log do not match. ${result}= Check Multiplicity When Broker Restarted /tmp/lua-engine.log /tmp/lua.log @@ -84,7 +85,7 @@ BERD2 Start Engine Sleep 25s Stop Engine - Stop Broker + Kindly Stop Broker ${result}= Files Contain Same Json /tmp/lua-engine.log /tmp/lua.log Should Be True ${result} msg=Contents of /tmp/lua.log and /tmp/lua-engine.log do not match. ${result}= Check Multiplicity When Engine Restarted /tmp/lua-engine.log /tmp/lua.log @@ -119,13 +120,13 @@ BERDUC1 ${result}= Check Connections Should Be True ${result} msg=Engine and Broker not connected. Sleep 5s - Stop Broker + Kindly Stop Broker Sleep 5s Clear Cache Start Broker Sleep 25s Stop Engine - Stop Broker + Kindly Stop Broker ${result}= Check Multiplicity When Broker Restarted /tmp/lua-engine.log /tmp/lua.log Should Be True ${result} msg=There are events sent several times, see /tmp/lua-engine.log and /tmp/lua.log @@ -155,13 +156,13 @@ BERDUCU1 ${result}= Check Connections Should Be True ${result} msg=Engine and Broker not connected. 
Sleep 5s - Stop Broker + Kindly Stop Broker Sleep 5s Clear Cache Start Broker Sleep 25s Stop Engine - Stop Broker + Kindly Stop Broker ${result}= Check Multiplicity When Broker Restarted /tmp/lua-engine.log /tmp/lua.log Should Be True ${result} msg=There are events sent several times, see /tmp/lua-engine.log and /tmp/lua.log @@ -196,7 +197,7 @@ BERDUC2 Start Engine Sleep 25s Stop Engine - Stop Broker + Kindly Stop Broker ${result}= Check Multiplicity When Engine Restarted /tmp/lua-engine.log /tmp/lua.log Should Be True ${result} msg=There are events sent several times, see /tmp/lua-engine.log and /tmp/lua.log @@ -232,7 +233,7 @@ BERDUCU2 Start Engine Sleep 25s Stop Engine - Stop Broker + Kindly Stop Broker ${result}= Check Multiplicity When Engine Restarted /tmp/lua-engine.log /tmp/lua.log Should Be True ${result} msg=There are events sent several times, see /tmp/lua-engine.log and /tmp/lua.log @@ -265,13 +266,13 @@ BERDUC3U1 ${result}= Check Connections Should Be True ${result} msg=Engine and Broker not connected. 
Sleep 5s - Stop Broker + Kindly Stop Broker Sleep 5s Clear Cache Start Broker Sleep 25s Stop Engine - Stop Broker + Kindly Stop Broker ${result}= Check Multiplicity When Broker Restarted /tmp/lua-engine.log /tmp/lua.log Should Be True ${result} msg=There are events sent several times, see /tmp/lua-engine.log and /tmp/lua.log @@ -310,7 +311,7 @@ BERDUC3U2 Start Engine Sleep 25s Stop Engine - Stop Broker + Kindly Stop Broker ${result}= Check Multiplicity When Engine Restarted /tmp/lua-engine.log /tmp/lua.log Should Be True ${result} msg=There are events sent several times, see /tmp/lua-engine.log and /tmp/lua.log diff --git a/tests/broker-engine/reverse-connection.robot b/tests/broker-engine/reverse-connection.robot index cc022a70070..424d1e0d88b 100644 --- a/tests/broker-engine/reverse-connection.robot +++ b/tests/broker-engine/reverse-connection.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine communication with or without compression Library Process @@ -33,14 +34,14 @@ BRGC1 Run Reverse Bam ${50} ${0.2} - Stop Broker + Kindly Stop Broker Stop Engine - ${content}= Create List New incoming connection 'centreon-broker-master-map-2' file: end of file '/var/lib/centreon-broker//central-broker-master.queue.centreon-broker-master-map-2' reached, erasing it + ${content}= Create List New incoming connection 'centreon-broker-master-map-2' file: end of file '${VarRoot}/lib/centreon-broker//central-broker-master.queue.centreon-broker-master-map-2' reached, erasing it ${log}= Catenate SEPARATOR= ${BROKER_LOG} /central-broker-master.log ${result}= Find In Log With Timeout ${log} ${start} ${content} 40 Should Be True ${result} msg=Connection to map has failed. - File Should Not Exist /var/lib/centreon-broker/central-broker-master.queue.centreon-broker-master-map* msg=There should not exist que map files. 
+ File Should Not Exist ${VarRoot}/lib/centreon-broker/central-broker-master.queue.centreon-broker-master-map* msg=There should not exist que map files. BRCTS1 @@ -61,14 +62,14 @@ BRCTS1 Run Reverse Bam ${150} ${10} - Stop Broker + Kindly Stop Broker Stop Engine - ${content}= Create List New incoming connection 'centreon-broker-master-map-2' file: end of file '/var/lib/centreon-broker//central-broker-master.queue.centreon-broker-master-map-2' reached, erasing it + ${content}= Create List New incoming connection 'centreon-broker-master-map-2' file: end of file '${VarRoot}/lib/centreon-broker//central-broker-master.queue.centreon-broker-master-map-2' reached, erasing it ${log}= Catenate SEPARATOR= ${BROKER_LOG} /central-broker-master.log ${result}= Find In Log With Timeout ${log} ${start} ${content} 40 Should Be True ${result} msg=Connection to map has failed - File Should Not Exist /var/lib/centreon-broker/central-broker-master.queue.centreon-broker-master-map* msg=There should not exist queue map files. + File Should Not Exist ${VarRoot}/lib/centreon-broker/central-broker-master.queue.centreon-broker-master-map* msg=There should not exist queue map files. 
BRCS1 @@ -87,11 +88,11 @@ BRCS1 ${result}= Check Connections Should Be True ${result} msg=Engine and Broker not connected - Stop Broker + Kindly Stop Broker Stop Engine - ${content}= Create List New incoming connection 'centreon-broker-master-map-2' file: end of file '/var/lib/centreon-broker//central-broker-master.queue.centreon-broker-master-map-2' reached, erasing it + ${content}= Create List New incoming connection 'centreon-broker-master-map-2' file: end of file '${VarRoot}/lib/centreon-broker//central-broker-master.queue.centreon-broker-master-map-2' reached, erasing it ${log}= Catenate SEPARATOR= ${BROKER_LOG} /central-broker-master.log ${result}= Find In Log With Timeout ${log} ${start} ${content} 40 Should Not Be True ${result} msg=Connection to map has failed - File Should Not Exist /var/lib/centreon-broker/central-broker-master.queue.centreon-broker-master-map-2 msg=There should not exist queue map files. + File Should Not Exist ${VarRoot}/lib/centreon-broker/central-broker-master.queue.centreon-broker-master-map-2 msg=There should not exist queue map files. diff --git a/tests/broker-engine/rrd-from-db.robot b/tests/broker-engine/rrd-from-db.robot new file mode 100644 index 00000000000..35821c137f5 --- /dev/null +++ b/tests/broker-engine/rrd-from-db.robot @@ -0,0 +1,195 @@ +*** Settings *** +Resource ../resources/resources.robot +Suite Setup Clean Before Suite +Suite Teardown Clean After Suite +Test Setup Stop Processes +Test Teardown Save logs If Failed + +Documentation Centreon Broker RRD metric deletion from the legacy query made by the php. +Library DatabaseLibrary +Library Process +Library OperatingSystem +Library DateTime +Library Collections +Library ../resources/Engine.py +Library ../resources/Broker.py +Library ../resources/Common.py + +*** Test Cases *** +BRRDDMDB1 + Start Mysql + [Documentation] RRD metrics deletion from metric ids with a query in centreon_storage. 
+ [Tags] RRD metric deletion unified_sql mysql + Config Engine ${1} + Config Broker rrd + Config Broker central + Config Broker Sql Output central unified_sql + Config Broker module + Broker Config Log central grpc error + Broker Config Log central sql info + Broker Config Log central core error + Broker Config Log rrd rrd debug + Broker Config Log rrd core error + Create Metrics 3 + ${start}= Get Current Date + Start Broker + Start Engine + ${result}= Check Connections + Should Be True ${result} msg=Engine and Broker not connected + + # We choose 3 metrics to remove. + ${metrics}= Get Metrics To Delete 3 + Log To Console Metrics to delete ${metrics} + + ${empty}= Create List + Remove Graphs from DB ${empty} ${metrics} + Reload Broker + ${metrics_str}= Catenate SEPARATOR=, @{metrics} + ${content}= Create List metrics ${metrics_str} erased from database + + ${result}= Find In Log With Timeout ${centralLog} ${start} ${content} 30 + Should Be True ${result} msg=No log message telling about metrics ${metrics_str} deletion. + FOR ${m} IN @{metrics} + Log to Console Waiting for ${VarRoot}/lib/centreon/metrics/${m}.rrd to be deleted + Wait Until Removed ${VarRoot}/lib/centreon/metrics/${m}.rrd 20s + END + +BRRDDIDDB1 + [Documentation] RRD metrics deletion from index ids with a query in centreon_storage. 
+ [Tags] RRD metric deletion unified_sql + Config Engine ${1} + Config Broker rrd + Config Broker central + Config Broker Sql Output central unified_sql + Config Broker module + Broker Config Log central sql info + Broker Config Log rrd rrd debug + Broker Config Log rrd core error + Create Metrics 3 + + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + ${result}= Check Connections + Should Be True ${result} msg=Engine and Broker not connected + + log to console STEP1 + ${indexes}= Get Indexes To Delete 2 + log to console STEP2 + ${metrics}= Get Metrics Matching Indexes ${indexes} + log to console STEP3 + Log To Console indexes ${indexes} to delete with their metrics + + ${empty}= Create List + Remove Graphs from DB ${indexes} ${empty} + Reload Broker + ${indexes_str}= Catenate SEPARATOR=, @{indexes} + ${content}= Create List indexes ${indexes_str} erased from database + + ${result}= Find In Log With Timeout ${centralLog} ${start} ${content} 30 + Should Be True ${result} msg=No log message telling about indexes ${indexes_str} deletion. 
+ FOR ${i} IN @{indexes} + log to console Wait for ${VarRoot}/lib/centreon/status/${i}.rrd to be deleted + Wait Until Removed ${VarRoot}/lib/centreon/status/${i}.rrd 20s + END + FOR ${m} IN @{metrics} + log to console Wait for ${VarRoot}/lib/centreon/metrics/${m}.rrd to be deleted + Wait Until Removed ${VarRoot}/lib/centreon/metrics/${m}.rrd 20s + END + +BRRDRBDB1 + [Documentation] RRD metric rebuild with a query in centreon_storage and unified sql + [Tags] RRD metric rebuild unified_sql + Config Engine ${1} + Config Broker rrd + Config Broker central + Config Broker Sql Output central unified_sql + Config Broker module + Broker Config Log rrd rrd trace + Broker Config Log central sql trace + Create Metrics 3 + + ${start}= Get Current Date + Start Broker + Start Engine + ${result}= Check Connections + Should Be True ${result} msg=Engine and Broker not connected + + # We get 3 indexes to rebuild + ${index}= Get Indexes To Rebuild 3 + Rebuild Rrd Graphs from DB ${index} 1 + Reload Broker + Log To Console Indexes to rebuild: ${index} + ${metrics}= Get Metrics Matching Indexes ${index} + Log To Console Metrics to rebuild: ${metrics} + log to console Coucou4 + ${content}= Create List Metric rebuild: metric is sent to rebuild + ${result}= Find In Log With Timeout ${centralLog} ${start} ${content} 30 + Should Be True ${result} msg=Central did not send metrics to rebuild + + ${content1}= Create List RRD: Starting to rebuild metrics + ${result}= Find In Log With Timeout ${rrdLog} ${start} ${content1} 45 + Should Be True ${result} msg=RRD cbd did not receive metrics to rebuild START + + ${content1}= Create List RRD: Rebuilding metric + ${result}= Find In Log With Timeout ${rrdLog} ${start} ${content1} 45 + Should Be True ${result} msg=RRD cbd did not receive metrics to rebuild DATA + + ${content1}= Create List RRD: Finishing to rebuild metrics + ${result}= Find In Log With Timeout ${rrdLog} ${start} ${content1} 240 + Should Be True ${result} msg=RRD cbd did not receive 
metrics to rebuild END + FOR ${m} IN @{metrics} + ${value}= Evaluate ${m} / 2 + ${result}= Compare RRD Average Value ${m} ${value} + Should Be True ${result} msg=Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + END + +BRRDRBUDB1 + [Documentation] RRD metric rebuild with a query in centreon_storage and unified sql + [Tags] RRD metric rebuild unified_sql grpc + Config Engine ${1} + Config Broker rrd + Config Broker central + Config Broker Sql Output central unified_sql + Config Broker module + Broker Config Log rrd rrd trace + Broker Config Log central sql trace + Broker Config Add Item module0 bbdo_version 3.0.1 + Broker Config Add Item rrd bbdo_version 3.0.1 + Broker Config Add Item central bbdo_version 3.0.1 + Create Metrics 3 + + ${start}= Get Current Date + Start Broker + Start Engine + ${result}= Check Connections + Should Be True ${result} msg=Engine and Broker not connected + + # We get 3 indexes to rebuild + ${index}= Get Indexes To Rebuild 3 + Rebuild Rrd Graphs from DB ${index} 1 + Reload Broker + Log To Console Indexes to rebuild: ${index} + ${metrics}= Get Metrics Matching Indexes ${index} + Log To Console Metrics to rebuild: ${metrics} + ${content}= Create List Metric rebuild: metric is sent to rebuild + ${result}= Find In Log With Timeout ${centralLog} ${start} ${content} 30 + Should Be True ${result} msg=Central did not send metrics to rebuild + + ${content1}= Create List RRD: Starting to rebuild metrics + ${result}= Find In Log With Timeout ${rrdLog} ${start} ${content1} 30 + Should Be True ${result} msg=RRD cbd did not receive metrics to rebuild START + + ${content1}= Create List RRD: Rebuilding metric + ${result}= Find In Log With Timeout ${rrdLog} ${start} ${content1} 30 + Should Be True ${result} msg=RRD cbd did not receive metrics to rebuild DATA + + ${content1}= Create List RRD: Finishing to rebuild metrics + ${result}= Find In Log With Timeout ${rrdLog} ${start} ${content1} 240 + 
Should Be True ${result} msg=RRD cbd did not receive metrics to rebuild END + FOR ${m} IN @{metrics} + ${value}= Evaluate ${m} / 2 + ${result}= Compare RRD Average Value ${m} ${value} + Should Be True ${result} msg=Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + END diff --git a/tests/broker-engine/rrd.robot b/tests/broker-engine/rrd.robot index 4d72c1eeac3..d5fbc4c0fdc 100644 --- a/tests/broker-engine/rrd.robot +++ b/tests/broker-engine/rrd.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker RRD metric deletion Library DatabaseLibrary @@ -45,7 +46,7 @@ BRRDDM1 ${result}= Find In Log With Timeout ${centralLog} ${start} ${content} 30 Should Be True ${result} msg=No log message telling about metrics ${metrics_str} deletion. FOR ${m} IN @{metrics} - Wait Until Removed /var/lib/centreon/metrics/${m}.rrd 20s + Wait Until Removed ${VarRoot}/lib/centreon/metrics/${m}.rrd 20s END BRRDDID1 @@ -61,6 +62,7 @@ BRRDDID1 Create Metrics 3 ${start}= Get Current Date + Sleep 1s Start Broker Start Engine ${result}= Check Connections @@ -78,10 +80,10 @@ BRRDDID1 ${result}= Find In Log With Timeout ${centralLog} ${start} ${content} 30 Should Be True ${result} msg=No log message telling about indexes ${indexes_str} deletion. FOR ${i} IN @{indexes} - Wait Until Removed /var/lib/centreon/status/${i}.rrd 20s + Wait Until Removed ${VarRoot}/lib/centreon/status/${i}.rrd 20s END FOR ${m} IN @{metrics} - Wait Until Removed /var/lib/centreon/metrics/${m}.rrd 20s + Wait Until Removed ${VarRoot}/lib/centreon/metrics/${m}.rrd 20s END BRRDDMID1 @@ -141,7 +143,7 @@ BRRDDMU1 ${result}= Find In Log With Timeout ${centralLog} ${start} ${content} 50 Should Be True ${result} msg=No log message telling about metrics ${metrics_str} deletion. 
FOR ${m} IN @{metrics} - Wait Until Removed /var/lib/centreon/metrics/${m}.rrd 20s + Wait Until Removed ${VarRoot}/lib/centreon/metrics/${m}.rrd 20s END BRRDDIDU1 @@ -175,10 +177,10 @@ BRRDDIDU1 ${result}= Find In Log With Timeout ${centralLog} ${start} ${content} 30 Should Be True ${result} msg=No log message telling about indexes ${indexes_str} deletion. FOR ${i} IN @{indexes} - Wait Until Removed /var/lib/centreon/status/${i}.rrd 20s + Wait Until Removed ${VarRoot}/lib/centreon/status/${i}.rrd 20s END FOR ${m} IN @{metrics} - Wait Until Removed /var/lib/centreon/metrics/${m}.rrd 20s + Wait Until Removed ${VarRoot}/lib/centreon/metrics/${m}.rrd 20s END BRRDDMIDU1 diff --git a/tests/broker-engine/scheduler.robot b/tests/broker-engine/scheduler.robot index 17fb785e98d..9adf6a71b8b 100644 --- a/tests/broker-engine/scheduler.robot +++ b/tests/broker-engine/scheduler.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine log_v2 Library DatabaseLibrary @@ -41,4 +42,4 @@ ENRSCHE1 Should Be True ${result1} msg=the delta of last_check and next_check is not equal to 60. Should Be True ${result2} msg=the delta of last_check and next_check is not equal to 300. 
Stop Engine - Stop Broker + Kindly Stop Broker diff --git a/tests/broker-engine/servicegroups.robot b/tests/broker-engine/servicegroups.robot index fbd1edd9333..3dcbc68ce44 100644 --- a/tests/broker-engine/servicegroups.robot +++ b/tests/broker-engine/servicegroups.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine add servicegroup Library Process diff --git a/tests/broker-engine/services-increased.robot b/tests/broker-engine/services-increased.robot index f4d8ad05a03..22b4f9890ef 100644 --- a/tests/broker-engine/services-increased.robot +++ b/tests/broker-engine/services-increased.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine progressively add services Library Process diff --git a/tests/broker-engine/start-stop.robot b/tests/broker-engine/start-stop.robot index b9180d0344d..7e2291849cd 100644 --- a/tests/broker-engine/start-stop.robot +++ b/tests/broker-engine/start-stop.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine start/stop tests Library Process @@ -23,7 +24,7 @@ BESS1 Start Engine ${result}= Check Connections Should Be True ${result} - Stop Broker + Kindly Stop Broker Stop Engine BESS2 @@ -38,7 +39,7 @@ BESS2 ${result}= Check Connections Should Be True ${result} Stop Engine - Stop Broker + Kindly Stop Broker BESS3 [Documentation] Start-Stop Broker/Engine - Engine started first - Engine stopped first @@ -52,7 +53,7 @@ BESS3 ${result}= Check Connections Should Be True ${result} Stop Engine - Stop Broker + Kindly Stop Broker BESS4 
[Documentation] Start-Stop Broker/Engine - Engine started first - Broker stopped first @@ -65,7 +66,7 @@ BESS4 Start Broker ${result}= Check Connections Should Be True ${result} - Stop Broker + Kindly Stop Broker Stop Engine BESS5 @@ -80,7 +81,7 @@ BESS5 Start Engine ${result}= Check Connections Should Be True ${result} - Stop Broker + Kindly Stop Broker Stop Engine BESS_GRPC1 @@ -98,7 +99,7 @@ BESS_GRPC1 Start Engine ${result}= Check Connections Should Be True ${result} - Stop Broker + Kindly Stop Broker Stop Engine BESS_GRPC2 @@ -117,7 +118,7 @@ BESS_GRPC2 ${result}= Check Connections Should Be True ${result} Stop Engine - Stop Broker + Kindly Stop Broker BESS_GRPC3 [Documentation] Start-Stop grpc version Broker/Engine - Engine started first - Engine stopped first @@ -135,7 +136,7 @@ BESS_GRPC3 ${result}= Check Connections Should Be True ${result} Stop Engine - Stop Broker + Kindly Stop Broker BESS_GRPC4 [Documentation] Start-Stop grpc version Broker/Engine - Engine started first - Broker stopped first @@ -152,7 +153,7 @@ BESS_GRPC4 Start Broker ${result}= Check Connections Should Be True ${result} - Stop Broker + Kindly Stop Broker Stop Engine BESS_GRPC5 @@ -171,7 +172,7 @@ BESS_GRPC5 Start Engine ${result}= Check Connections Should Be True ${result} - Stop Broker + Kindly Stop Broker Stop Engine BESS_GRPC_COMPRESS1 @@ -191,5 +192,155 @@ BESS_GRPC_COMPRESS1 Start Engine ${result}= Check Connections Should Be True ${result} - Stop Broker + Kindly Stop Broker Stop Engine + + +BESS_CRYPTED_GRPC1 + [Documentation] Start-Stop grpc version Broker/Engine - well configured + [Tags] Broker Engine start-stop + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.key /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.crt /tmp/ + Change Broker tcp output to grpc central + Change Broker tcp output to grpc 
module0 + Change Broker tcp input to grpc central + Change Broker tcp input to grpc rrd + Add Broker tcp output grpc crypto module0 True False + Add Broker tcp input grpc crypto central True False + Remove Host from broker output module0 central-module-master-output + Add Host to broker output module0 central-module-master-output localhost + FOR ${i} IN RANGE 0 5 + Start Broker + Start Engine + ${result}= Check Connections + Should Be True ${result} + Kindly Stop Broker + Stop Engine + END + +BESS_CRYPTED_GRPC2 + [Documentation] Start-Stop grpc version Broker/Engine only server crypted + [Tags] Broker Engine start-stop + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.key /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.crt /tmp/ + Change Broker tcp output to grpc central + Change Broker tcp output to grpc module0 + Change Broker tcp input to grpc central + Change Broker tcp input to grpc rrd + Add Broker tcp input grpc crypto central True False + FOR ${i} IN RANGE 0 5 + Start Broker + Start Engine + Sleep 2s + Kindly Stop Broker + Stop Engine + END + +BESS_CRYPTED_GRPC3 + [Documentation] Start-Stop grpc version Broker/Engine only engine crypted + [Tags] Broker Engine start-stop + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.key /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.crt /tmp/ + Change Broker tcp output to grpc central + Change Broker tcp output to grpc module0 + Change Broker tcp input to grpc central + Change Broker tcp input to grpc rrd + Add Broker tcp output grpc crypto module0 True False + FOR ${i} IN RANGE 0 5 + Start Broker + Start Engine + Sleep 2s + Kindly Stop Broker + Stop Engine + END + 
+BESS_CRYPTED_REVERSED_GRPC1 + [Documentation] Start-Stop grpc version Broker/Engine - well configured + [Tags] Broker Engine start-stop + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.key /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.crt /tmp/ + Change Broker tcp output to grpc central + Change Broker tcp output to grpc module0 + Change Broker tcp input to grpc central + Change Broker tcp input to grpc rrd + Add Broker tcp output grpc crypto module0 True True + Add Broker tcp input grpc crypto central True True + Add Host to broker input central central-broker-master-input localhost + Remove Host from broker output module0 central-module-master-output + FOR ${i} IN RANGE 0 5 + Start Broker + Start Engine + ${result}= Check Connections + Should Be True ${result} + Sleep 2s + Kindly Stop Broker + Stop Engine + END + +BESS_CRYPTED_REVERSED_GRPC2 + [Documentation] Start-Stop grpc version Broker/Engine only engine server crypted + [Tags] Broker Engine start-stop + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.key /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.crt /tmp/ + Change Broker tcp output to grpc central + Change Broker tcp output to grpc module0 + Change Broker tcp input to grpc central + Change Broker tcp input to grpc rrd + Add Broker tcp output grpc crypto module0 True True + Add Host to broker input central central-broker-master-input localhost + Remove Host from broker output module0 central-module-master-output + FOR ${i} IN RANGE 0 5 + Start Broker + Start Engine + Sleep 5s + Kindly Stop Broker + Stop Engine + END + +BESS_CRYPTED_REVERSED_GRPC3 + [Documentation] Start-Stop grpc version 
Broker/Engine only engine crypted + [Tags] Broker Engine start-stop + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Change Broker tcp output to grpc central + Change Broker tcp output to grpc module0 + Change Broker tcp input to grpc central + Change Broker tcp input to grpc rrd + Add Broker tcp input grpc crypto central True True + Add Host to broker input central central-broker-master-input localhost + Remove Host from broker output module0 central-module-master-output + FOR ${i} IN RANGE 0 5 + Start Broker + Start Engine + Sleep 5s + Kindly Stop Broker + Stop Engine + END diff --git a/tests/broker-engine/tags.robot b/tests/broker-engine/tags.robot index 0e9a13fe65c..2394cd929f7 100644 --- a/tests/broker-engine/tags.robot +++ b/tests/broker-engine/tags.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Engine/Broker tests on tags. 
Library Process diff --git a/tests/broker-engine/tls.robot b/tests/broker-engine/tls.robot index 3120dc5fdb9..d29eaee46a7 100644 --- a/tests/broker-engine/tls.robot +++ b/tests/broker-engine/tls.robot @@ -3,6 +3,7 @@ Resource ../resources/resources.robot Suite Setup Clean Before Suite Suite Teardown Clean After Suite Test Setup Stop Processes +Test Teardown Save logs If Failed Documentation Centreon Broker and Engine communication with or without TLS Library Process @@ -60,15 +61,15 @@ BECT2 Config Broker module ${hostname}= Get Hostname - Create Key And Certificate ${hostname} /etc/centreon-broker/server.key /etc/centreon-broker/server.crt - Create Key And Certificate ${hostname} /etc/centreon-broker/client.key /etc/centreon-broker/client.crt + Create Key And Certificate localhost ${EtcRoot}/centreon-broker/server.key ${EtcRoot}/centreon-broker/server.crt + Create Key And Certificate localhost ${EtcRoot}/centreon-broker/client.key ${EtcRoot}/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input private_key /etc/centreon-broker/client.key - Broker Config Input set central central-broker-master-input public_cert /etc/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input ca_certificate /etc/centreon-broker/server.crt - Broker Config Output set module0 central-module-master-output private_key /etc/centreon-broker/server.key - Broker Config Output set module0 central-module-master-output public_cert /etc/centreon-broker/server.crt - Broker Config Output set module0 central-module-master-output ca_certificate /etc/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input private_key ${EtcRoot}/centreon-broker/client.key + Broker Config Input set central central-broker-master-input public_cert ${EtcRoot}/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input ca_certificate ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set 
module0 central-module-master-output private_key ${EtcRoot}/centreon-broker/server.key + Broker Config Output set module0 central-module-master-output public_cert ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set module0 central-module-master-output ca_certificate ${EtcRoot}/centreon-broker/client.crt Broker Config Log central tls debug Broker Config Log module0 tls debug Broker Config Log central bbdo info @@ -102,11 +103,11 @@ BECT3 Config Broker module ${hostname}= Get Hostname - Create Certificate ${hostname} /etc/centreon-broker/server.crt - Create Certificate ${hostname} /etc/centreon-broker/client.crt + Create Certificate ${hostname} ${EtcRoot}/centreon-broker/server.crt + Create Certificate ${hostname} ${EtcRoot}/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input ca_certificate /etc/centreon-broker/server.crt - Broker Config Output set module0 central-module-master-output ca_certificate /etc/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input ca_certificate ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set module0 central-module-master-output ca_certificate ${EtcRoot}/centreon-broker/client.crt Broker Config Log central tls debug Broker Config Log module0 tls debug Broker Config Log central bbdo info @@ -141,24 +142,24 @@ BECT4 Config Broker module Set Local Variable ${hostname} centreon - Create Key And Certificate ${hostname} /etc/centreon-broker/server.key /etc/centreon-broker/server.crt - Create Key And Certificate ${hostname} /etc/centreon-broker/client.key /etc/centreon-broker/client.crt + Create Key And Certificate ${hostname} ${EtcRoot}/centreon-broker/server.key ${EtcRoot}/centreon-broker/server.crt + Create Key And Certificate ${hostname} ${EtcRoot}/centreon-broker/client.key ${EtcRoot}/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input ca_certificate /etc/centreon-broker/server.crt - Broker Config Output 
set module0 central-module-master-output ca_certificate /etc/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input ca_certificate ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set module0 central-module-master-output ca_certificate ${EtcRoot}/centreon-broker/client.crt Broker Config Log central tls debug Broker Config Log module0 tls debug Broker Config Log central bbdo info Broker Config Log module0 bbdo info Broker Config Input set central central-broker-master-input tls yes - Broker Config Input set central central-broker-master-input private_key /etc/centreon-broker/client.key - Broker Config Input set central central-broker-master-input public_cert /etc/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input ca_certificate /etc/centreon-broker/server.crt + Broker Config Input set central central-broker-master-input private_key ${EtcRoot}/centreon-broker/client.key + Broker Config Input set central central-broker-master-input public_cert ${EtcRoot}/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input ca_certificate ${EtcRoot}/centreon-broker/server.crt Broker Config Input set central central-broker-master-input tls_hostname centreon Broker Config Output set module0 central-module-master-output tls yes - Broker Config Output set module0 central-module-master-output private_key /etc/centreon-broker/server.key - Broker Config Output set module0 central-module-master-output public_cert /etc/centreon-broker/server.crt - Broker Config Output set module0 central-module-master-output ca_certificate /etc/centreon-broker/client.crt + Broker Config Output set module0 central-module-master-output private_key ${EtcRoot}/centreon-broker/server.key + Broker Config Output set module0 central-module-master-output public_cert ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set module0 central-module-master-output ca_certificate 
${EtcRoot}/centreon-broker/client.crt # We get the current date just before starting broker ${start}= Get Current Date Start Broker @@ -225,16 +226,15 @@ BECT_GRPC2 Config Broker central Config Broker module - ${hostname}= Get Hostname - Create Key And Certificate ${hostname} /etc/centreon-broker/server.key /etc/centreon-broker/server.crt - Create Key And Certificate ${hostname} /etc/centreon-broker/client.key /etc/centreon-broker/client.crt + Create Key And Certificate localhost ${EtcRoot}/centreon-broker/server.key ${EtcRoot}/centreon-broker/server.crt + Create Key And Certificate localhost ${EtcRoot}/centreon-broker/client.key ${EtcRoot}/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input private_key /etc/centreon-broker/client.key - Broker Config Input set central central-broker-master-input public_cert /etc/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input ca_certificate /etc/centreon-broker/server.crt - Broker Config Output set module0 central-module-master-output private_key /etc/centreon-broker/server.key - Broker Config Output set module0 central-module-master-output public_cert /etc/centreon-broker/server.crt - Broker Config Output set module0 central-module-master-output ca_certificate /etc/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input private_key ${EtcRoot}/centreon-broker/client.key + Broker Config Input set central central-broker-master-input public_cert ${EtcRoot}/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input ca_certificate ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set module0 central-module-master-output private_key ${EtcRoot}/centreon-broker/server.key + Broker Config Output set module0 central-module-master-output public_cert ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set module0 central-module-master-output ca_certificate 
${EtcRoot}/centreon-broker/client.crt Broker Config Log central tls debug Broker Config Log module0 tls debug Broker Config Log central bbdo info @@ -270,11 +270,11 @@ BECT_GRPC3 Config Broker module ${hostname}= Get Hostname - Create Certificate ${hostname} /etc/centreon-broker/server.crt - Create Certificate ${hostname} /etc/centreon-broker/client.crt + Create Certificate ${hostname} ${EtcRoot}/centreon-broker/server.crt + Create Certificate ${hostname} ${EtcRoot}/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input ca_certificate /etc/centreon-broker/server.crt - Broker Config Output set module0 central-module-master-output ca_certificate /etc/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input ca_certificate ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set module0 central-module-master-output ca_certificate ${EtcRoot}/centreon-broker/client.crt Broker Config Log central tls debug Broker Config Log module0 tls debug Broker Config Log central bbdo info @@ -311,11 +311,11 @@ BECT_GRPC4 Config Broker module Set Local Variable ${hostname} centreon - Create Key And Certificate ${hostname} /etc/centreon-broker/server.key /etc/centreon-broker/server.crt - Create Key And Certificate ${hostname} /etc/centreon-broker/client.key /etc/centreon-broker/client.crt + Create Key And Certificate ${hostname} ${EtcRoot}/centreon-broker/server.key ${EtcRoot}/centreon-broker/server.crt + Create Key And Certificate ${hostname} ${EtcRoot}/centreon-broker/client.key ${EtcRoot}/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input ca_certificate /etc/centreon-broker/server.crt - Broker Config Output set module0 central-module-master-output ca_certificate /etc/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input ca_certificate ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set module0 central-module-master-output 
ca_certificate ${EtcRoot}/centreon-broker/client.crt Broker Config Log central tls debug Broker Config Log module0 tls debug Broker Config Log central bbdo info @@ -323,14 +323,14 @@ BECT_GRPC4 Change Broker tcp output to grpc module0 Change Broker tcp input to grpc central Broker Config Input set central central-broker-master-input tls yes - Broker Config Input set central central-broker-master-input private_key /etc/centreon-broker/client.key - Broker Config Input set central central-broker-master-input public_cert /etc/centreon-broker/client.crt - Broker Config Input set central central-broker-master-input ca_certificate /etc/centreon-broker/server.crt + Broker Config Input set central central-broker-master-input private_key ${EtcRoot}/centreon-broker/client.key + Broker Config Input set central central-broker-master-input public_cert ${EtcRoot}/centreon-broker/client.crt + Broker Config Input set central central-broker-master-input ca_certificate ${EtcRoot}/centreon-broker/server.crt Broker Config Input set central central-broker-master-input tls_hostname centreon Broker Config Output set module0 central-module-master-output tls yes - Broker Config Output set module0 central-module-master-output private_key /etc/centreon-broker/server.key - Broker Config Output set module0 central-module-master-output public_cert /etc/centreon-broker/server.crt - Broker Config Output set module0 central-module-master-output ca_certificate /etc/centreon-broker/client.crt + Broker Config Output set module0 central-module-master-output private_key ${EtcRoot}/centreon-broker/server.key + Broker Config Output set module0 central-module-master-output public_cert ${EtcRoot}/centreon-broker/server.crt + Broker Config Output set module0 central-module-master-output ca_certificate ${EtcRoot}/centreon-broker/client.crt # We get the current date just before starting broker ${start}= Get Current Date Start Broker diff --git a/tests/broker/command-line.robot b/tests/broker/command-line.robot 
index 2ada10989b2..b6632e0db4d 100644 --- a/tests/broker/command-line.robot +++ b/tests/broker/command-line.robot @@ -22,15 +22,37 @@ BCL1 Should be True ${expected} msg=expected error 'The option -s expects a positive integer' BCL2 - [Documentation] Starting broker with option '-s 5' should work + [Documentation] Starting broker with option '-s5' should work [Tags] Broker start-stop Config Broker central ${start}= Get Current Date exclude_millis=True - Start Broker With Args -s5 /etc/centreon-broker/central-broker.json + Sleep 1s + Start Broker With Args -s5 ${EtcRoot}/centreon-broker/central-broker.json ${table}= Create List Starting the TCP thread pool of 5 threads - Find in log with timeout ${centralLog} ${start} ${table} 30 + ${logger_res}= Find in log with timeout ${centralLog} ${start} ${table} 30 + Should be True ${logger_res} msg=Didn't find 5 threads in ${VarRoot}/log/centreon-broker/central-broker-master.log Stop Broker With Args +BCL3 + [Documentation] Starting broker with option '-D' should work and activate diagnostic mode + [Tags] Broker start-stop + Config Broker central + ${start}= Get Current Date exclude_millis=True + Sleep 1s + Start Broker With Args -D ${EtcRoot}/centreon-broker/central-broker.json + ${result}= Wait For Broker + ${expected}= Evaluate "diagnostic:" in """${result}""" + Should be True ${expected} msg=diagnostic mode didn't launch + +BCL4 + [Documentation] Starting broker with options '-s2' and '-D' should work.
+ [Tags] Broker start-stop + Config Broker central + Start Broker With Args -s2 -D ${EtcRoot}/centreon-broker/central-broker.json + ${result}= Wait For Broker + ${expected}= Evaluate "diagnostic:" in """${result}""" + Should be True ${expected} msg=diagnostic mode didn't launch + *** Keywords *** Start Broker With Args [Arguments] @{options} diff --git a/tests/broker/grpc-stream.robot b/tests/broker/grpc-stream.robot index 5e71796de44..b76d367a5aa 100644 --- a/tests/broker/grpc-stream.robot +++ b/tests/broker/grpc-stream.robot @@ -97,8 +97,8 @@ BGRPCSSU5 *** Keywords *** Start Stop Service [Arguments] ${interval} - Start Process /usr/sbin/cbd /etc/centreon-broker/central-broker.json alias=b1 - Start Process /usr/sbin/cbd /etc/centreon-broker/central-rrd.json alias=b2 + Start Process /usr/sbin/cbd ${EtcRoot}/centreon-broker/central-broker.json alias=b1 + Start Process /usr/sbin/cbd ${EtcRoot}/centreon-broker/central-rrd.json alias=b2 Sleep ${interval} Send Signal To Process SIGTERM b1 ${result}= Wait For Process b1 timeout=60s on_timeout=kill @@ -109,7 +109,7 @@ Start Stop Service Start Stop Instance [Arguments] ${interval} - Start Process /usr/sbin/cbd /etc/centreon-broker/central-broker.json alias=b1 + Start Process /usr/sbin/cbd ${EtcRoot}/centreon-broker/central-broker.json alias=b1 Sleep ${interval} Send Signal To Process SIGTERM b1 ${result}= Wait For Process b1 timeout=60s on_timeout=kill diff --git a/tests/broker/sql.robot b/tests/broker/sql.robot index 7917befa07b..0d7b225d24a 100644 --- a/tests/broker/sql.robot +++ b/tests/broker/sql.robot @@ -26,7 +26,7 @@ BDB1 ${content}= Create List storage and sql streams do not have the same database configuration ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 30 Should Be True ${result} msg=A message should tell that sql and storage outputs do not have the same configuration. 
- Stop Broker + Kindly Stop Broker END BDB2 @@ -43,7 +43,7 @@ BDB2 ${content}= Create List storage and sql streams do not have the same database configuration ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 20 Should Be True ${result} msg=A log telling the impossibility to establish a connection between the storage stream and the database should appear. - Stop Broker + Kindly Stop Broker END BDB3 @@ -59,7 +59,7 @@ BDB3 ${content}= Create List global error: mysql_connection: error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 30 Should Be True ${result} msg=No message about the database not connected. - Stop Broker + Kindly Stop Broker END BDB4 @@ -76,7 +76,7 @@ BDB4 ${content}= Create List error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 20 Should Be True ${result} msg=No message about the fact that cbd is not correctly connected to the database. - Stop Broker + Kindly Stop Broker END BDB5 @@ -93,7 +93,7 @@ BDB5 ${content}= Create List error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 50 Should Be True ${result} msg=No message about the disconnection between cbd and the database - Stop Broker + Kindly Stop Broker END BDB6 @@ -109,7 +109,7 @@ BDB6 ${content}= Create List error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 20 Should Be True ${result} msg=No message about the disconnection between cbd and the database - Stop Broker + Kindly Stop Broker END BDB7 @@ -125,7 +125,7 @@ BDB7 ${content}= Create List mysql_connection: error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 20 Should Be True ${result} - Stop Broker + Kindly Stop Broker BDB8 [Documentation] access denied when database user password is wrong for perfdata/sql @@ -140,7 +140,7 @@ BDB8 ${content}= Create List mysql_connection: 
error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 20 Should Be True ${result} - Stop Broker + Kindly Stop Broker BDB9 [Documentation] access denied when database user password is wrong for sql @@ -154,7 +154,7 @@ BDB9 ${content}= Create List mysql_connection: error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 20 Should Be True ${result} - Stop Broker + Kindly Stop Broker BDB10 [Documentation] connection should be established when user password is good for sql/perfdata @@ -168,7 +168,7 @@ BDB10 ${content}= Create List sql stream initialization storage stream initialization ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 40 Should Be True ${result} - Stop Broker + Kindly Stop Broker BEDB2 [Documentation] start broker/engine and then start MariaDB => connection is established @@ -187,7 +187,7 @@ BEDB2 Start Mysql ${result}= Check Broker Stats exist central mysql manager waiting tasks in connection 0 Should Be True ${result} msg=Message about the connection to the database is missing. - Stop Broker + Kindly Stop Broker Stop Engine BEDB3 @@ -219,7 +219,7 @@ BEDB3 Exit For Loop If ${result} END Should Be True ${result} msg=gRPC does not return 3 connections as expected - Stop Broker + Kindly Stop Broker Stop Engine BEDB4 @@ -245,7 +245,7 @@ BEDB4 Exit For Loop If ${result} END Should Be True ${result} msg=gRPC does not return 3 connections as expected - Stop Broker + Kindly Stop Broker Stop Engine BDBM1 @@ -269,7 +269,7 @@ BDBM1 Start Mysql ${result}= Get Broker Stats Size central mysql manager Should Be True ${result} >= ${c} + 1 msg=The stats file should contain at less ${c} + 1 connections to the database. 
- Stop Broker + Kindly Stop Broker Stop Engine END @@ -287,7 +287,7 @@ BDBU1 ${content}= Create List Table 'centreon.instances' doesn't exist ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 30 Should Be True ${result} - Stop Broker + Kindly Stop Broker END BDBU3 @@ -304,7 +304,7 @@ BDBU3 ${content}= Create List global error: mysql_connection: error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 30 Should Be True ${result} - Stop Broker + Kindly Stop Broker END BDBU5 @@ -321,7 +321,7 @@ BDBU5 ${content}= Create List error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 50 Should Be True ${result} msg=Cannot find the message telling cbd is not connected to the database. - Stop Broker + Kindly Stop Broker END BDBU7 @@ -337,7 +337,7 @@ BDBU7 ${content}= Create List mysql_connection: error while starting connection ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 20 Should Be True ${result} msg=Error concerning cbd not connected to the database is missing. - Stop Broker + Kindly Stop Broker BDBU10 [Documentation] Connection should be established when user password is good for unified sql @@ -353,7 +353,7 @@ BDBU10 ${content}= Create List mysql_connection: commit ${result}= Find In Log with timeout ${centralLog} ${start} ${content} 40 Should Be True ${result} msg=Log concerning a commit (connection ok) is missing. 
- Stop Broker + Kindly Stop Broker BDBMU1 [Documentation] start broker/engine with unified sql and then start MariaDB => connection is established @@ -379,6 +379,6 @@ BDBMU1 Should Be True ${result} msg=No stats on mysql manager found ${result}= Get Broker Stats size central mysql manager ${60} Should Be True ${result} >= ${c} + 1 msg=Broker mysql manager stats do not show the ${c} connections - Stop Broker + Kindly Stop Broker Stop Engine END diff --git a/tests/broker/start-stop.robot b/tests/broker/start-stop.robot index d7a54d0bff7..c8d7e13beb7 100644 --- a/tests/broker/start-stop.robot +++ b/tests/broker/start-stop.robot @@ -85,8 +85,8 @@ BSSU5 *** Keywords *** Start Stop Service [Arguments] ${interval} - Start Process /usr/sbin/cbd /etc/centreon-broker/central-broker.json alias=b1 - Start Process /usr/sbin/cbd /etc/centreon-broker/central-rrd.json alias=b2 + Start Process /usr/sbin/cbd ${EtcRoot}/centreon-broker/central-broker.json alias=b1 + Start Process /usr/sbin/cbd ${EtcRoot}/centreon-broker/central-rrd.json alias=b2 Sleep ${interval} Send Signal To Process SIGTERM b1 ${result}= Wait For Process b1 timeout=60s on_timeout=kill @@ -97,7 +97,7 @@ Start Stop Service Start Stop Instance [Arguments] ${interval} - Start Process /usr/sbin/cbd /etc/centreon-broker/central-broker.json alias=b1 + Start Process /usr/sbin/cbd ${EtcRoot}/centreon-broker/central-broker.json alias=b1 Sleep ${interval} Send Signal To Process SIGTERM b1 ${result}= Wait For Process b1 timeout=60s on_timeout=kill diff --git a/tests/ccc/ccc.robot b/tests/ccc/ccc.robot new file mode 100644 index 00000000000..de369845ad8 --- /dev/null +++ b/tests/ccc/ccc.robot @@ -0,0 +1,266 @@ +*** Settings *** +Resource ../resources/resources.robot +Suite Setup Clean Before Suite +Suite Teardown Clean After Suite +Test Setup Stop Processes + +Documentation ccc tests with engine and broker +Library Process +Library DateTime +Library OperatingSystem +Library ../resources/Engine.py +Library 
../resources/Broker.py +Library ../resources/Common.py + +*** Test Cases *** +BECCC1 + [Documentation] ccc without port fails with an error message + [Tags] Broker Engine ccc + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Clear Retention + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + Sleep 3s + Start Process /usr/bin/ccc stderr=/tmp/output.txt + FOR ${i} IN RANGE 10 + Wait Until Created /tmp/output.txt + ${content}= Get File /tmp/output.txt + EXIT FOR LOOP IF len("${content.strip()}") > 0 + Sleep 1s + END + should be equal as strings ${content.strip()} You must specify a port for the connection to the gRPC server + Stop Engine + Kindly Stop Broker + Remove File /tmp/output.txt + +BECCC2 + [Documentation] ccc with -p 51001 connects to central cbd gRPC server. + [Tags] Broker Engine protobuf bbdo ccc + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Broker Config Add Item module0 bbdo_version 3.0.0 + Broker Config Add Item central bbdo_version 3.0.0 + Broker Config Add Item rrd bbdo_version 3.0.0 + Broker Config Log central sql trace + Config Broker Sql Output central unified_sql + Broker Config Output Set central central-broker-unified-sql store_in_resources yes + Broker Config Output Set central central-broker-unified-sql store_in_hosts_services no + Clear Retention + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + Sleep 3s + Start Process /usr/bin/ccc -p 51001 stderr=/tmp/output.txt + FOR ${i} IN RANGE 10 + Wait Until Created /tmp/output.txt + ${content}= Get File /tmp/output.txt + EXIT FOR LOOP IF len("${content.strip()}") > 0 + Sleep 1s + END + Should Be Equal As Strings ${content.strip()} Connected to a Centreon Broker 22.04.1 gRPC server + Stop Engine + Kindly Stop Broker + Remove File /tmp/output.txt + +BECCC3 + [Documentation] ccc with -p 50001 connects to centengine gRPC server. 
+ [Tags] Broker Engine protobuf bbdo + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Broker Config Add Item module0 bbdo_version 3.0.0 + Broker Config Add Item central bbdo_version 3.0.0 + Broker Config Add Item rrd bbdo_version 3.0.0 + Broker Config Log central sql trace + Config Broker Sql Output central unified_sql + Broker Config Output Set central central-broker-unified-sql store_in_resources yes + Broker Config Output Set central central-broker-unified-sql store_in_hosts_services no + Clear Retention + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + Sleep 3s + Start Process /usr/bin/ccc -p 50001 stderr=/tmp/output.txt + FOR ${i} IN RANGE 10 + Wait Until Created /tmp/output.txt + ${content}= Get File /tmp/output.txt + EXIT FOR LOOP IF len("${content.strip()}") > 0 + Sleep 1s + END + Should Be Equal As Strings ${content.strip()} Connected to a Centreon Engine 22.04.1 gRPC server + Stop Engine + Kindly Stop Broker + Remove File /tmp/output.txt + +BECCC4 + [Documentation] ccc with -p 51001 -l returns the available functions from Broker gRPC server + [Tags] Broker Engine protobuf bbdo ccc + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Broker Config Add Item module0 bbdo_version 3.0.0 + Broker Config Add Item central bbdo_version 3.0.0 + Broker Config Add Item rrd bbdo_version 3.0.0 + Broker Config Log central sql trace + Config Broker Sql Output central unified_sql + Broker Config Output Set central central-broker-unified-sql store_in_resources yes + Broker Config Output Set central central-broker-unified-sql store_in_hosts_services no + Clear Retention + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + Sleep 3s + Start Process /usr/bin/ccc -p 51001 -l stdout=/tmp/output.txt + FOR ${i} IN RANGE 10 + Wait Until Created /tmp/output.txt + ${content}= Get File /tmp/output.txt + EXIT FOR LOOP IF len("""${content.strip()}""") > 0 + Sleep 1s + 
END + ${contains}= Evaluate "GetVersion" in """${content}""" and "RemovePoller" in """${content}""" + Should Be True ${contains} msg=The list of methods should contain GetVersion(Empty) + Stop Engine + Kindly Stop Broker + Remove File /tmp/output.txt + +BECCC5 + [Documentation] ccc with -p 51001 -l GetVersion returns an error because we can't execute a command with -l. + [Tags] Broker Engine protobuf bbdo ccc + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Broker Config Add Item module0 bbdo_version 3.0.0 + Broker Config Add Item central bbdo_version 3.0.0 + Broker Config Add Item rrd bbdo_version 3.0.0 + Broker Config Log central sql trace + Config Broker Sql Output central unified_sql + Broker Config Output Set central central-broker-unified-sql store_in_resources yes + Broker Config Output Set central central-broker-unified-sql store_in_hosts_services no + Clear Retention + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + Sleep 3s + Start Process /usr/bin/ccc -p 51001 -l GetVersion stderr=/tmp/output.txt + FOR ${i} IN RANGE 10 + Wait Until Created /tmp/output.txt + ${content}= Get File /tmp/output.txt + EXIT FOR LOOP IF len("""${content.strip()}""") > 0 + Sleep 1s + END + ${contains}= Evaluate "The list argument expects no command" in """${content}""" + Should Be True ${contains} msg=When -l option is applied, we can't call a command. 
+ Stop Engine + Kindly Stop Broker + Remove File /tmp/output.txt + +BECCC6 + [Documentation] ccc with -p 51001 GetVersion{} calls the GetVersion command + [Tags] Broker Engine protobuf bbdo ccc + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Broker Config Add Item module0 bbdo_version 3.0.0 + Broker Config Add Item central bbdo_version 3.0.0 + Broker Config Add Item rrd bbdo_version 3.0.0 + Broker Config Log central sql trace + Config Broker Sql Output central unified_sql + Broker Config Output Set central central-broker-unified-sql store_in_resources yes + Broker Config Output Set central central-broker-unified-sql store_in_hosts_services no + Clear Retention + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + Sleep 3s + Start Process /usr/bin/ccc -p 51001 GetVersion{} stdout=/tmp/output.txt + FOR ${i} IN RANGE 10 + Wait Until Created /tmp/output.txt + ${content}= Get File /tmp/output.txt + EXIT FOR LOOP IF len("""${content.strip().split()}""") > 50 + Sleep 1s + END + Should Contain ${content} {\n \"major\": 22,\n \"minor\": 4,\n \"patch\": 1\n} msg=A version as json string should be returned + Stop Engine + Kindly Stop Broker + Remove File /tmp/output.txt + +BECCC7 + [Documentation] ccc with -p 51001 GetVersion{"idx":1} returns an error because the input message is wrong. 
+ [Tags] Broker Engine protobuf bbdo ccc + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Broker Config Add Item module0 bbdo_version 3.0.0 + Broker Config Add Item central bbdo_version 3.0.0 + Broker Config Add Item rrd bbdo_version 3.0.0 + Broker Config Log central sql trace + Config Broker Sql Output central unified_sql + Broker Config Output Set central central-broker-unified-sql store_in_resources yes + Broker Config Output Set central central-broker-unified-sql store_in_hosts_services no + Clear Retention + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + Sleep 3s + Start Process /usr/bin/ccc -p 51001 GetVersion{"idx":1} stderr=/tmp/output.txt + FOR ${i} IN RANGE 10 + Wait Until Created /tmp/output.txt + ${content}= Get File /tmp/output.txt + EXIT FOR LOOP IF len("""${content.strip().split()}""") > 10 + Sleep 1s + END + Should Contain ${content} Error during the execution of '/com.centreon.broker.Broker/GetVersion' method: msg=GetVersion{"idx":1} should return an error because the input message is incompatible with the expected one. + Stop Engine + Kindly Stop Broker + Remove File /tmp/output.txt + +BECCC8 + [Documentation] ccc with -p 50001 EnableServiceNotifications{"names":{"host_name": "host_1", "service_name": "service_1"}} works and returns an empty message. 
+ [Tags] Broker Engine protobuf bbdo ccc + Config Engine ${1} + Config Broker central + Config Broker module + Config Broker rrd + Broker Config Add Item module0 bbdo_version 3.0.0 + Broker Config Add Item central bbdo_version 3.0.0 + Broker Config Add Item rrd bbdo_version 3.0.0 + Broker Config Log central sql trace + Config Broker Sql Output central unified_sql + Broker Config Output Set central central-broker-unified-sql store_in_resources yes + Broker Config Output Set central central-broker-unified-sql store_in_hosts_services no + Clear Retention + ${start}= Get Current Date + Sleep 1s + Start Broker + Start Engine + Sleep 3s + Start Process /usr/bin/ccc -p 50001 EnableServiceNotifications{"names":{"host_name": "host_1", "service_name": "service_1"}} stdout=/tmp/output.txt + FOR ${i} IN RANGE 10 + Wait Until Created /tmp/output.txt + ${content}= Get File /tmp/output.txt + EXIT FOR LOOP IF len("""${content.strip().split()}""") > 2 + Sleep 1s + END + Should Contain ${content} {} + Stop Engine + Kindly Stop Broker + Remove File /tmp/output.txt + diff --git a/tests/connector_perl/conf_engine/central-module.json b/tests/connector_perl/conf_engine/central-module.json index 84716f52756..dfb45c6bf87 100644 --- a/tests/connector_perl/conf_engine/central-module.json +++ b/tests/connector_perl/conf_engine/central-module.json @@ -9,9 +9,8 @@ "log_thread_id": false, "event_queue_max_size": 100000, "command_file": "", - "cache_directory": "/var/lib/centreon-engine", "log": { - "directory": "/tmp/test_connector_ssh/log/", + "directory": "/tmp/test_connector_perl/log/", "filename": "centengine-cbmod.log", "max_size": 0, "loggers": { @@ -42,15 +41,8 @@ "type": "ipv4" } ], - "stats": [ - { - "type": "stats", - "name": "central-module-master-stats", - "json_fifo": "/var/lib/centreon-engine/central-module-master-stats.json" - } - ], "grpc": { "port": 51003 } } -} \ No newline at end of file +} diff --git a/tests/connector_ssh/conf_engine/central-module.json 
b/tests/connector_ssh/conf_engine/central-module.json index 84716f52756..141df8ac47e 100644 --- a/tests/connector_ssh/conf_engine/central-module.json +++ b/tests/connector_ssh/conf_engine/central-module.json @@ -9,7 +9,6 @@ "log_thread_id": false, "event_queue_max_size": 100000, "command_file": "", - "cache_directory": "/var/lib/centreon-engine", "log": { "directory": "/tmp/test_connector_ssh/log/", "filename": "centengine-cbmod.log", @@ -42,15 +41,8 @@ "type": "ipv4" } ], - "stats": [ - { - "type": "stats", - "name": "central-module-master-stats", - "json_fifo": "/var/lib/centreon-engine/central-module-master-stats.json" - } - ], "grpc": { "port": 51003 } } -} \ No newline at end of file +} diff --git a/tests/engine/forced_checks.robot b/tests/engine/forced_checks.robot index cbd43780b70..43e26b0d0af 100644 --- a/tests/engine/forced_checks.robot +++ b/tests/engine/forced_checks.robot @@ -35,7 +35,7 @@ EFHC1 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Process host check result host_1 0 host_1 UP FOR ${i} IN RANGE ${4} - Schedule Forced HOST CHECK host_1 /var/lib/centreon-engine/config0/rw/centengine.cmd + Schedule Forced HOST CHECK host_1 ${VarRoot}/lib/centreon-engine/config0/rw/centengine.cmd Sleep 5s END ${content}= Create List EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;SOFT;1; EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;SOFT;2; EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;HARD;3; @@ -67,7 +67,7 @@ EFHC2 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Process host check result host_1 0 host_1 UP FOR ${i} IN RANGE ${4} - Schedule Forced HOST CHECK host_1 /var/lib/centreon-engine/config0/rw/centengine.cmd + Schedule Forced HOST CHECK host_1 ${VarRoot}/lib/centreon-engine/config0/rw/centengine.cmd Sleep 5s END ${content}= Create List EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;SOFT;1; EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;SOFT;2; EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;HARD;3; @@ -108,7 +108,7 @@ EFHCU1 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. Process host check result host_1 0 host_1 UP FOR ${i} IN RANGE ${4} - Schedule Forced HOST CHECK host_1 /var/lib/centreon-engine/config0/rw/centengine.cmd + Schedule Forced HOST CHECK host_1 ${VarRoot}/lib/centreon-engine/config0/rw/centengine.cmd Sleep 5s END ${content}= Create List EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;SOFT;1; EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;SOFT;2; EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;HARD;3; @@ -148,7 +148,7 @@ EFHCU2 Should Be True ${result} msg=An Initial host state on host_1 should be raised before we can start our external commands. 
Process host check result host_1 0 host_1 UP FOR ${i} IN RANGE ${4} - Schedule Forced HOST CHECK host_1 /var/lib/centreon-engine/config0/rw/centengine.cmd + Schedule Forced HOST CHECK host_1 ${VarRoot}/lib/centreon-engine/config0/rw/centengine.cmd Sleep 5s END ${content}= Create List EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;SOFT;1; EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;SOFT;2; EXTERNAL COMMAND: SCHEDULE_FORCED_HOST_CHECK;host_1; HOST ALERT: host_1;DOWN;HARD;3; diff --git a/tests/init-sql.sh b/tests/init-sql.sh index 2e564486130..f22037423d8 100755 --- a/tests/init-sql.sh +++ b/tests/init-sql.sh @@ -1,5 +1,16 @@ #!/bin/bash -mysql -u root -pcentreon -e "drop database centreon" -mysql -u root -pcentreon < ../resources/centreon.sql -mysql -u root -pcentreon < ../resources/centreon_storage.sql +DBUserRoot=$(awk '($1=="${DBUserRoot}") {print $2}' resources/db_variables.robot) +DBPassRoot=$(awk '($1=="${DBPassRoot}") {print $2}' resources/db_variables.robot) + +if [ -z $DBUserRoot ] ; then + DBUserRoot="root" +fi + +if [ -z $DBPassRoot ] ; then + DBPassRoot="centreon" +fi + +mysql --user="$DBUserRoot" --password="$DBPassRoot" -e "drop database centreon" +mysql --user="$DBUserRoot" --password="$DBPassRoot" < ../resources/centreon.sql +mysql --user="$DBUserRoot" --password="$DBPassRoot" < ../resources/centreon_storage.sql diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py index c44c231119d..5367608f386 100755 --- a/tests/resources/Broker.py +++ b/tests/resources/Broker.py @@ -17,9 +17,21 @@ import broker_pb2 import broker_pb2_grpc from google.protobuf import empty_pb2 +from robot.libraries.BuiltIn import BuiltIn TIMEOUT = 30 +BuiltIn().import_resource('db_variables.robot') +DB_NAME_STORAGE = BuiltIn().get_variable_value("${DBName}") +DB_NAME_CONF = BuiltIn().get_variable_value("${DBNameConf}") +DB_USER = BuiltIn().get_variable_value("${DBUser}") +DB_PASS = 
BuiltIn().get_variable_value("${DBPass}") +DB_HOST = BuiltIn().get_variable_value("${DBHost}") +DB_PORT = BuiltIn().get_variable_value("${DBPort}") +VAR_ROOT = BuiltIn().get_variable_value("${VarRoot}") +ETC_ROOT = BuiltIn().get_variable_value("${EtcRoot}") + + config = { "central": """{{ "centreonBroker": {{ @@ -31,10 +43,12 @@ "log_timestamp": true, "log_thread_id": false, "event_queue_max_size": 100000, - "command_file": "/var/lib/centreon-broker/command.sock", - "cache_directory": "/var/lib/centreon-broker", + "command_file": "{7}/lib/centreon-broker/command.sock", + "cache_directory": "{7}/lib/centreon-broker", "log": {{ - "directory": "/var/log/centreon-broker/", + "log_pid": true, + "flush_period": 1, + "directory": "{7}/log/centreon-broker/", "filename": "", "max_size": 0, "loggers": {{ @@ -44,11 +58,11 @@ "processing": "error", "perfdata": "error", "bbdo": "error", - "tcp": "error", + "tcp": "debug", "tls": "error", "lua": "error", "bam": "error", - "grpc": "error" + "grpc": "off" }} }}, "input": [ @@ -71,11 +85,11 @@ "db_type": "mysql", "retry_interval": "5", "buffering_timeout": "0", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": "{2}", + "db_port": "{3}", + "db_user": "{4}", + "db_password": "{5}", + "db_name": "{6}", "queries_per_transaction": "1000", "connections_count": "3", "read_timeout": "1", @@ -101,11 +115,11 @@ "buffering_timeout": "0", "length": "15552000", "db_type": "mysql", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": "{2}", + "db_port": "{3}", + "db_user": "{4}", + "db_password": "{5}", + "db_name": "{6}", "queries_per_transaction": "1000", "read_timeout": "1", "check_replication": "no", @@ -119,7 +133,7 @@ {{ "type": "stats", "name": "central-broker-master-stats", - "json_fifo": "/var/lib/centreon-broker/central-broker-master-stats.json" + 
"json_fifo": "{7}/lib/centreon-broker/central-broker-master-stats.json" }} ], "grpc": {{ @@ -130,8 +144,8 @@ "module": """{{ "centreonBroker": {{ - "broker_id": {}, - "broker_name": "{}", + "broker_id": {0}, + "broker_name": "{1}", "poller_id": 1, "poller_name": "Central", "module_directory": "/usr/share/centreon/lib/centreon-broker", @@ -139,9 +153,11 @@ "log_thread_id": false, "event_queue_max_size": 100000, "command_file": "", - "cache_directory": "/var/lib/centreon-engine", + "cache_directory": "{7}/lib/centreon-engine", "log": {{ - "directory": "/var/log/centreon-broker/", + "log_pid": true, + "flush_period": 1, + "directory": "{7}/log/centreon-broker/", "filename": "", "max_size": 0, "loggers": {{ @@ -154,7 +170,8 @@ "tcp": "debug", "tls": "debug", "lua": "debug", - "bam": "debug" + "bam": "debug", + "grpc": "debug" }} }}, "output": [ @@ -176,7 +193,7 @@ {{ "type": "stats", "name": "central-module-master-stats", - "json_fifo": "/var/lib/centreon-engine/central-module-master-stats.json" + "json_fifo": "{7}/lib/centreon-engine/central-module-master-stats.json" }} ], "grpc": {{ @@ -196,9 +213,11 @@ "log_thread_id": false, "event_queue_max_size": 100000, "command_file": "", - "cache_directory": "/var/lib/centreon-broker", + "cache_directory": "{7}/lib/centreon-broker", "log": {{ - "directory": "/var/log/centreon-broker/", + "log_pid": true, + "flush_period": 1, + "directory": "{7}/log/centreon-broker/", "filename": "", "max_size": 0, "loggers": {{ @@ -231,8 +250,8 @@ "output": [ {{ "name": "central-rrd-master-output", - "metrics_path": "/var/lib/centreon/metrics/", - "status_path": "/var/lib/centreon/status/", + "metrics_path": "{7}/lib/centreon/metrics/", + "status_path": "{7}/lib/centreon/status/", "write_metrics": "yes", "store_in_data_bin": "yes", "write_status": "yes", @@ -246,7 +265,7 @@ {{ "type": "stats", "name": "central-rrd-master-stats", - "json_fifo": "/var/lib/centreon-broker/central-rrd-master-stats.json" + "json_fifo": 
"{7}/lib/centreon-broker/central-rrd-master-stats.json" }} ], "grpc": {{ @@ -265,10 +284,10 @@ "log_timestamp": true, "log_thread_id": false, "event_queue_max_size": 10, - "command_file": "/var/lib/centreon-broker/command.sock", - "cache_directory": "/var/lib/centreon-broker", + "command_file": "{7}/lib/centreon-broker/command.sock", + "cache_directory": "{7}/lib/centreon-broker", "log": {{ - "directory": "/var/log/centreon-broker/", + "directory": "{7}/log/centreon-broker/", "filename": "", "max_size": 0, "loggers": {{ @@ -305,11 +324,11 @@ "db_type": "mysql", "retry_interval": "5", "buffering_timeout": "0", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": "{2}", + "db_port": "{3}", + "db_user": "{4}", + "db_password": "{5}", + "db_name": "{6}", "queries_per_transaction": "1000", "connections_count": "3", "read_timeout": "1", @@ -347,11 +366,11 @@ "buffering_timeout": "0", "length": "15552000", "db_type": "mysql", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": "{2}", + "db_port": "{3}", + "db_user": "{4}", + "db_password": "{5}", + "db_name": "{6}", "queries_per_transaction": "1000", "read_timeout": "1", "check_replication": "no", @@ -365,7 +384,7 @@ {{ "type": "stats", "name": "central-broker-master-stats", - "json_fifo": "/var/lib/centreon-broker/central-broker-master-stats.json" + "json_fifo": "{7}/lib/centreon-broker/central-broker-master-stats.json" }} ], "grpc": {{ @@ -384,17 +403,23 @@ def _apply_conf(name, callback): else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) callback(conf) - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), 
"w") f.write(json.dumps(conf, indent=2)) f.close() def config_broker(name, poller_inst: int = 1): + makedirs(ETC_ROOT, mode=0o777, exist_ok=True) + makedirs(VAR_ROOT, mode=0o777, exist_ok=True) + makedirs(ETC_ROOT + "/centreon-broker", mode=0o777, exist_ok=True) + makedirs(VAR_ROOT + "/log/centreon-broker/", mode=0o777, exist_ok=True) + makedirs(VAR_ROOT + "/lib/centreon-broker/", mode=0o777, exist_ok=True) + if name == 'central': broker_id = 1 broker_name = "central-broker-master" @@ -408,23 +433,23 @@ def config_broker(name, poller_inst: int = 1): broker_name = "central-module-master" filename = "central-module0.json" else: - if not exists("/var/lib/centreon/metrics/"): - makedirs("/var/lib/centreon/metrics/") - if not exists("/var/lib/centreon/status/"): - makedirs("/var/lib/centreon/status/") - if not exists("/var/lib/centreon/metrics/tmpl_15552000_300_0.rrd"): + if not exists(VAR_ROOT + "/lib/centreon/metrics/"): + makedirs(VAR_ROOT + "/lib/centreon/metrics/") + if not exists(VAR_ROOT + "/lib/centreon/status/"): + makedirs(VAR_ROOT + "/lib/centreon/status/") + if not exists(VAR_ROOT + "/lib/centreon/metrics/tmpl_15552000_300_0.rrd"): getoutput( - "rrdcreate /var/lib/centreon/metrics/tmpl_15552000_300_0.rrd DS:value:ABSOLUTE:3000:U:U RRA:AVERAGE:0.5:1:864000") + "rrdcreate " + VAR_ROOT + "/lib/centreon/metrics/tmpl_15552000_300_0.rrd DS:value:ABSOLUTE:3000:U:U RRA:AVERAGE:0.5:1:864000") broker_id = 2 broker_name = "central-rrd-master" filename = "central-rrd.json" if name == 'module': for i in range(poller_inst): - broker_name = "/etc/centreon-broker/central-module{}.json".format( + broker_name = ETC_ROOT + "/centreon-broker/central-module{}.json".format( i) buf = config[name].format( - broker_id, "central-module-master{}".format(i)) + broker_id, "central-module-master{}".format(i), "", "", "", "", "", VAR_ROOT) conf = json.loads(buf) conf["centreonBroker"]["poller_id"] = i + 1 @@ -432,8 +457,9 @@ def config_broker(name, poller_inst: int = 1): 
f.write(json.dumps(conf, indent=2)) f.close() else: - f = open("/etc/centreon-broker/{}".format(filename), "w") - f.write(config[name].format(broker_id, broker_name)) + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") + f.write(config[name].format(broker_id, broker_name, + DB_HOST, DB_PORT, DB_USER, DB_PASS, DB_NAME_STORAGE, VAR_ROOT)) f.close() @@ -455,13 +481,84 @@ def input_to_grpc(conf): _apply_conf(name, input_to_grpc) -def change_broker_compression_output(config_name: str, compression_value: str): +def add_broker_crypto(json_dict, add_cert: bool, only_ca_cert: bool): + json_dict["encryption"] = "yes" + if (add_cert): + json_dict["ca_certificate"] = "/tmp/ca_1234.crt" + if (only_ca_cert == False): + json_dict["public_cert"] = "/tmp/server_1234.crt" + json_dict["private_key"] = "/tmp/server_1234.key" + + +def add_broker_tcp_input_grpc_crypto(name: str, add_cert: bool, reversed: bool): + def crypto_modifier(conf): + input_dict = conf["centreonBroker"]["input"] + for i, v in enumerate(input_dict): + if v["type"] == "grpc": + add_broker_crypto(v, add_cert, reversed) + _apply_conf(name, crypto_modifier) + + +def add_broker_tcp_output_grpc_crypto(name: str, add_cert: bool, reversed: bool): + def crypto_modifier(conf): + input_dict = conf["centreonBroker"]["output"] + for i, v in enumerate(input_dict): + if v["type"] == "grpc": + add_broker_crypto(v, add_cert, not reversed) + _apply_conf(name, crypto_modifier) + + +def add_host_to_broker_output(name: str, output_name: str, host_ip: str): + def modifier(conf): + input_dict = conf["centreonBroker"]["output"] + for i, v in enumerate(input_dict): + if (v["name"] == output_name): + v["host"] = host_ip + _apply_conf(name, modifier) + + +def add_host_to_broker_input(name: str, input_name: str, host_ip: str): + def modifier(conf): + input_dict = conf["centreonBroker"]["input"] + for i, v in enumerate(input_dict): + if (v["name"] == input_name): + v["host"] = host_ip + _apply_conf(name, modifier) + + +def 
remove_host_from_broker_output(name: str, output_name: str): + def modifier(conf): + input_dict = conf["centreonBroker"]["output"] + for i, v in enumerate(input_dict): + if (v["name"] == output_name): + v.pop("host") + _apply_conf(name, modifier) + + +def remove_host_from_broker_input(name: str, input_name: str): + def modifier(conf): + input_dict = conf["centreonBroker"]["input"] + for i, v in enumerate(input_dict): + if (v["name"] == input_name): + v.pop("host") + _apply_conf(name, modifier) + + +def change_broker_compression_output(config_name: str, output_name: str, compression_value: str): def compression_modifier(conf): output_dict = conf["centreonBroker"]["output"] for i, v in enumerate(output_dict): - v["compression"] = compression_value + if (v["name"] == output_name): + v["compression"] = compression_value _apply_conf(config_name, compression_modifier) +def change_broker_compression_input(config_name: str, input_name: str, compression_value: str): + def compression_modifier(conf): + input_dict = conf["centreonBroker"]["input"] + for i, v in enumerate(input_dict): + if (v["name"] == input_name): + v["compression"] = compression_value + _apply_conf(config_name, compression_modifier) def config_broker_sql_output(name, output): if name == 'central': @@ -471,7 +568,7 @@ def config_broker_sql_output(name, output): else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) @@ -483,11 +580,11 @@ def config_broker_sql_output(name, output): output_dict.append({ "name": "central-broker-unified-sql", "db_type": "mysql", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": DB_HOST, + "db_port": DB_PORT, + "db_user": DB_USER, + "db_password": DB_PASS, + "db_name": DB_NAME_STORAGE, "interval": "60", "length": "15552000", 
"queries_per_transaction": "20000", @@ -506,11 +603,11 @@ def config_broker_sql_output(name, output): "db_type": "mysql", "retry_interval": "5", "buffering_timeout": "0", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": DB_HOST, + "db_port": DB_PORT, + "db_user": DB_USER, + "db_password": DB_PASS, + "db_name": DB_NAME_STORAGE, "queries_per_transaction": "1000", "connections_count": "3", "read_timeout": "1", @@ -523,11 +620,11 @@ def config_broker_sql_output(name, output): "buffering_timeout": "0", "length": "15552000", "db_type": "mysql", - "db_host": "localhost", - "db_port": "3306", - "db_user": "centreon", - "db_password": "centreon", - "db_name": "centreon_storage", + "db_host": DB_HOST, + "db_port": DB_PORT, + "db_user": DB_USER, + "db_password": DB_PASS, + "db_name": DB_NAME_STORAGE, "queries_per_transaction": "1000", "read_timeout": "1", "check_replication": "no", @@ -536,7 +633,7 @@ def config_broker_sql_output(name, output): "insert_in_index_data": "1", "type": "storage" }) - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -549,7 +646,7 @@ def broker_config_clear_outputs_except(name, ex: list): else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) @@ -558,7 +655,7 @@ def broker_config_clear_outputs_except(name, ex: list): if v["type"] not in ex: output_dict.pop(i) - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -571,12 +668,12 @@ def broker_config_add_item(name, key, value): elif name.startswith('module'): filename = "central-{}.json".format(name) - f = 
open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) conf["centreonBroker"][key] = value - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -589,12 +686,12 @@ def broker_config_remove_item(name, key): elif name.startswith('module'): filename = "central-{}.json".format(name) - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) conf["centreonBroker"].pop(key) - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -607,7 +704,7 @@ def broker_config_add_lua_output(name, output, luafile): else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) @@ -617,7 +714,7 @@ def broker_config_add_lua_output(name, output, luafile): "path": luafile, "type": "lua" }) - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -629,14 +726,14 @@ def broker_config_output_set(name, output, key, value): filename = "central-{}.json".format(name) else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) output_dict = [elem for i, elem in enumerate( conf["centreonBroker"]["output"]) if elem["name"] == output][0] output_dict[key] = value - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = 
open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -648,7 +745,7 @@ def broker_config_output_set_json(name, output, key, value): filename = "central-{}.json".format(name) else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) @@ -656,7 +753,7 @@ def broker_config_output_set_json(name, output, key, value): conf["centreonBroker"]["output"]) if elem["name"] == output][0] j = json.loads(value) output_dict[key] = j - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -668,7 +765,7 @@ def broker_config_output_remove(name, output, key): filename = "central-{}.json".format(name) else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) @@ -676,7 +773,7 @@ def broker_config_output_remove(name, output, key): conf["centreonBroker"]["output"]) if elem["name"] == output][0] if key in output_dict: output_dict.pop(key) - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -688,14 +785,14 @@ def broker_config_input_set(name, inp, key, value): filename = "central-{}.json".format(name) else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) input_dict = [elem for i, elem in enumerate( conf["centreonBroker"]["input"]) if elem["name"] == inp][0] input_dict[key] = value - f = open("/etc/centreon-broker/{}".format(filename), "w") + f 
= open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -707,7 +804,7 @@ def broker_config_input_remove(name, inp, key): filename = "central-{}.json".format(name) else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) @@ -715,7 +812,7 @@ def broker_config_input_remove(name, inp, key): conf["centreonBroker"]["input"]) if elem["name"] == inp][0] if key in input_dict: input_dict.pop(key) - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -727,13 +824,13 @@ def broker_config_log(name, key, value): filename = "central-{}.json".format(name) else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) loggers = conf["centreonBroker"]["log"]["loggers"] loggers[key] = value - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -745,13 +842,13 @@ def broker_config_flush_log(name, value): filename = "central-{}.json".format(name) else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) log = conf["centreonBroker"]["log"] log["flush_period"] = value - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() @@ -768,7 +865,7 @@ def check_broker_stats_exist(name, key1, key2, timeout=TIMEOUT): retry = True while retry: 
retry = False - f = open("/var/lib/centreon-broker/{}".format(filename), "r") + f = open(VAR_ROOT + "/lib/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() @@ -796,7 +893,7 @@ def get_broker_stats_size(name, key, timeout=TIMEOUT): retry = True while retry: retry = False - f = open("/var/lib/centreon-broker/{}".format(filename), "r") + f = open(VAR_ROOT + "/lib/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() try: @@ -825,10 +922,10 @@ def get_broker_stats_size(name, key, timeout=TIMEOUT): # def get_not_existing_indexes(count: int): # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -863,14 +960,14 @@ def get_not_existing_indexes(count: int): # def get_indexes_to_delete(count: int): files = [os.path.basename(x) for x in glob.glob( - "/var/lib/centreon/metrics/[0-9]*.rrd")] + VAR_ROOT + "/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -903,14 +1000,14 @@ def get_indexes_to_delete(count: int): # def get_not_existing_metrics(count: int): files = [os.path.basename(x) for x in glob.glob( - "/var/lib/centreon/metrics/[0-9]*.rrd")] + VAR_ROOT + "/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, 
+ password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -941,14 +1038,14 @@ def get_not_existing_metrics(count: int): # def get_metrics_to_delete(count: int): files = [os.path.basename(x) for x in glob.glob( - "/var/lib/centreon/metrics/[0-9]*.rrd")] + VAR_ROOT + "/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -972,14 +1069,14 @@ def get_metrics_to_delete(count: int): def create_metrics(count: int): files = [os.path.basename(x) for x in glob.glob( - "/var/lib/centreon/metrics/[0-9]*.rrd")] + VAR_ROOT + "/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -999,18 +1096,18 @@ def create_metrics(count: int): result = cursor.fetchall() ids_index = [r['id'] for r in result] if ids_index == []: - sql = "INSERT INTO index_data (host_id, service_id) VALUES ('1', '1')" + sql = "INSERT INTO index_data (host_id, service_id) VALUES (1, 1)" cursor.execute(sql) - ids_index = cursor.lastrowid + ids_index.append(cursor.lastrowid) for c in range(count): sql = "INSERT INTO metrics (index_id,metric_name,unit_name,warn,warn_low,warn_threshold_mode,crit,crit_low,crit_threshold_mode,min,max,current_value,data_source_type) VALUES ('{}','metric_{}','unit_{}','10','1','0','1','1','0','0','100','25','0')".format( ids_index[0], c, c) cursor.execute(sql) 
ids_metric = cursor.lastrowid - connection.commit() - shutil.copy("/var/lib/centreon/metrics/tmpl_15552000_300_0.rrd", - "/var/lib/centreon/metrics/{}.rrd".format(ids_metric)) + shutil.copy(VAR_ROOT + "/lib/centreon/metrics/tmpl_15552000_300_0.rrd", + VAR_ROOT + "/lib/centreon/metrics/{}.rrd".format(ids_metric)) logger.console("create metric file {}".format(ids_metric)) + connection.commit() def run_reverse_bam(duration, interval): @@ -1028,14 +1125,14 @@ def run_reverse_bam(duration, interval): # @return a list of indexes def get_indexes_to_rebuild(count: int): files = [os.path.basename(x) for x in glob.glob( - "/var/lib/centreon/metrics/[0-9]*.rrd")] + VAR_ROOT + "/lib/centreon/metrics/[0-9]*.rrd")] ids = [int(f.split(".")[0]) for f in files] # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) retval = [] @@ -1082,10 +1179,10 @@ def get_indexes_to_rebuild(count: int): # @return a list of metric ids. 
def get_metrics_matching_indexes(indexes): # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) with connection: @@ -1117,10 +1214,42 @@ def remove_graphs(port, indexes, metrics, timeout=10): trm.metric_ids.extend(metrics) try: stub.RemoveGraphs(trm) + break except: logger.console("gRPC server not ready") +## +# @brief send a query to the db to remove graphs (by indexes or by metrics) +# +# @param indexes a list of indexes +# @param metrics a list of metrics +# +def remove_graphs_from_db(indexes, metrics, timeout=10): + logger.console("rem1") + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + + ids_db = [] + with connection: + with connection.cursor() as cursor: + if len(indexes) > 0: + str_indexes = [str(i) for i in indexes] + sql = "UPDATE index_data SET to_delete=1 WHERE id in ({})".format(",".join(str_indexes)) + logger.console(sql) + cursor.execute(sql) + if len(metrics) > 0: + str_metrics = [str(i) for i in metrics] + sql = "UPDATE metrics SET to_delete=1 WHERE metric_id in ({})".format(",".join(str_metrics)) + logger.console(sql) + cursor.execute(sql) + connection.commit() + + ## # @brief Execute the gRPC command RebuildRRDGraphs() # @@ -1137,13 +1266,37 @@ def rebuild_rrd_graphs(port, indexes, timeout: int = TIMEOUT): stub = broker_pb2_grpc.BrokerStub(channel) k = 0.0 idx = broker_pb2.IndexIds() - idx.index_id.extend(indexes) + idx.index_ids.extend(indexes) try: stub.RebuildRRDGraphs(idx) + break except: logger.console("gRPC server not ready") +## +# @brief Send a query to the db to rebuild graphs +# +# @param indexes The list of indexes corresponding to metrics to rebuild. 
+# +def rebuild_rrd_graphs_from_db(indexes, timeout: int = TIMEOUT): + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + + ids_db = [] + with connection: + with connection.cursor() as cursor: + if len(indexes) > 0: + sql = "UPDATE index_data SET must_be_rebuild=1 WHERE id in ({})".format(",".join(map(str, indexes))) + logger.console(sql) + cursor.execute(sql) + connection.commit() + + ## # @brief Compare the average value for an RRD metric on the last 30 days with # a value. @@ -1154,15 +1307,19 @@ def rebuild_rrd_graphs(port, indexes, timeout: int = TIMEOUT): # @return A boolean. def compare_rrd_average_value(metric, value: float): res = getoutput("rrdtool graph dummy --start=end-30d --end=now" - " DEF:x=/var/lib/centreon/metrics/{}.rrd:value:AVERAGE VDEF:xa=x,AVERAGE PRINT:xa:%lf" + " DEF:x=" + VAR_ROOT + + "/lib/centreon/metrics/{}.rrd:value:AVERAGE VDEF:xa=x,AVERAGE PRINT:xa:%lf" .format(metric)) lst = res.split('\n') if len(lst) >= 2: res = float(lst[1].replace(',', '.')) - return abs(res - float(value)) < 2 + err = abs(res - float(value)) / float(value) + logger.console( + f"expected value: {value} - result value: {res} - err: {err}") + return err < 0.01 else: logger.console( - "It was impossible to get the average value from the file /var/lib/centreon/metrics/{}.rrd from the last 30 days".format(metric)) + "It was impossible to get the average value from the file " + VAR_ROOT + "/lib/centreon/metrics/{}.rrd from the last 30 days".format(metric)) return True @@ -1229,7 +1386,7 @@ def add_bam_config_to_broker(name): else: filename = "central-rrd.json" - f = open("/etc/centreon-broker/{}".format(filename), "r") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "r") buf = f.read() f.close() conf = json.loads(buf) @@ -1238,15 +1395,15 @@ def add_bam_config_to_broker(name): "name": "centreon-bam-monitoring", "cache": "yes", 
"check_replication": "no", - "command_file": "/var/lib/centreon-engine/config0/rw/centengine.cmd", - "db_host": "127.0.0.1", - "db_name": "centreon", - "db_password": "centreon", - "db_port": "3306", + "command_file": VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", + "db_host": DB_HOST, + "db_name": DB_NAME_CONF, + "db_password": DB_PASS, + "db_port": DB_PORT, "db_type": "mysql", - "db_user": "centreon", + "db_user": DB_USER, "queries_per_transaction": "0", - "storage_db_name": "centreon_storage", + "storage_db_name": DB_NAME_STORAGE, "type": "bam" }) output_dict.append({ @@ -1257,15 +1414,15 @@ def add_bam_config_to_broker(name): ] }, "check_replication": "no", - "db_host": "127.0.0.1", - "db_name": "centreon_storage", - "db_password": "centreon", - "db_port": "3306", + "db_host": DB_HOST, + "db_name": DB_NAME_STORAGE, + "db_password": DB_PASS, + "db_port": DB_PORT, "db_type": "mysql", - "db_user": "centreon", + "db_user": DB_USER, "queries_per_transaction": "0", "type": "bam_bi" }) - f = open("/etc/centreon-broker/{}".format(filename), "w") + f = open(ETC_ROOT + "/centreon-broker/{}".format(filename), "w") f.write(json.dumps(conf, indent=2)) f.close() diff --git a/tests/resources/Common.py b/tests/resources/Common.py index e2324fceb2d..5b7f070f95b 100644 --- a/tests/resources/Common.py +++ b/tests/resources/Common.py @@ -6,21 +6,35 @@ from dateutil import parser from datetime import datetime import pymysql.cursors +from robot.libraries.BuiltIn import BuiltIn + TIMEOUT = 30 +BuiltIn().import_resource('db_variables.robot') +DB_NAME_STORAGE = BuiltIn().get_variable_value("${DBName}") +DB_NAME_CONF = BuiltIn().get_variable_value("${DBNameConf}") +DB_USER = BuiltIn().get_variable_value("${DBUser}") +DB_PASS = BuiltIn().get_variable_value("${DBPass}") +DB_HOST = BuiltIn().get_variable_value("${DBHost}") +DB_PORT = BuiltIn().get_variable_value("${DBPort}") +VAR_ROOT = BuiltIn().get_variable_value("${VarRoot}") + def check_connection(port: int, pid1: int, 
pid2: int): limit = time.time() + TIMEOUT - r = re.compile(r"^ESTAB.*127\.0\.0\.1\]*:{}\s".format(port)) + r = re.compile( + r"^ESTAB.*127\.0\.0\.1\]*:{}\s|^ESTAB.*\[::1\]*:{}\s".format(port, port)) + p = re.compile( + r"127\.0\.0\.1\]*:(\d+)\s+.*127\.0\.0\.1\]*:(\d+)\s+.*,pid=(\d+)") + p_v6 = re.compile( + r"::1\]*:(\d+)\s+.*::1\]*:(\d+)\s+.*,pid=(\d+)") while time.time() < limit: out = getoutput("ss -plant") lst = out.split('\n') estab_port = list(filter(r.match, lst)) if len(estab_port) >= 2: ok = [False, False] - p = re.compile( - r"127\.0\.0\.1\]*:(\d+)\s+.*127\.0\.0\.1\]*:(\d+)\s+.*,pid=(\d+)") for l in estab_port: m = p.search(l) if m is not None: @@ -28,10 +42,15 @@ def check_connection(port: int, pid1: int, pid2: int): ok[0] = True if pid2 == int(m.group(3)): ok[1] = True + m = p_v6.search(l) + if m is not None: + if pid1 == int(m.group(3)): + ok[0] = True + if pid2 == int(m.group(3)): + ok[1] = True if ok[0] and ok[1]: return True time.sleep(1) - return False @@ -158,17 +177,16 @@ def kill_engine(): def clear_retention(): - getoutput("find /var -name '*.cache.*' -delete") + getoutput("find " + VAR_ROOT + " -name '*.cache.*' -delete") getoutput("find /tmp -name 'lua*' -delete") - getoutput("find /tmp -name 'central-*' -delete") - getoutput("find /var -name '*.memory.*' -delete") - getoutput("find /var -name '*.queue.*' -delete") - getoutput("find /var -name '*.unprocessed*' -delete") - getoutput("find /var -name 'retention.dat' -delete") + getoutput("find " + VAR_ROOT + " -name '*.memory.*' -delete") + getoutput("find " + VAR_ROOT + " -name '*.queue.*' -delete") + getoutput("find " + VAR_ROOT + " -name '*.unprocessed*' -delete") + getoutput("find " + VAR_ROOT + " -name 'retention.dat' -delete") def clear_cache(): - getoutput("find /var -name '*.cache.*' -delete") + getoutput("find " + VAR_ROOT + " -name '*.cache.*' -delete") def engine_log_table_duplicate(result: list): @@ -187,7 +205,8 @@ def check_engine_logs_are_duplicated(log: str, date): idx = 
find_line_from(lines, date) count_true = 0 count_false = 0 - logs = [] + logs_old = [] + logs_new = [] old_log = re.compile(r"\[[^\]]*\] \[[^\]]*\] ([^\[].*)") new_log = re.compile( r"\[[^\]]*\] \[[^\]]*\] \[[^\]]*\] \[[^\]]*\] (.*)") @@ -195,25 +214,27 @@ def check_engine_logs_are_duplicated(log: str, date): mo = old_log.match(l) mn = new_log.match(l) if mo is not None: - if mo.group(1) in logs: - logs.remove(mo.group(1)) + if mo.group(1) in logs_new: + logs_new.remove(mo.group(1)) else: - logs.append(mo.group(1)) + logs_old.append(mo.group(1)) else: mn = new_log.match(l) if mn is not None: - if mn.group(1) in logs: - logs.remove(mn.group(1)) + if mn.group(1) in logs_old: + logs_old.remove(mn.group(1)) else: - logs.append(mn.group(1)) - if len(logs) <= 1: + logs_new.append(mn.group(1)) + if len(logs_old) <= 1: # It is possible to miss one log because of the initial split of the # file. return True else: - logger.console("Logs not duplicated") - for l in logs: + logger.console( + "{} old logs are not duplicated".format(len(logs_old))) + for l in logs_old: logger.console(l) + # We don't care about new logs not duplicated, in a future, we won't have any old logs except IOError: logger.console("The file '{}' does not exist".format(log)) return False @@ -316,11 +337,11 @@ def set_command_status(cmd, status): def check_service_status_with_timeout(hostname: str, service_desc: str, status: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, autocommit=True, - database='centreon_storage', + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -329,7 +350,7 @@ def check_service_status_with_timeout(hostname: str, service_desc: str, status: cursor.execute("SELECT s.state FROM services s LEFT JOIN hosts h ON s.host_id=h.host_id WHERE 
s.description=\"{}\" AND h.name=\"{}\"".format( service_desc, hostname)) result = cursor.fetchall() - if result[0]['state'] and int(result[0]['state']) == status: + if len(result) > 0 and result[0]['state'] is not None and int(result[0]['state']) == int(status): return True time.sleep(5) return False @@ -338,10 +359,10 @@ def check_service_status_with_timeout(hostname: str, service_desc: str, status: def check_severity_with_timeout(name: str, level, icon_id, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -360,10 +381,10 @@ def check_severity_with_timeout(name: str, level, icon_id, timeout: int): def check_tag_with_timeout(name: str, typ, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -382,10 +403,10 @@ def check_tag_with_timeout(name: str, typ, timeout: int): def check_severities_count(value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -403,10 +424,10 @@ def check_severities_count(value: int, timeout: int): def check_tags_count(value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - 
connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -424,10 +445,10 @@ def check_tags_count(value: int, timeout: int): def check_ba_status_with_timeout(ba_name: str, status: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_CONF, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) with connection: @@ -435,7 +456,7 @@ def check_ba_status_with_timeout(ba_name: str, status: int, timeout: int): cursor.execute( "SELECT current_status FROM mod_bam WHERE name='{}'".format(ba_name)) result = cursor.fetchall() - if result[0]['current_status'] and int(result[0]['current_status']) == status: + if result[0]['current_status'] is not None and int(result[0]['current_status']) == status: return True time.sleep(5) return False @@ -444,10 +465,10 @@ def check_ba_status_with_timeout(ba_name: str, status: int, timeout: int): def check_service_downtime_with_timeout(hostname: str, service_desc: str, enabled, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -464,30 +485,30 @@ def check_service_downtime_with_timeout(hostname: str, service_desc: str, enable def delete_service_downtime(hst: str, svc: str): now = int(time.time()) - connection = pymysql.connect(host='localhost', - user='centreon', - 
password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) with connection: with connection.cursor() as cursor: - cursor.execute("select d.internal_id from downtimes d inner join hosts h on d.host_id=h.host_id inner join services s on d.service_id=s.service_id where d.cancelled='0' and s.scheduled_downtime_depth='1' and s.description='{}' and h.name='{}'".format(svc, hst)) + cursor.execute("select d.internal_id from downtimes d inner join hosts h on d.host_id=h.host_id inner join services s on d.service_id=s.service_id where d.deletion_time is null and s.scheduled_downtime_depth<>'0' and s.description='{}' and h.name='{}' LIMIT 1".format(svc, hst)) result = cursor.fetchall() did = int(result[0]['internal_id']) cmd = "[{}] DEL_SVC_DOWNTIME;{}".format(now, did) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() def number_of_downtimes_is(nb: int): - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -500,10 +521,10 @@ def number_of_downtimes_is(nb: int): def clear_db(table: str): - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -516,10 +537,10 @@ def clear_db(table: str): def check_service_severity_with_timeout(host_id: int, service_id: int, severity_id, timeout: int): limit = time.time() + timeout while time.time() < 
limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -538,13 +559,13 @@ def check_service_severity_with_timeout(host_id: int, service_id: int, severity_ return False -def check_host_severity_with_timeout(host_id: int, severity_id, timeout: int): +def check_host_severity_with_timeout(host_id: int, severity_id, timeout: int = TIMEOUT): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', autocommit=True, cursorclass=pymysql.cursors.DictCursor) @@ -575,10 +596,10 @@ def check_resources_tags_with_timeout(parent_id: int, mid: int, typ: str, tag_id t = 3 limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -621,10 +642,10 @@ def check_resources_tags_with_timeout(parent_id: int, mid: int, typ: str, tag_id def check_host_tags_with_timeout(host_id: int, tag_id: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -643,10 +664,10 @@ def check_host_tags_with_timeout(host_id: int, 
tag_id: int, timeout: int): def check_number_of_resources_monitored_by_poller_is(poller: int, value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -666,10 +687,10 @@ def check_number_of_downtimes(expected: int, start, timeout: int): limit = time.time() + timeout d = parser.parse(start).timestamp() while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) with connection: @@ -689,10 +710,10 @@ def check_number_of_downtimes(expected: int, start, timeout: int): def check_number_of_relations_between_hostgroup_and_hosts(hostgroup: int, value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -711,10 +732,10 @@ def check_number_of_relations_between_hostgroup_and_hosts(hostgroup: int, value: def check_number_of_relations_between_servicegroup_and_services(servicegroup: int, value: int, timeout: int): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, 
charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -733,10 +754,10 @@ def check_number_of_relations_between_servicegroup_and_services(servicegroup: in def check_host_status(host: str, value: int, t: int, in_resources: bool, timeout: int = TIMEOUT): limit = time.time() + timeout while time.time() < limit: - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon_storage', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -763,3 +784,32 @@ def check_host_status(host: str, value: int, t: int, in_resources: bool, timeout host, result[0][key], result[0][confirmed])) time.sleep(1) return False + + +def find_internal_id(date, exists=True, timeout: int = TIMEOUT): + my_date = datetime.timestamp(parser.parse(date)) + limit = time.time() + timeout + while time.time() < limit: + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + autocommit=True, + cursorclass=pymysql.cursors.DictCursor) + + with connection: + with connection.cursor() as cursor: + logger.console( + "select internal_id from comments where entry_time >= {} and deletion_time is null".format(my_date)) + cursor.execute( + "select internal_id from comments where entry_time >= {} and deletion_time is null".format(my_date)) + result = cursor.fetchall() + if len(result) > 0 and exists: + return result[0]['internal_id'] + elif len(result) == 0: + logger.console("Query to find the internal_id failed") + if not exists: + return True + time.sleep(1) + return False diff --git a/tests/resources/Engine.py b/tests/resources/Engine.py index efd9be7c223..1d1fc620783 100755 --- a/tests/resources/Engine.py +++ b/tests/resources/Engine.py @@ -1,6 +1,9 @@ +from array import array from os import makedirs, chmod from os.path import exists, dirname +from xml.etree.ElementTree 
import Comment from robot.api import logger +from robot.libraries.BuiltIn import BuiltIn import db_conf import random import shutil @@ -8,15 +11,18 @@ import time import re import stat +import string import grpc import engine_pb2 import engine_pb2_grpc -CONF_DIR = "/etc/centreon-engine" -ENGINE_HOME = "/var/lib/centreon-engine" SCRIPT_DIR: str = dirname(__file__) + "/engine-scripts/" +VAR_ROOT = BuiltIn().get_variable_value("${VarRoot}") +ETC_ROOT = BuiltIn().get_variable_value("${EtcRoot}") +CONF_DIR = ETC_ROOT + "/centreon-engine" +ENGINE_HOME = VAR_ROOT + "/lib/centreon-engine" class EngineInstance: @@ -30,6 +36,13 @@ def __init__(self, count: int, hosts: int = 50, srv_by_host: int = 20): self.instances = count self.service_cmd = {} self.build_configs(hosts, srv_by_host) + makedirs(ETC_ROOT, mode=0o777, exist_ok=True) + makedirs(VAR_ROOT, mode=0o777, exist_ok=True) + makedirs(CONF_DIR, mode=0o777, exist_ok=True) + makedirs(ENGINE_HOME, mode=0o777, exist_ok=True) + makedirs(ETC_ROOT + "/centreon-broker", mode=0o777, exist_ok=True) + makedirs(VAR_ROOT + "/log/centreon-engine/", mode=0o777, exist_ok=True) + makedirs(VAR_ROOT + "/log/centreon-broker/", mode=0o777, exist_ok=True) def create_centengine(self, id: int, debug_level=0): return ("cfg_file={2}/config{0}/hosts.cfg\n" @@ -47,15 +60,15 @@ def create_centengine(self, id: int, debug_level=0): "#cfg_file={2}/config{0}/meta_host.cfg\n" "#cfg_file={2}/config{0}/meta_services.cfg\n" "broker_module=/usr/lib64/centreon-engine/externalcmd.so\n" - "broker_module=/usr/lib64/nagios/cbmod.so /etc/centreon-broker/central-module{0}.json\n" + "broker_module=/usr/lib64/nagios/cbmod.so {4}/centreon-broker/central-module{0}.json\n" "interval_length=60\n" "use_timezone=:Europe/Paris\n" "resource_file={2}/config{0}/resource.cfg\n" - "log_file=/var/log/centreon-engine/config{0}/centengine.log\n" - "status_file=/var/log/centreon-engine/config{0}/status.dat\n" + "log_file={3}/log/centreon-engine/config{0}/centengine.log\n" + 
"status_file={3}/log/centreon-engine/config{0}/status.dat\n" "command_check_interval=1s\n" - "command_file=/var/lib/centreon-engine/config{0}/rw/centengine.cmd\n" - "state_retention_file=/var/log/centreon-engine/config{0}/retention.dat\n" + "command_file={3}/lib/centreon-engine/config{0}/rw/centengine.cmd\n" + "state_retention_file={3}/log/centreon-engine/config{0}/retention.dat\n" "retention_update_interval=60\n" "sleep_time=0.2\n" "service_inter_check_delay_method=s\n" @@ -81,7 +94,7 @@ def create_centengine(self, id: int, debug_level=0): "admin_pager=admin\n" "event_broker_options=-1\n" "cached_host_check_horizon=60\n" - "debug_file=/var/log/centreon-engine/config{0}/centengine.debug\n" + "debug_file={3}/log/centreon-engine/config{0}/centengine.debug\n" "debug_level={1}\n" "debug_verbosity=2\n" "log_pid=1\n" @@ -119,13 +132,14 @@ def create_centengine(self, id: int, debug_level=0): "log_level_macros=info\n" "log_level_process=info\n" "log_level_runtime=info\n" + "log_flush_period=0\n" "soft_state_dependencies=0\n" "obsess_over_services=0\n" "process_performance_data=0\n" "check_for_orphaned_services=0\n" "check_for_orphaned_hosts=0\n" "check_service_freshness=1\n" - "enable_flap_detection=0\n").format(id, debug_level, CONF_DIR) + "enable_flap_detection=0\n").format(id, debug_level, CONF_DIR, VAR_ROOT, ETC_ROOT) def create_host(self): self.last_host_id += 1 @@ -171,6 +185,21 @@ def create_service(self, host_id: int, cmd_ids: int): host_id, service_id, self.service_cmd[service_id]) return retval + def create_anomaly_detection(self, host_id: int, dependent_service_id: int, metric_name: string): + self.last_service_id += 1 + service_id = self.last_service_id + retval = """define anomalydetection {{ + host_id {0} + host_name host_{0} + service_id {1} + service_description anomaly_{1} + dependent_service_id {2} + metric_name {3} + status_change 1 + thresholds_file /tmp/anomaly_threshold.json +}} """.format(host_id, service_id, dependent_service_id, metric_name) + 
return retval + def create_bam_timeperiod(self): retval = """define timeperiod { timeperiod_name centreon-bam-timeperiod @@ -473,13 +502,24 @@ def centengine_conf_add_bam(self): f = open(config_dir + "/centengine.cfg", "r") lines = f.readlines() f.close - lines_to_prep = ["cfg_file=/etc/centreon-engine/config0/centreon-bam-command.cfg\n", "cfg_file=/etc/centreon-engine/config0/centreon-bam-timeperiod.cfg\n", - "cfg_file=/etc/centreon-engine/config0/centreon-bam-host.cfg\n", "cfg_file=/etc/centreon-engine/config0/centreon-bam-services.cfg\n"] + lines_to_prep = ["cfg_file=" + ETC_ROOT + "/centreon-engine/config0/centreon-bam-command.cfg\n", "cfg_file=" + ETC_ROOT + "/centreon-engine/config0/centreon-bam-timeperiod.cfg\n", + "cfg_file=" + ETC_ROOT + "/centreon-engine/config0/centreon-bam-host.cfg\n", "cfg_file=" + ETC_ROOT + "/centreon-engine/config0/centreon-bam-services.cfg\n"] f = open(config_dir + "/centengine.cfg", "w") f.writelines(lines_to_prep) f.writelines(lines) f.close() + def centengine_conf_add_anomaly(self): + config_dir = "{}/config0".format(CONF_DIR) + f = open(config_dir + "/centengine.cfg", "r") + lines = f.readlines() + f.close + f = open(config_dir + "/centengine.cfg", "w") + f.writelines("cfg_file=" + config_dir + + "/anomaly_detection.cfg\n") + f.writelines(lines) + f.close() + ## # @brief Configure all the necessary files for num instances of centengine. @@ -506,7 +546,8 @@ def get_engines_count(): # @param value the new value to set to the key variable. # def engine_config_set_value(idx: int, key: str, value: str, force: bool = False): - filename = "/etc/centreon-engine/config{}/centengine.cfg".format(idx) + filename = ETC_ROOT + \ + "/centreon-engine/config{}/centengine.cfg".format(idx) f = open(filename, "r") lines = f.readlines() f.close() @@ -532,7 +573,7 @@ def engine_config_set_value(idx: int, key: str, value: str, force: bool = False) # @param value the new value to set to the key variable. 
# def engine_config_set_value_in_services(idx: int, desc: str, key: str, value: str): - filename = "/etc/centreon-engine/config{}/services.cfg".format(idx) + filename = ETC_ROOT + "/centreon-engine/config{}/services.cfg".format(idx) f = open(filename, "r") lines = f.readlines() f.close() @@ -556,7 +597,7 @@ def engine_config_set_value_in_services(idx: int, desc: str, key: str, value: st # @param value the new value to set to the key variable. # def engine_config_set_value_in_hosts(idx: int, desc: str, key: str, value: str): - filename = "/etc/centreon-engine/config{}/hosts.cfg".format(idx) + filename = ETC_ROOT + "/centreon-engine/config{}/hosts.cfg".format(idx) f = open(filename, "r") lines = f.readlines() f.close() @@ -573,7 +614,7 @@ def engine_config_set_value_in_hosts(idx: int, desc: str, key: str, value: str): def add_host_group(index: int, id_host_group: int, members: list): mbs = [l for l in members if l in engine.hosts] - f = open("/etc/centreon-engine/config{}/hostgroups.cfg".format(index), "a+") + f = open(ETC_ROOT + "/centreon-engine/config{}/hostgroups.cfg".format(index), "a+") logger.console(mbs) f.write(engine.create_host_group(id_host_group, mbs)) f.close() @@ -581,7 +622,7 @@ def add_host_group(index: int, id_host_group: int, members: list): def rename_host_group(index: int, id_host_group: int, name: str, members: list): mbs = [l for l in members if l in engine.hosts] - f = open("/etc/centreon-engine/config{}/hostgroups.cfg".format(index), "w") + f = open(ETC_ROOT + "/centreon-engine/config{}/hostgroups.cfg".format(index), "w") logger.console(mbs) f.write("""define hostgroup {{ hostgroup_id {0} @@ -594,14 +635,15 @@ def rename_host_group(index: int, id_host_group: int, name: str, members: list): def add_service_group(index: int, id_service_group: int, members: list): - f = open("/etc/centreon-engine/config{}/servicegroups.cfg".format(index), "a+") + f = open( + ETC_ROOT + "/centreon-engine/config{}/servicegroups.cfg".format(index), "a+") 
logger.console(members) f.write(engine.create_service_group(id_service_group, members)) f.close() def create_service(index: int, host_id: int, cmd_id: int): - f = open("/etc/centreon-engine/config{}/services.cfg".format(index), "a+") + f = open(ETC_ROOT + "/centreon-engine/config{}/services.cfg".format(index), "a+") svc = engine.create_service(host_id, [1, cmd_id]) lst = svc.split('\n') good = [l for l in lst if "_SERVICE_ID" in l][0] @@ -617,6 +659,26 @@ def create_service(index: int, host_id: int, cmd_id: int): return retval +def create_anomaly_detection(index: int, host_id: int, dependent_service_id: int, metric_name: string): + f = open( + ETC_ROOT + "/centreon-engine/config{}/anomaly_detection.cfg".format(index), "a+") + to_append = engine.create_anomaly_detection( + host_id, dependent_service_id, metric_name) + lst = to_append.split('\n') + good = [l for l in lst if "service_id" in l][0] + m = re.search(r"service_id\s+([^\s]*)$", good) + if m is not None: + retval = int(m.group(1)) + else: + raise Exception( + "Impossible to get the service id from '{}'".format(good)) + m = 0 + f.write(to_append) + f.close() + engine.centengine_conf_add_anomaly() + return retval + + def engine_log_duplicate(result: list): dup = True for i in result: @@ -636,9 +698,9 @@ def add_bam_config_to_engine(): dbconf.init_bam() -def create_ba_with_services(name: str, typ: str, svc: list): +def create_ba_with_services(name: str, typ: str, svc: list, dt_policy="inherit"): global dbconf - dbconf.create_ba_with_services(name, typ, svc) + dbconf.create_ba_with_services(name, typ, svc, dt_policy) def get_command_id(service: int): @@ -652,7 +714,7 @@ def process_service_check_result(hst: str, svc: str, state: int, output: str): now = int(time.time()) cmd = "[{}] PROCESS_SERVICE_CHECK_RESULT;{};{};{};{}\n".format( now, hst, svc, state, output) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") 
f.write(cmd) f.close() @@ -667,7 +729,7 @@ def change_normal_svc_check_interval(use_grpc: int, hst: str, svc: str, check_in now = int(time.time()) cmd = "[{}] CHANGE_NORMAL_SVC_CHECK_INTERVAL;{};{};{}\n".format( now, hst, svc, check_interval) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -682,7 +744,7 @@ def change_normal_host_check_interval(use_grpc: int, hst: str, check_interval: i now = int(time.time()) cmd = "[{}] CHANGE_NORMAL_HOST_CHECK_INTERVAL;{};{}\n".format( now, hst, check_interval) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -697,7 +759,7 @@ def change_retry_svc_check_interval(use_grpc: int, hst: str, svc: str, retry_int now = int(time.time()) cmd = "[{}] CHANGE_RETRY_SVC_CHECK_INTERVAL;{};{};{}\n".format( now, hst, svc, retry_interval) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -712,7 +774,7 @@ def change_retry_host_check_interval(use_grpc: int, hst: str, retry_interval: in now = int(time.time()) cmd = "[{}] CHANGE_RETRY_HOST_CHECK_INTERVAL;{};{}\n".format( now, hst, retry_interval) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -727,7 +789,7 @@ def change_max_svc_check_attempts(use_grpc: int, hst: str, svc: str, max_check_a now = int(time.time()) cmd = "[{}] CHANGE_MAX_SVC_CHECK_ATTEMPTS;{};{};{}\n".format( now, hst, svc, max_check_attempts) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -742,20 +804,11 @@ def 
change_max_host_check_attempts(use_grpc: int, hst: str, max_check_attempts: now = int(time.time()) cmd = "[{}] CHANGE_MAX_HOST_CHECK_ATTEMPTS;{};{}\n".format( now, hst, max_check_attempts) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() -def change_host_check_command(hst: str, Check_Command: str): - now = int(time.time()) - cmd = "[{}] CHANGE_HOST_CHECK_COMMAND;{};{}\n".format( - now, hst, Check_Command) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") - f.write(cmd) - f.close() - - def change_host_check_timeperiod(use_grpc: int, hst: str, check_timeperiod: str): if use_grpc > 0: with grpc.insecure_channel("127.0.0.1:50001") as channel: @@ -766,7 +819,7 @@ def change_host_check_timeperiod(use_grpc: int, hst: str, check_timeperiod: str) now = int(time.time()) cmd = "[{}] CHANGE_HOST_CHECK_TIMEPERIOD;{};{}\n".format( now, hst, check_timeperiod) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -781,7 +834,7 @@ def change_host_notification_timeperiod(use_grpc: int, hst: str, notification_ti now = int(time.time()) cmd = "[{}] CHANGE_HOST_NOTIFICATION_TIMEPERIOD;{};{}\n".format( now, hst, notification_timeperiod) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -796,7 +849,7 @@ def change_svc_check_timeperiod(use_grpc: int, hst: str, svc: str, check_timeper now = int(time.time()) cmd = "[{}] CHANGE_SVC_CHECK_TIMEPERIOD;{};{};{}\n".format( now, hst, svc, check_timeperiod) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -811,7 +864,7 @@ def 
change_svc_notification_timeperiod(use_grpc: int, hst: str, svc: str, notifi now = int(time.time()) cmd = "[{}] CHANGE_SVC_NOTIFICATION_TIMEPERIOD;{};{};{}\n".format( now, hst, svc, notification_timeperiod) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -826,7 +879,7 @@ def disable_host_and_child_notifications(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] DISABLE_HOST_AND_CHILD_NOTIFICATIONS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -841,7 +894,7 @@ def enable_host_and_child_notifications(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] ENABLE_HOST_AND_CHILD_NOTIFICATIONS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -851,7 +904,7 @@ def disable_host_check(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] DISABLE_HOST_CHECK;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -861,7 +914,7 @@ def enable_host_check(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] ENABLE_HOST_CHECK;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -871,7 +924,7 @@ def disable_host_event_handler(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] DISABLE_HOST_EVENT_HANDLER;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + 
"/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -881,7 +934,7 @@ def enable_host_event_handler(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] ENABLE_HOST_EVENT_HANDLER;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -891,7 +944,7 @@ def disable_host_flap_detection(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] DISABLE_HOST_FLAP_DETECTION;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -901,7 +954,7 @@ def enable_host_flap_detection(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] ENABLE_HOST_FLAP_DETECTION;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -916,7 +969,7 @@ def disable_host_notifications(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] DISABLE_HOST_NOTIFICATIONS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -931,7 +984,7 @@ def enable_host_notifications(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] ENABLE_HOST_NOTIFICATIONS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -941,7 +994,7 @@ def disable_host_svc_checks(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] DISABLE_HOST_SVC_CHECKS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + 
"/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -951,7 +1004,7 @@ def enable_host_svc_checks(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] ENABLE_HOST_SVC_CHECKS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -961,7 +1014,7 @@ def disable_host_svc_notifications(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] DISABLE_HOST_SVC_NOTIFICATIONS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -971,7 +1024,7 @@ def enable_host_svc_notifications(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] ENABLE_HOST_SVC_NOTIFICATIONS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -981,7 +1034,7 @@ def disable_passive_host_checks(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] DISABLE_PASSIVE_HOST_CHECKS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -991,7 +1044,7 @@ def enable_passive_host_checks(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] ENABLE_PASSIVE_HOST_CHECKS;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1001,7 +1054,7 @@ def disable_passive_svc_checks(use_grpc: int, hst: str, svc: str): now = int(time.time()) cmd = "[{}] DISABLE_PASSIVE_SVC_CHECKS;{};{}\n".format( now, hst, svc) - f = 
open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1011,7 +1064,7 @@ def enable_passive_svc_checks(use_grpc: int, hst: str, svc: str): now = int(time.time()) cmd = "[{}] ENABLE_PASSIVE_SVC_CHECKS;{};{}\n".format( now, hst, svc) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1021,7 +1074,7 @@ def start_obsessing_over_host(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] START_OBSESSING_OVER_HOST;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1031,7 +1084,7 @@ def stop_obsessing_over_host(use_grpc: int, hst: str): now = int(time.time()) cmd = "[{}] STOP_OBSESSING_OVER_HOST;{}\n".format( now, hst) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1041,7 +1094,7 @@ def start_obsessing_over_svc(use_grpc: int, hst: str, svc: str): now = int(time.time()) cmd = "[{}] START_OBSESSING_OVER_SVC;{};{}\n".format( now, hst, svc) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1051,7 +1104,7 @@ def stop_obsessing_over_svc(use_grpc: int, hst: str, svc: str): now = int(time.time()) cmd = "[{}] STOP_OBSESSING_OVER_SVC;{};{}\n".format( now, hst, svc) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1060,7 +1113,7 @@ def service_ext_commands(hst: str, svc: str, state: int, output: str): now = int(time.time()) cmd = 
"[{}] PROCESS_SERVICE_CHECK_RESULT;{};{};{};{}\n".format( now, hst, svc, state, output) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1069,7 +1122,7 @@ def process_host_check_result(hst: str, state: int, output: str): now = int(time.time()) cmd = "[{}] PROCESS_HOST_CHECK_RESULT;{};{};{}\n".format( now, hst, state, output) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() @@ -1078,30 +1131,34 @@ def schedule_service_downtime(hst: str, svc: str, duration: int): now = int(time.time()) cmd = "[{2}] SCHEDULE_SVC_DOWNTIME;{0};{1};{2};{3};1;0;{4};admin;Downtime set by admin".format( hst, svc, now, now + duration, duration) - f = open("/var/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") f.write(cmd) f.close() + def schedule_host_downtime(poller: int, hst: str, duration: int): now = int(time.time()) cmd1 = "[{1}] SCHEDULE_HOST_DOWNTIME;{0};{1};{2};1;0;{3};admin;Downtime set by admin\n".format( hst, now, now + duration, duration) cmd2 = "[{1}] SCHEDULE_HOST_SVC_DOWNTIME;{0};{1};{2};1;0;{3};admin;Downtime set by admin\n".format( hst, now, now + duration, duration) - f = open("/var/lib/centreon-engine/config{}/rw/centengine.cmd".format(poller), "w") + f = open( + VAR_ROOT + "/lib/centreon-engine/config{}/rw/centengine.cmd".format(poller), "w") f.write(cmd1) f.write(cmd2) f.close() + def delete_host_downtimes(poller: int, hst: str): now = int(time.time()) cmd = "[{}] DEL_HOST_DOWNTIME_FULL;{};;;;;;;;\n".format(now, hst) - f = open("/var/lib/centreon-engine/config{}/rw/centengine.cmd".format(poller), "w") + f = open( + VAR_ROOT + "/lib/centreon-engine/config{}/rw/centengine.cmd".format(poller), "w") f.write(cmd) f.close() -def 
schedule_forced_svc_check(host: str, svc: str, pipe: str = "/var/lib/centreon-engine/rw/centengine.cmd"): +def schedule_forced_svc_check(host: str, svc: str, pipe: str = VAR_ROOT + "/lib/centreon-engine/rw/centengine.cmd"): now = int(time.time()) f = open(pipe, "w") cmd = "[{2}] SCHEDULE_FORCED_SVC_CHECK;{0};{1};{2}".format(host, svc, now) @@ -1110,7 +1167,7 @@ def schedule_forced_svc_check(host: str, svc: str, pipe: str = "/var/lib/centreo time.sleep(0.05) -def schedule_forced_host_check(host: str, pipe: str = "/var/lib/centreon-engine/rw/centengine.cmd"): +def schedule_forced_host_check(host: str, pipe: str = VAR_ROOT + "/lib/centreon-engine/rw/centengine.cmd"): now = int(time.time()) f = open(pipe, "w") cmd = "[{1}] SCHEDULE_FORCED_HOST_CHECK;{0};{1}".format(host, now) @@ -1343,8 +1400,66 @@ def config_engine_remove_cfg_file(poller: int, fic: str): lines = ff.readlines() ff.close() r = re.compile( - r"^\s*cfg_file=/etc/centreon-engine/config{}/{}".format(poller, fic)) + r"^\s*cfg_file=" + ETC_ROOT + "/centreon-engine/config{}/{}".format(poller, fic)) linesearch = [l for l in lines if not r.match(l)] ff = open("{}/config{}/centengine.cfg".format(CONF_DIR, poller), "w") ff.writelines(linesearch) ff.close() + + +def send_custom_host_notification(hst, notification_option, author, comment): + now = int(time.time()) + cmd = f"[{now}] SEND_CUSTOM_HOST_NOTIFICATION;{hst};{notification_option};{author};{comment}\n" + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def add_svc_comment(host_name, svc_description, persistent, user_name, comment): + now = int(time.time()) + cmd = f"[{now}] ADD_SVC_COMMENT;{host_name};{svc_description};{persistent};{user_name};{comment}\n" + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def add_host_comment(host_name, persistent, user_name, comment): + now = int(time.time()) + cmd = f"[{now}] 
ADD_HOST_COMMENT;{host_name};{persistent};{user_name};{comment}\n" + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + + +def del_host_comment(comment_id): + now = int(time.time()) + cmd = f"[{now}] DEL_HOST_COMMENT;{comment_id}\n" + f = open(VAR_ROOT + "/lib/centreon-engine/config0/rw/centengine.cmd", "w") + f.write(cmd) + f.close() + +def create_anomaly_threshold_file(path: string, host_id: int, service_id: int, metric_name: string, values: array): + f = open(path, "w") + f.write("""[ + {{ + "host_id": "{0}", + "service_id": "{1}", + "metric_name": "{2}", + "predict": [ + """.format(host_id, service_id, metric_name)) + sep = "" + for ts_lower_upper in values: + f.write(sep) + sep = "," + f.write(""" + {{ + "timestamp": {0}, + "lower": {1}, + "upper": {2} + }}""".format(ts_lower_upper[0], ts_lower_upper[1], ts_lower_upper[2])) + f.write(""" + ] + } +] +""") + f.close() diff --git a/tests/resources/db_conf.py b/tests/resources/db_conf.py index 742764d19ed..3bb91555252 100755 --- a/tests/resources/db_conf.py +++ b/tests/resources/db_conf.py @@ -1,10 +1,22 @@ #!/usr/bin/python3 from robot.api import logger -import sys import pymysql.cursors +from robot.libraries.BuiltIn import BuiltIn + + +BuiltIn().import_resource('db_variables.robot') +DB_NAME_STORAGE = BuiltIn().get_variable_value("${DBName}") +DB_NAME_CONF = BuiltIn().get_variable_value("${DBNameConf}") +DB_USER = BuiltIn().get_variable_value("${DBUser}") +DB_PASS = BuiltIn().get_variable_value("${DBPass}") +DB_HOST = BuiltIn().get_variable_value("${DBHost}") +DB_PORT = BuiltIn().get_variable_value("${DBPort}") +VAR_ROOT = BuiltIn().get_variable_value("${VarRoot}") +ETC_ROOT = BuiltIn().get_variable_value("${EtcRoot}") + +CONF_DIR = ETC_ROOT + "/centreon-engine" +ENGINE_HOME = VAR_ROOT + "/lib/centreon-engine" -CONF_DIR = "/etc/centreon-engine" -ENGINE_HOME = "/var/lib/centreon-engine" class DbConf: def __init__(self, engine): @@ -26,10 +38,10 @@ def 
__init__(self, engine): def clear_db(self): # Connect to the database - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_CONF, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -56,10 +68,10 @@ def clear_db(self): connection.commit() def init_bam(self): - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_CONF, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -73,10 +85,10 @@ def init_bam(self): self.engine.centengine_conf_add_bam() def create_conf_db(self): - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon', + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_CONF, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -160,11 +172,11 @@ def create_conf_db(self): hid += 1 connection.commit() - def create_ba_with_services(self, name:str, typ:str, svc:[(str,str)]): - connection = pymysql.connect(host='localhost', - user='centreon', - password='centreon', - database='centreon', + def create_ba_with_services(self, name:str, typ:str, svc:[(str,str)], dt_policy): + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_CONF, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) @@ -174,7 +186,14 @@ def create_ba_with_services(self, name:str, typ:str, svc:[(str,str)]): elif typ == 'worst': t = 2 with connection.cursor() as cursor: - cursor.execute("INSERT INTO mod_bam (name, state_source, activate,id_reporting_period,level_w,level_c,id_notification_period,notifications_enabled,event_handler_enabled, inherit_kpi_downtimes) VALUES ('{}',{},'1',1, 80, 70, 1,'0', 
'0','1')".format(name, t)) + if dt_policy == "inherit": + inherit_dt = 1 + elif dt_policy == "ignore": + inherit_dt = 2 + else: + inherit_dt = 0 + + cursor.execute("INSERT INTO mod_bam (name, state_source, activate,id_reporting_period,level_w,level_c,id_notification_period,notifications_enabled,event_handler_enabled, inherit_kpi_downtimes) VALUES ('{}',{},'1',1, 80, 70, 1,'0', '0','{}')".format(name, t, inherit_dt)) id_ba = cursor.lastrowid sid = self.engine.create_bam_service("ba_{}".format(id_ba), name, "_Module_BAM_1", "centreon-bam-check!{}".format(id_ba)) cursor.execute("INSERT INTO service (service_id, service_description, display_name, service_active_checks_enabled, service_passive_checks_enabled,service_register) VALUES ({0}, \"ba_{1}\",\"{2}\",'2','2','2')".format(sid, id_ba, name)) diff --git a/tests/resources/db_variables.robot b/tests/resources/db_variables.robot new file mode 100644 index 00000000000..82ec633c8a9 --- /dev/null +++ b/tests/resources/db_variables.robot @@ -0,0 +1,12 @@ +*** Variables *** +${EtcRoot} /tmp/etc +${VarRoot} /tmp/var +${DBName} centreon_storage +${DBNameConf} centreon +${DBHost} localhost +${DBUser} centreon +${DBPass} centreon +${DBPort} 3306 +${DBUserRoot} root +${DBPassRoot} centreon + diff --git a/tests/resources/resources.robot b/tests/resources/resources.robot index f4665b48de4..a4600654f68 100644 --- a/tests/resources/resources.robot +++ b/tests/resources/resources.robot @@ -1,4 +1,5 @@ *** Settings *** +Resource ./db_variables.robot Library Process Library OperatingSystem Library Common.py @@ -23,13 +24,12 @@ Clear Engine Logs Create Directory ${ENGINE_LOG} Clear Broker Logs - Remove Files ${BROKER_LOG}${/}central-broker-master.log ${BROKER_LOG}${/}central-rrd-master.log ${BROKER_LOG}${/}central-module-master.log* - Remove Files ${BROKER_LOG}${/}central-module-master.log ${BROKER_LOG}${/}central-rrd-master.log ${BROKER_LOG}${/}central-module-master.log* - Remove Files ${BROKER_LOG}${/}central-rrd-master.log 
${BROKER_LOG}${/}central-rrd-master.log ${BROKER_LOG}${/}central-module-master.log* + Remove Directory ${BROKER_LOG} Recursive=True + Create Directory ${BROKER_LOG} Start Broker - Start Process /usr/sbin/cbd /etc/centreon-broker/central-broker.json alias=b1 - Start Process /usr/sbin/cbd /etc/centreon-broker/central-rrd.json alias=b2 + Start Process /usr/sbin/cbd ${EtcRoot}/centreon-broker/central-broker.json alias=b1 + Start Process /usr/sbin/cbd ${EtcRoot}/centreon-broker/central-rrd.json alias=b2 # ${log_pid1}= Get Process Id b1 # ${log_pid2}= Get Process Id b2 # Log To Console \npidcentral=${log_pid1} pidrrd=${log_pid2}\n @@ -61,9 +61,9 @@ Start Engine ${count}= Get Engines Count FOR ${idx} IN RANGE 0 ${count} ${alias}= Catenate SEPARATOR= e ${idx} - ${conf}= Catenate SEPARATOR= /etc/centreon-engine/config ${idx} /centengine.cfg - ${log}= Catenate SEPARATOR= /var/log/centreon-engine/config ${idx} - ${lib}= Catenate SEPARATOR= /var/lib/centreon-engine/config ${idx} + ${conf}= Catenate SEPARATOR= ${EtcRoot} /centreon-engine/config ${idx} /centengine.cfg + ${log}= Catenate SEPARATOR= ${VarRoot} /log/centreon-engine/config ${idx} + ${lib}= Catenate SEPARATOR= ${VarRoot} /lib/centreon-engine/config ${idx} Create Directory ${log} Create Directory ${lib} Start Process /usr/sbin/centengine ${conf} alias=${alias} @@ -118,9 +118,20 @@ Reset Eth Connection Run iptables -F Run iptables -X +Save Logs If failed + Run Keyword If Test Failed Save Logs + +Save Logs + Create Directory failed + ${failDir}= Catenate SEPARATOR= failed/ ${Test Name} + Create Directory ${failDir} + Copy files ${centralLog} ${failDir} + Copy files ${moduleLog} ${failDir} + Copy files ${logEngine0} ${failDir} + *** Variables *** -${BROKER_LOG} /var/log/centreon-broker -${ENGINE_LOG} /var/log/centreon-engine +${BROKER_LOG} ${VarRoot}/log/centreon-broker +${ENGINE_LOG} ${VarRoot}/log/centreon-engine ${SCRIPTS} ${CURDIR}${/}scripts${/} ${centralLog} ${BROKER_LOG}/central-broker-master.log ${moduleLog} 
${BROKER_LOG}/central-module-master0.log @@ -129,9 +140,3 @@ ${rrdLog} ${BROKER_LOG}/central-rrd-master.log ${logEngine0} ${ENGINE_LOG}/config0/centengine.log ${logEngine1} ${ENGINE_LOG}/config1/centengine.log ${logEngine2} ${ENGINE_LOG}/config2/centengine.log - -${DBName} centreon_storage -${DBHost} localhost -${DBUser} centreon -${DBPass} centreon -${DBPort} 3306 diff --git a/tests/severities/severities.robot b/tests/severities/severities.robot index 8c53c84e040..3513d2d2207 100644 --- a/tests/severities/severities.robot +++ b/tests/severities/severities.robot @@ -35,7 +35,7 @@ BESEV1 ${result}= check severity With Timeout severity1 1 5 30 Should Be True ${result} msg=severity1 should be of level 1 with icon_id 5 Stop Engine - Stop Broker + Kindly Stop Broker BESEV2 [Documentation] Engine is configured with some severities. When broker receives them, it stores them in the centreon_storage.severities table. Engine is started before. @@ -59,7 +59,7 @@ BESEV2 ${result}= check severity With Timeout severity1 1 5 30 Should Be True ${result} msg=severity1 should be of level 1 with icon_id 5 Stop Engine - Stop Broker + Kindly Stop Broker BEUSEV1 [Documentation] Engine is configured with some severities. When broker receives them, it stores them in the centreon_storage.severities table. Broker is started before. @@ -86,7 +86,7 @@ BEUSEV1 ${result}= check severity With Timeout severity1 1 5 30 Should Be True ${result} msg=severity1 should be of level 1 with icon_id 5 Stop Engine - Stop Broker + Kindly Stop Broker BEUSEV2 [Documentation] Engine is configured with some severities. When broker receives them, it stores them in the centreon_storage.severities table. Engine is started before. @@ -114,7 +114,7 @@ BEUSEV2 ${result}= check severity With Timeout severity1 1 5 30 Should Be True ${result} msg=severity1 should be of level 1 with icon_id 5 Stop Engine - Stop Broker + Kindly Stop Broker BEUSEV3 [Documentation] Four services have a severity added. 
Then we remove the severity from service 1. Then we change severity 11 to severity7 for service 3. diff --git a/tests/summary.py b/tests/summary.py index 874c1296588..db63295e15c 100755 --- a/tests/summary.py +++ b/tests/summary.py @@ -1,4 +1,5 @@ #!/usr/bin/python3 +import argparse import datetime import os import re @@ -6,12 +7,18 @@ import matplotlib.pyplot as plt +parser = argparse.ArgumentParser(prog='summary.py', description='Draw a summary on the tests historical.') +parser.add_argument('--fail', '-f', action='store_true', help='Add a summary on tests that failed.') +parser.add_argument('--slow', '-s', action='store_true', help='Add a summary on slow tests.') +args = parser.parse_args() + content = os.listdir('.') r = re.compile(r"\d+") dirs = [] tests = [] gl_durations = [] gl_avg_duration = [] +fail_dict = {} for f in content: durations = [] @@ -33,6 +40,9 @@ durations.append((duration, p.attrib['name'], s.attrib['status'])) if s.attrib['status'] == 'FAIL': fail += 1 + if p.attrib['name'] not in fail_dict: + fail_dict[p.attrib['name']] = [] + fail_dict[p.attrib['name']].append(int(starttime.timestamp())) if s.attrib['status'] == 'PASS': success += 1 count += 1 @@ -44,12 +54,30 @@ print("%s: %d/%d passed tests" % (f, success, count)) # Display the arrays of longest tests -for i in range(len(gl_durations)): - t = gl_durations[i] - print("############# {:12} ##############".format(dirs[i])) - for tt in t: - print("{}: {:20}: {}".format(str(tt[0]), tt[1], tt[2])) -print("#" * 40) +if args.slow: + for i in range(len(gl_durations)): + t = gl_durations[i] + print("############# {:12} ##############".format(dirs[i])) + for tt in t: + print("{}: {:20}: {}".format(str(tt[0]), tt[1], tt[2])) + print("#" * 40) + +if args.fail: + lst = [] + fail_x = [] + fail_y = [] + for k in fail_dict: + s = len(fail_dict[k]) + m = min(fail_dict[k]) + M = max(fail_dict[k]) + n = k + d = f"############# {k} ##############\n * size = {s}\n * min = {m}\n * max = {M}\n" + lst.append((s, 
m, M, d, n)) + lst.sort() + for l in lst: + fail_x.append(l[4]) + fail_y.append(l[0]) + print(l[3]) tests.sort() x = [] @@ -67,7 +95,10 @@ x1.append(xx[0]) y1.append(xx[1]) -fig, ax = plt.subplots(2) +if args.fail: + fig, ax = plt.subplots(3) +else: + fig, ax = plt.subplots(2) fig.suptitle("Centreon-Tests") ax[0].set_ylabel('tests') ax[0].set_xlabel('date') @@ -83,4 +114,9 @@ ax[1].plot(x1, y1) ax[1].grid(color='gray', linestyle='dashed') +if args.fail: + ax[2].bar(fail_x, fail_y, linewidth=2) + ax[2].set_ylabel('Fails count') + ax[2].tick_params(labelrotation=90, labelsize=8) + plt.show() From 84d464eafdf888be25a17295efe05554bc00e9d9 Mon Sep 17 00:00:00 2001 From: jean-christophe81 <98889244+jean-christophe81@users.noreply.github.com> Date: Fri, 8 Jul 2022 17:26:32 +0200 Subject: [PATCH 3/4] MON-14166 fix bbdo compression nego (#316) REFS:MON-14166 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 818ac397d90..61071a8e81f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,9 @@ Columns notes, notes\_url and action\_url are resized. *Compression* In the bbdo negotiation, compression was never activated +*Compression* +In the bbdo negotiation, compression was never activated + #### Enhancements *downtimes* From f4399812ba44f52d5a106e9c496dbfa2d8dd296e Mon Sep 17 00:00:00 2001 From: omercier <32134301+omercier@users.noreply.github.com> Date: Mon, 22 Aug 2022 18:58:08 +0200 Subject: [PATCH 4/4] enh(build): protect variables + bash comparisons 22.04 * fix(raspbian): parse only first line of /etc/issue --- cmake.sh | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/cmake.sh b/cmake.sh index e236669eba9..7146626977d 100755 --- a/cmake.sh +++ b/cmake.sh @@ -15,7 +15,7 @@ EOF BUILD_TYPE="Debug" CONAN_REBUILD="0" for i in $(cat conanfile.txt) ; do - if [[ $i =~ / ]] ; then + if [[ "$i" =~ / ]] ; then if [ ! 
-d ~/.conan/data/$i ] ; then echo "The package '$i' is missing" CONAN_REBUILD="1" @@ -26,7 +26,7 @@ done for i in "$@" do - case $i in + case "$i" in -f|--force) force=1 shift @@ -54,12 +54,12 @@ my_id=$(id -u) if [ -r /etc/centos-release ] ; then maj="centos$(cat /etc/centos-release | awk '{print $4}' | cut -f1 -d'.')" v=$(cmake --version) - if [[ $v =~ "version 3" ]] ; then + if [[ "$v" =~ "version 3" ]] ; then cmake='cmake' else if rpm -q cmake3 ; then cmake='cmake3' - elif [ $maj = "centos7" ] ; then + elif [[ "$maj" == "centos7" ]] ; then yum -y install epel-release cmake3 cmake='cmake3' else @@ -67,7 +67,7 @@ if [ -r /etc/centos-release ] ; then cmake='cmake' fi fi - if [ $maj = "centos7" ] ; then + if [[ "$maj" == "centos7" ]] ; then if [[ ! -x /opt/rh/rh-python38 ]] ; then yum -y install centos-release-scl yum -y install rh-python38 @@ -112,7 +112,7 @@ if [ -r /etc/centos-release ] ; then ) for i in "${pkgs[@]}"; do if ! rpm -q $i ; then - if [ $maj = 'centos7' ] ; then + if [[ "$maj" == 'centos7' ]] ; then yum install -y $i else dnf -y --enablerepo=PowerTools install $i @@ -120,17 +120,17 @@ if [ -r /etc/centos-release ] ; then fi done elif [ -r /etc/issue ] ; then - maj=$(cat /etc/issue | awk '{print $1}') - version=$(cat /etc/issue | awk '{print $3}') - if [ $version = "9" ] ; then + maj=$(head -1 /etc/issue | awk '{print $1}') + version=$(head -1 /etc/issue | awk '{print $3}') + if [[ "$version" == "9" ]] ; then dpkg="dpkg" else dpkg="dpkg --no-pager" fi v=$(cmake --version) - if [[ $v =~ "version 3" ]] ; then + if [[ "$v" =~ "version 3" ]] ; then cmake='cmake' - elif [ $maj = "Debian" ] || [ "$maj" = "Ubuntu" ]; then + elif [[ "$maj" == "Debian" ]] || [[ "$maj" == "Ubuntu" ]]; then if $dpkg -l cmake ; then echo "Bad version of cmake..." 
exit 1 @@ -138,7 +138,7 @@ elif [ -r /etc/issue ] ; then echo -e "cmake is not installed, you could enter, as root:\n\tapt install -y cmake\n\n" cmake='cmake' fi - elif [ $maj = "Raspbian" ] ; then + elif [[ "$maj" == "Raspbian" ]] ; then if $dpkg -l cmake ; then echo "Bad version of cmake..." exit 1 @@ -151,7 +151,7 @@ elif [ -r /etc/issue ] ; then exit 1 fi - if [ $maj = "Debian" ] || [ "$maj" = "Ubuntu" ]; then + if [[ "$maj" == "Debian" ]] || [[ "$maj" == "Ubuntu" ]]; then pkgs=( gcc g++ @@ -168,7 +168,7 @@ elif [ -r /etc/issue ] ; then ) for i in "${pkgs[@]}"; do if ! $dpkg -l $i | grep "^ii" ; then - if [ $my_id -eq 0 ] ; then + if [[ "$my_id" == 0 ]] ; then apt install -y $i else echo -e "The package \"$i\" is not installed, you can install it, as root, with the command:\n\tapt install -y $i\n\n" @@ -176,7 +176,7 @@ elif [ -r /etc/issue ] ; then fi fi done - elif [ $maj = "Raspbian" ] ; then + elif [[ "$maj" == "Raspbian" ]] ; then pkgs=( gcc g++ @@ -193,7 +193,7 @@ elif [ -r /etc/issue ] ; then ) for i in "${pkgs[@]}"; do if ! $dpkg -l $i | grep "^ii" ; then - if [ $my_id -eq 0 ] ; then + if [[ "$my_id" == 0 ]] ; then apt install -y $i else echo -e "The package \"$i\" is not installed, you can install it, as root, with the command:\n\tapt install -y $i\n\n" @@ -203,7 +203,7 @@ elif [ -r /etc/issue ] ; then done fi if [[ ! -x /usr/bin/python3 ]] ; then - if [ $my_id -eq 0 ] ; then + if [[ "$my_id" == 0 ]] ; then apt install -y python3 else echo -e "python3 is not installed, you can enter, as root:\n\tapt install -y python3\n\n" @@ -213,7 +213,7 @@ elif [ -r /etc/issue ] ; then echo "python3 already installed" fi if ! 
$dpkg -l python3-pip ; then - if [ $my_id -eq 0 ] ; then + if [[ "$my_id" == 0 ]] ; then apt install -y python3-pip else echo -e "python3-pip is not installed, you can enter, as root:\n\tapt install -y python3-pip\n\n" @@ -226,7 +226,7 @@ fi pip3 install conan --upgrade -if [ $my_id -eq 0 ] ; then +if [[ "$my_id" == 0 ]] ; then conan='/usr/local/bin/conan' elif which conan ; then conan=$(which conan) @@ -245,9 +245,9 @@ if [ "$force" = "1" ] ; then mkdir build fi cd build -if [ $maj = "centos7" ] ; then +if [[ "$maj" == "centos7" ]] ; then rm -rf ~/.conan/profiles/default - if [ "$CONAN_REBUILD" = "1" ] ; then + if [[ "$CONAN_REBUILD" == "1" ]] ; then $conan install .. -s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build="*" else $conan install .. -s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build=missing @@ -256,9 +256,9 @@ else $conan install .. -s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build=missing fi -if [ $maj = "Raspbian" ] ; then +if [[ "$maj" == "Raspbian" ]] ; then CXXFLAGS="-Wall -Wextra" $cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DWITH_TESTING=On -DWITH_MODULE_SIMU=On -DWITH_BENCH=On -DWITH_CREATE_FILES=OFF $* .. -elif [ $maj = "Debian" ] ; then +elif [[ "$maj" == "Debian" ]] ; then CXXFLAGS="-Wall -Wextra" $cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_MODULE_SIMU=On -DWITH_BENCH=On -DWITH_CREATE_FILES=OFF $* .. else CXXFLAGS="-Wall -Wextra" $cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_MODULE_SIMU=On -DWITH_BENCH=On -DWITH_CREATE_FILES=OFF $* ..