diff --git a/.github/actions/release/action.yml b/.github/actions/release/action.yml index 1848c030357..c1da256ea6f 100644 --- a/.github/actions/release/action.yml +++ b/.github/actions/release/action.yml @@ -1,9 +1,19 @@ name: "tag version" description: "Tag package" inputs: - jira_token: + jira_api_token: description: "Token to authenticate to Jira" required: true + jira_user_email: + description: "Email linked to token" + required: true + jira_project_id: + description: "Jira project id to create release" + required: true + jira_base_url: + description: "Jira base url" + required: true + runs: using: "composite" steps: @@ -44,11 +54,11 @@ runs: TYPE=Release fi - VERSION_DATA="{\"archived\":false,\"releaseDate\":\"$(date +%Y-%m-%d)\",\"name\":\"centreon-collect-$NEW_VERSION\",\"description\":\"$TYPE:$RELEASE_ID\",\"projectId\":11789,\"released\":false}" + VERSION_DATA="{\"archived\":false,\"releaseDate\":\"$(date +%Y-%m-%d)\",\"name\":\"centreon-collect-$NEW_VERSION\",\"description\":\"$TYPE:$RELEASE_ID\",\"projectId\":${{ inputs.jira_project_id }},\"released\":false}" curl --fail --request POST \ - --url 'https://centreon.atlassian.net/rest/api/3/version' \ - --header 'Authorization: Basic ${{ inputs.jira_token }}' \ + --url '${{ inputs.jira_base_url }}/rest/api/3/version' \ + --user '${{ inputs.jira_user_email }}:${{ inputs.jira_api_token }}' \ --header 'Accept: application/json' \ --header 'Content-Type: application/json' \ --data ''$VERSION_DATA'' diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 3db635e65a3..6eeff4da184 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,7 +3,7 @@ updates: - package-ecosystem: github-actions directory: '/' schedule: - interval: weekly + interval: monthly open-pull-requests-limit: 10 labels: - 'dependencies' diff --git a/.github/workflows/release-collect.yml b/.github/workflows/release-collect.yml index 730286513ec..ed2cecbf068 100644 --- a/.github/workflows/release-collect.yml +++ 
b/.github/workflows/release-collect.yml @@ -10,6 +10,8 @@ on: - "[2-9][0-9].[0-9][0-9].x" paths: - "centreon-collect/**" + - "!centreon-collect/ci/**" + - "!centreon-collect/tests/**" workflow_dispatch: env: @@ -29,4 +31,7 @@ jobs: id: release uses: ./.github/actions/release with: - jira_token: ${{ secrets.JIRA_TOKEN }} + jira_user_email: ${{ secrets.XRAY_JIRA_USER_EMAIL }} + jira_api_token: ${{ secrets.XRAY_JIRA_TOKEN }} + jira_project_id: ${{ secrets.JIRA_PROJECT_ID }} + jira_base_url: ${{ secrets.JIRA_BASE_URL }} diff --git a/.github/workflows/veracode-analysis.yml b/.github/workflows/veracode-analysis.yml index 9e2290f5e1e..3e46172c13f 100644 --- a/.github/workflows/veracode-analysis.yml +++ b/.github/workflows/veracode-analysis.yml @@ -37,12 +37,15 @@ jobs: - name: Set routing mode id: routing-mode run: | - CHECK_BRANCH=`echo "${{ github.ref_name }}" | cut -d'/' -f2` - if [[ $CHECK_BRANCH != "merge" && '${{ github.event_name }}' != 'pull_request' ]]; then - DEVELOPMENT_STAGE="Release" - else - DEVELOPMENT_STAGE="Development" - fi + DEVELOPMENT_STAGE="Development" + + ALLOWED_BRANCHES=(develop master dev-${{ inputs.major_version }}.x ${{ inputs.major_version }}.x) + for BRANCH in "${ALLOWED_BRANCHES[@]}"; do + if [[ "${{ github.ref_name }}" == "$BRANCH" ]] && [[ '${{ github.event_name }}' != 'pull_request' ]]; then + DEVELOPMENT_STAGE="Release" + fi + done + echo "development_stage=$DEVELOPMENT_STAGE" >> $GITHUB_OUTPUT cat $GITHUB_OUTPUT @@ -66,10 +69,10 @@ jobs: mkdir build cd build - sudo pip3 install conan==1.57.0 --prefix=/usr --upgrade - sudo conan install .. -s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build=missing + pip3 install conan==1.62.0 --prefix=/usr --upgrade + conan install .. -s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build=missing - sudo cmake \ + cmake \ -G "Ninja" \ -DCMAKE_CXX_FLAGS="-gdwarf-2 -g3 -O0 -fno-builtin" \ -DWITH_TESTING=OFF \ @@ -88,7 +91,7 @@ jobs: -DWITH_CONFIG_FILES=ON \ .. 
- sudo ninja + ninja echo "[DEBUG] - Find compiled files" find ./ -name "*.so" @@ -134,13 +137,15 @@ jobs: path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" - sandbox-scan: + policy-scan: name: Sandbox scan - needs: [build] + needs: [routing, build] + if: needs.routing.outputs.development_stage != 'Development' runs-on: ubuntu-latest steps: - name: Promote latest scan + # only develop will be promoted to policy scan if: github.ref_name == 'develop' env: VERACODE_API_ID: "${{ secrets.veracode_api_id }}" @@ -148,7 +153,7 @@ jobs: # Action forked as API calls hardcoded '.com' route uses: sc979/veracode-sandboxes-helper@cf67241c27cbe6405ad8705111121ece9a48c4ff # v0.2 - # Promote should not fail if sandbox was not found. + # Promote should not fail to trigger following sandbox scan. continue-on-error: true with: activity: "promote-latest-scan" diff --git a/CMakeLists.txt b/CMakeLists.txt index 3660d8ffc37..bd6ab69e896 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -117,7 +117,7 @@ endif() # Version. set(COLLECT_MAJOR 22) set(COLLECT_MINOR 10) -set(COLLECT_PATCH 8) +set(COLLECT_PATCH 9) set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") add_definitions(-DCENTREON_CONNECTOR_VERSION=\"${COLLECT_VERSION}\") diff --git a/broker/bam/inc/com/centreon/broker/bam/internal.hh b/broker/bam/inc/com/centreon/broker/bam/internal.hh new file mode 100644 index 00000000000..16d5fb10447 --- /dev/null +++ b/broker/bam/inc/com/centreon/broker/bam/internal.hh @@ -0,0 +1,39 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CCB_BAM_INTERNAL_HH +#define CCB_BAM_INTERNAL_HH + +#include "broker.pb.h" +#include "com/centreon/broker/io/events.hh" +#include "com/centreon/broker/io/protobuf.hh" +#include "com/centreon/broker/namespace.hh" + +CCB_BEGIN() + +/* We have to declare the pb_ba_info also here because we don't control the + * order things are created. If the bam stream is created before brokerrpc, its + * muxer will be declared with known events (so without pb_ba_info) and if we + * want pb_ba_info to be known, then we have to force its declaration.
*/ +namespace extcmd { +using pb_ba_info = + io::protobuf; +} // namespace extcmd + +CCB_END() +#endif /* !CCB_BAM_INTERNAL_HH */ diff --git a/broker/bam/inc/com/centreon/broker/bam/monitoring_stream.hh b/broker/bam/inc/com/centreon/broker/bam/monitoring_stream.hh index 59be0fa4481..7fb359409ff 100644 --- a/broker/bam/inc/com/centreon/broker/bam/monitoring_stream.hh +++ b/broker/bam/inc/com/centreon/broker/bam/monitoring_stream.hh @@ -1,4 +1,4 @@ -/* +/** * Copyright 2014-2023 Centreon * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,18 +21,11 @@ #include #include "com/centreon/broker/bam/configuration/applier/state.hh" -#include "com/centreon/broker/database/mysql_stmt.hh" -#include "com/centreon/broker/database_config.hh" -#include "com/centreon/broker/io/stream.hh" +#include "com/centreon/broker/bam/internal.hh" #include "com/centreon/broker/mysql.hh" -#include "com/centreon/broker/namespace.hh" CCB_BEGIN() -namespace extcmd { -using pb_ba_info = - io::protobuf; -} namespace bam { /** * @class monitoring_stream monitoring_stream.hh diff --git a/broker/bam/src/main.cc b/broker/bam/src/main.cc index d2dd6909ff6..6596d133dcf 100644 --- a/broker/bam/src/main.cc +++ b/broker/bam/src/main.cc @@ -1,20 +1,20 @@ -/* -** Copyright 2011-2015, 2020-2021 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. 
-** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2011-2015, 2020-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "bbdo/bam/ba_duration_event.hh" #include "bbdo/bam/ba_event.hh" #include "bbdo/bam/ba_status.hh" @@ -31,17 +31,16 @@ #include "bbdo/bam/kpi_event.hh" #include "bbdo/bam/kpi_status.hh" #include "bbdo/bam/rebuild.hh" -#include "bbdo/events.hh" #include "bbdo/storage/index_mapping.hh" #include "bbdo/storage/metric.hh" #include "bbdo/storage/metric_mapping.hh" #include "bbdo/storage/status.hh" #include "com/centreon/broker/bam/factory.hh" +#include "com/centreon/broker/bam/internal.hh" #include "com/centreon/broker/broker_impl.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/io/protocols.hh" #include "com/centreon/broker/log_v2.hh" -#include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; using namespace com::centreon::broker; diff --git a/broker/bam/src/monitoring_stream.cc b/broker/bam/src/monitoring_stream.cc index f5e5955a58c..9c0f95ea125 100644 --- a/broker/bam/src/monitoring_stream.cc +++ b/broker/bam/src/monitoring_stream.cc @@ -21,24 +21,16 @@ #include "bbdo/bam/ba_status.hh" #include "bbdo/bam/kpi_status.hh" #include "bbdo/bam/rebuild.hh" -#include "bbdo/events.hh" #include "com/centreon/broker/bam/configuration/reader_v2.hh" -#include 
"com/centreon/broker/bam/configuration/state.hh" #include "com/centreon/broker/bam/event_cache_visitor.hh" #include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/exceptions/shutdown.hh" -#include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/log_v2.hh" #include "com/centreon/broker/misc/fifo_client.hh" -#include "com/centreon/broker/multiplexing/publisher.hh" #include "com/centreon/broker/neb/acknowledgement.hh" #include "com/centreon/broker/neb/downtime.hh" -#include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/service.hh" -#include "com/centreon/broker/neb/service_status.hh" #include "com/centreon/broker/pool.hh" -#include "com/centreon/broker/timestamp.hh" -#include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; using namespace com::centreon::broker; @@ -367,6 +359,7 @@ int monitoring_stream::write(std::shared_ptr const& data) { _write_external_command(cmd); } break; case extcmd::pb_ba_info::static_type(): { + log_v2::bam()->info("BAM: dump BA"); extcmd::pb_ba_info const& e = *std::static_pointer_cast(data); auto& obj = e.obj(); diff --git a/broker/bam/src/timeperiod_map.cc b/broker/bam/src/timeperiod_map.cc index 5784af8ff55..df2abf376a4 100644 --- a/broker/bam/src/timeperiod_map.cc +++ b/broker/bam/src/timeperiod_map.cc @@ -1,22 +1,24 @@ -/* -** Copyright 2014 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. 
-** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2014, 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/bam/timeperiod_map.hh" + +#include "com/centreon/broker/log_v2.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; @@ -137,10 +139,14 @@ timeperiod_map::get_timeperiods_by_ba_id(uint32_t ba_id) const { uint32_t tp_id = found.first->second.first; bool is_default = found.first->second.second; time::timeperiod::ptr tp = get_timeperiod(tp_id); - if (!tp) - throw msg_fmt("BAM-BI: could not find the timeperiod {} in cache.", - tp_id); - res.push_back(std::make_pair(tp, is_default)); + if (!tp) { + SPDLOG_LOGGER_ERROR(log_v2::bam(), + "BAM-BI: could not find the timeperiod {} in cache " + "for ba {}, check timeperiod table in conf db", + tp_id, ba_id); + } else { + res.push_back(std::make_pair(tp, is_default)); + } } return (res); diff --git a/broker/core/src/processing/acceptor.cc b/broker/core/src/processing/acceptor.cc index ab1370f045b..d0d37eb7bb0 100644 --- a/broker/core/src/processing/acceptor.cc +++ b/broker/core/src/processing/acceptor.cc @@ -1,20 +1,20 @@ -/* -** Copyright 2015-2022 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2015-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/processing/acceptor.hh" @@ -35,10 +35,7 @@ using namespace com::centreon::broker::processing; * @param[in] name Name of the endpoint. */ acceptor::acceptor(std::shared_ptr endp, std::string const& name) - : endpoint(true, name), - _state(stopped), - _should_exit(false), - _endp(endp) {} + : endpoint(true, name), _state(stopped), _should_exit(false), _endp(endp) {} /** * Destructor. diff --git a/broker/core/src/processing/failover.cc b/broker/core/src/processing/failover.cc index 65384455898..d21729d6b9f 100644 --- a/broker/core/src/processing/failover.cc +++ b/broker/core/src/processing/failover.cc @@ -1,20 +1,20 @@ -/* -** Copyright 2011-2017, 2021 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2011-2017, 2021-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/processing/failover.hh" @@ -164,6 +164,12 @@ void failover::_run() { std::lock_guard stream_lock(_stream_m); _stream = s; set_state(s ? 
"connected" : "connecting"); + if (s) + SPDLOG_LOGGER_DEBUG(log_v2::processing(), "{} stream connected", + _name); + else + SPDLOG_LOGGER_DEBUG(log_v2::processing(), + "{} fail to create stream", _name); } _initialized = true; set_last_connection_success(timestamp::now()); diff --git a/broker/deleteMe b/broker/deleteMe new file mode 100644 index 00000000000..e69de29bb2d diff --git a/broker/tcp/inc/com/centreon/broker/tcp/acceptor.hh b/broker/tcp/inc/com/centreon/broker/tcp/acceptor.hh index 17edc43ad28..66cc66ab979 100644 --- a/broker/tcp/inc/com/centreon/broker/tcp/acceptor.hh +++ b/broker/tcp/inc/com/centreon/broker/tcp/acceptor.hh @@ -1,20 +1,20 @@ -/* -** Copyright 2011-2013 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2011-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #ifndef CCB_TCP_ACCEPTOR_HH #define CCB_TCP_ACCEPTOR_HH @@ -35,7 +35,7 @@ namespace tcp { class acceptor : public io::endpoint { tcp_config::pointer _conf; - std::list _children; + absl::flat_hash_set _children; std::mutex _childrenm; std::shared_ptr _acceptor; @@ -47,7 +47,6 @@ class acceptor : public io::endpoint { acceptor& operator=(const acceptor&) = delete; void add_child(std::string const& child); - void listen(); std::unique_ptr open() override; void remove_child(std::string const& child); void stats(nlohmann::json& tree) override; diff --git a/broker/tcp/src/acceptor.cc b/broker/tcp/src/acceptor.cc index dcb2ccffef4..58fb56e80e1 100644 --- a/broker/tcp/src/acceptor.cc +++ b/broker/tcp/src/acceptor.cc @@ -1,5 +1,5 @@ -/* - * Copyright 2011 - 2019 Centreon (https://www.centreon.com/) +/** + * Copyright 2011 - 2024 Centreon (https://www.centreon.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -55,7 +55,7 @@ acceptor::~acceptor() noexcept { */ void acceptor::add_child(std::string const& child) { std::lock_guard lock(_childrenm); - _children.push_back(child); + _children.insert(child); } /** @@ -74,6 +74,7 @@ std::unique_ptr acceptor::open() { if (conn) { assert(conn->port()); log_v2::tcp()->info("acceptor gets a new connection from {}", conn->peer()); + add_child(conn->peer()); return std::make_unique(conn, _conf); } return nullptr; @@ -91,7 +92,7 @@ bool acceptor::is_ready() const { */ void acceptor::remove_child(std::string const& child) { std::lock_guard lock(_childrenm); - _children.remove(child); + _children.erase(child); } /** diff --git a/broker/tcp/test/acceptor.cc b/broker/tcp/test/acceptor.cc index 01d1db33ae7..2343156a043 100644 --- a/broker/tcp/test/acceptor.cc +++ b/broker/tcp/test/acceptor.cc @@ -948,7 +948,8 @@ TEST_F(TcpAcceptor, ChildsAndStats) { nlohmann::json obj; acc.stats(obj); - ASSERT_EQ(obj.dump(), "{\"peers\":\"2: child1, child3\"}"); + ASSERT_TRUE(obj.dump() == "{\"peers\":\"2: child1, child3\"}" || + obj.dump() == "{\"peers\":\"2: child3, child1\"}"); } TEST_F(TcpAcceptor, QuestionAnswerMultiple) { diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh index 8819a7d7e2d..976488fae00 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh @@ -288,8 +288,7 @@ class stream : public io::stream { database::mysql_stmt _severity_insert; database::mysql_stmt _severity_update; - database::mysql_stmt _tag_insert; - database::mysql_stmt _tag_update; + database::mysql_stmt _tag_insert_update; database::mysql_stmt _tag_delete; database::mysql_stmt _resources_tags_insert; database::mysql_stmt _resources_host_insert; @@ -348,9 +347,11 @@ class stream : public io::stream { void _process_responsive_instance(const std::shared_ptr& d); void _process_pb_host(const 
std::shared_ptr& d); + uint64_t _process_pb_host_in_resources(const Host& h, int32_t conn); void _process_pb_host_status(const std::shared_ptr& d); void _process_pb_adaptive_host(const std::shared_ptr& d); void _process_pb_service(const std::shared_ptr& d); + uint64_t _process_pb_service_in_resources(const Service& s, int32_t conn); void _process_pb_adaptive_service(const std::shared_ptr& d); void _process_pb_service_status(const std::shared_ptr& d); void _process_severity(const std::shared_ptr& d); diff --git a/broker/unified_sql/src/stream_sql.cc b/broker/unified_sql/src/stream_sql.cc index 167e4d9f3e6..f355fb20d60 100644 --- a/broker/unified_sql/src/stream_sql.cc +++ b/broker/unified_sql/src/stream_sql.cc @@ -35,6 +35,9 @@ using namespace com::centreon::broker::unified_sql; static bool time_is_undefined(uint64_t t) { return t == 0 || t == static_cast(-1); } +static const std::string _insert_or_update_tags = + "INSERT INTO tags (id,type,name) VALUES(?,?,?) ON DUPLICATE " + "KEY UPDATE tag_id=LAST_INSERT_ID(tag_id), name=VALUES(name)"; /** * @brief Clean tables with data associated to the instance. 
@@ -1264,235 +1267,7 @@ void stream::_process_pb_host(const std::shared_ptr& d) { _cache_host_instance.erase(h.host_id()); if (_store_in_resources) { - uint64_t res_id = 0; - auto found = _resource_cache.find({h.host_id(), 0}); - - if (h.enabled()) { - uint64_t sid = 0; - fmt::string_view name{misc::string::truncate( - h.name(), get_resources_col_size(resources_name))}; - fmt::string_view address{misc::string::truncate( - h.address(), get_resources_col_size(resources_address))}; - fmt::string_view alias{misc::string::truncate( - h.alias(), get_resources_col_size(resources_alias))}; - fmt::string_view parent_name{misc::string::truncate( - h.name(), get_resources_col_size(resources_parent_name))}; - fmt::string_view notes_url{misc::string::truncate( - h.notes_url(), get_resources_col_size(resources_notes_url))}; - fmt::string_view notes{misc::string::truncate( - h.notes(), get_resources_col_size(resources_notes))}; - fmt::string_view action_url{misc::string::truncate( - h.action_url(), get_resources_col_size(resources_action_url))}; - - // INSERT - if (found == _resource_cache.end()) { - _resources_host_insert.bind_value_as_u64(0, h.host_id()); - _resources_host_insert.bind_value_as_u32(1, h.state()); - _resources_host_insert.bind_value_as_u32( - 2, hst_ordered_status[h.state()]); - _resources_host_insert.bind_value_as_u64_ext( - 3u, h.last_state_change(), mapping::entry::invalid_on_zero); - _resources_host_insert.bind_value_as_bool( - 4, h.scheduled_downtime_depth() > 0); - _resources_host_insert.bind_value_as_bool( - 5, h.acknowledgement_type() != AckType::NONE); - _resources_host_insert.bind_value_as_bool( - 6, h.state_type() == Host_StateType_HARD); - _resources_host_insert.bind_value_as_u32(7, h.check_attempt()); - _resources_host_insert.bind_value_as_u32(8, h.max_check_attempts()); - _resources_host_insert.bind_value_as_u64( - 9, _cache_host_instance[h.host_id()]); - if (h.severity_id()) { - sid = _severity_cache[{h.severity_id(), 1}]; - 
log_v2::sql()->debug("host {} with severity_id {} => uid = {}", - h.host_id(), h.severity_id(), sid); - } else - log_v2::sql()->info("no host severity found in cache for host {}", - h.host_id()); - if (sid) - _resources_host_insert.bind_value_as_u64(10, sid); - else - _resources_host_insert.bind_null_u64(10); - _resources_host_insert.bind_value_as_str(11, name); - _resources_host_insert.bind_value_as_str(12, address); - _resources_host_insert.bind_value_as_str(13, alias); - _resources_host_insert.bind_value_as_str(14, parent_name); - _resources_host_insert.bind_value_as_str(15, notes_url); - _resources_host_insert.bind_value_as_str(16, notes); - _resources_host_insert.bind_value_as_str(17, action_url); - _resources_host_insert.bind_value_as_bool(18, h.notify()); - _resources_host_insert.bind_value_as_bool(19, h.passive_checks()); - _resources_host_insert.bind_value_as_bool(20, h.active_checks()); - _resources_host_insert.bind_value_as_u64(21, h.icon_id()); - - std::promise p; - std::future future = p.get_future(); - _mysql.run_statement_and_get_int( - _resources_host_insert, std::move(p), - database::mysql_task::LAST_INSERT_ID, conn); - _add_action(conn, actions::resources); - try { - res_id = future.get(); - _resource_cache.insert({{h.host_id(), 0}, res_id}); - } catch (const std::exception& e) { - log_v2::sql()->critical( - "SQL: unable to insert new host resource {}: {}", h.host_id(), - e.what()); - - std::promise promise_resource; - std::future future_resource = - promise_resource.get_future(); - _mysql.run_query_and_get_result( - fmt::format("SELECT resource_id FROM resources WHERE " - "parent_id=0 AND id={}", - h.host_id()), - std::move(promise_resource)); - try { - mysql_result res{future_resource.get()}; - if (_mysql.fetch_row(res)) { - auto r = _resource_cache.insert( - {{h.host_id(), 0}, res.value_as_u64(0)}); - found = r.first; - log_v2::sql()->debug( - "Host resource (host {}) found in database with id {}", - h.host_id(), found->second); - } else { - 
SPDLOG_LOGGER_CRITICAL( - log_v2::sql(), - "Could not insert host resource in database and no host " - "resource in database with id {}: {}", - h.host_id(), e.what()); - return; - } - } catch (const std::exception& e) { - log_v2::sql()->critical( - "No host resource in database with id {}: {}", h.host_id(), - e.what()); - return; - } - } - } - if (res_id == 0) { - res_id = found->second; - // UPDATE - _resources_host_update.bind_value_as_u32(0, h.state()); - _resources_host_update.bind_value_as_u32( - 1, hst_ordered_status[h.state()]); - _resources_host_update.bind_value_as_u64_ext( - 2, h.last_state_change(), mapping::entry::invalid_on_zero); - _resources_host_update.bind_value_as_bool( - 3, h.scheduled_downtime_depth() > 0); - _resources_host_update.bind_value_as_bool( - 4, h.acknowledgement_type() != AckType::NONE); - _resources_host_update.bind_value_as_bool( - 5, h.state_type() == Host_StateType_HARD); - _resources_host_update.bind_value_as_u32(6, h.check_attempt()); - _resources_host_update.bind_value_as_u32(7, h.max_check_attempts()); - _resources_host_update.bind_value_as_u64( - 8, _cache_host_instance[h.host_id()]); - if (h.severity_id()) { - sid = _severity_cache[{h.severity_id(), 1}]; - log_v2::sql()->debug("host {} with severity_id {} => uid = {}", - h.host_id(), h.severity_id(), sid); - } else - log_v2::sql()->info("no host severity found in cache for host {}", - h.host_id()); - if (sid) - _resources_host_update.bind_value_as_u64(9, sid); - else - _resources_host_update.bind_null_u64(9); - _resources_host_update.bind_value_as_str(10, name); - _resources_host_update.bind_value_as_str(11, address); - _resources_host_update.bind_value_as_str(12, alias); - _resources_host_update.bind_value_as_str(13, parent_name); - _resources_host_update.bind_value_as_str(14, notes_url); - _resources_host_update.bind_value_as_str(15, notes); - _resources_host_update.bind_value_as_str(16, action_url); - _resources_host_update.bind_value_as_bool(17, h.notify()); - 
_resources_host_update.bind_value_as_bool(18, h.passive_checks()); - _resources_host_update.bind_value_as_bool(19, h.active_checks()); - _resources_host_update.bind_value_as_u64(20, h.icon_id()); - _resources_host_update.bind_value_as_u64(21, res_id); - - _mysql.run_statement(_resources_host_update, - database::mysql_error::store_host_resources, - conn); - _add_action(conn, actions::resources); - } - - if (!_resources_tags_insert.prepared()) { - _resources_tags_insert = _mysql.prepare_query( - "INSERT INTO resources_tags (tag_id,resource_id) VALUES(?,?)"); - } - if (!_resources_tags_remove.prepared()) - _resources_tags_remove = _mysql.prepare_query( - "DELETE FROM resources_tags WHERE resource_id=?"); - _finish_action(-1, actions::tags); - _resources_tags_remove.bind_value_as_u64(0, res_id); - _mysql.run_statement(_resources_tags_remove, - database::mysql_error::delete_resources_tags, - conn); - for (auto& tag : h.tags()) { - auto it_tags_cache = _tags_cache.find({tag.id(), tag.type()}); - - if (it_tags_cache == _tags_cache.end()) { - log_v2::sql()->error( - "SQL: could not find in cache the tag ({}, {}) for host " - "'{}': " - "trying to add it.", - tag.id(), tag.type(), h.host_id()); - if (!_tag_insert.prepared()) - _tag_insert = _mysql.prepare_query( - "INSERT INTO tags (id,type,name) VALUES(?,?,?)"); - _tag_insert.bind_value_as_u64(0, tag.id()); - _tag_insert.bind_value_as_u32(1, tag.type()); - _tag_insert.bind_value_as_str(2, "(unknown)"); - std::promise p; - std::future future = p.get_future(); - - _mysql.run_statement_and_get_int( - _tag_insert, std::move(p), - database::mysql_task::LAST_INSERT_ID, conn); - try { - uint64_t tag_id = future.get(); - it_tags_cache = - _tags_cache.insert({{tag.id(), tag.type()}, tag_id}).first; - } catch (const std::exception& e) { - log_v2::sql()->error( - "SQL: unable to insert new tag ({},{}): {}", tag.id(), - tag.type(), e.what()); - } - } - - if (it_tags_cache != _tags_cache.end()) { - 
_resources_tags_insert.bind_value_as_u64(0, - it_tags_cache->second); - _resources_tags_insert.bind_value_as_u64(1, res_id); - log_v2::sql()->debug( - "SQL: new relation between host (resource_id: {}, host_id: " - "{}) " - "and tag ({},{})", - res_id, h.host_id(), tag.id(), tag.type()); - _mysql.run_statement( - _resources_tags_insert, - database::mysql_error::store_tags_resources_tags, conn); - _add_action(conn, actions::resources_tags); - } - } - } else { - if (found != _resource_cache.end()) { - _resources_disable.bind_value_as_u64(0, found->second); - - _mysql.run_statement(_resources_disable, - database::mysql_error::clean_resources, conn); - _resource_cache.erase(found); - _add_action(conn, actions::resources); - } else { - log_v2::sql()->info( - "SQL: no need to remove host {}, it is not in database", - h.host_id()); - } - } + _process_pb_host_in_resources(h, conn); } } else log_v2::sql()->trace( @@ -1502,6 +1277,251 @@ void stream::_process_pb_host(const std::shared_ptr& d) { } } +uint64_t stream::_process_pb_host_in_resources(const Host& h, int32_t conn) { + auto found = _resource_cache.find({h.host_id(), 0}); + + uint64_t res_id = 0; + if (h.enabled()) { + uint64_t sid = 0; + fmt::string_view name{misc::string::truncate( + h.name(), get_resources_col_size(resources_name))}; + fmt::string_view address{misc::string::truncate( + h.address(), get_resources_col_size(resources_address))}; + fmt::string_view alias{misc::string::truncate( + h.alias(), get_resources_col_size(resources_alias))}; + fmt::string_view parent_name{misc::string::truncate( + h.name(), get_resources_col_size(resources_parent_name))}; + fmt::string_view notes_url{misc::string::truncate( + h.notes_url(), get_resources_col_size(resources_notes_url))}; + fmt::string_view notes{misc::string::truncate( + h.notes(), get_resources_col_size(resources_notes))}; + fmt::string_view action_url{misc::string::truncate( + h.action_url(), get_resources_col_size(resources_action_url))}; + + // INSERT + if 
(found == _resource_cache.end()) { + _resources_host_insert.bind_value_as_u64(0, h.host_id()); + _resources_host_insert.bind_value_as_u32(1, h.state()); + _resources_host_insert.bind_value_as_u32(2, + hst_ordered_status[h.state()]); + _resources_host_insert.bind_value_as_u64_ext( + 3u, h.last_state_change(), mapping::entry::invalid_on_zero); + _resources_host_insert.bind_value_as_bool( + 4, h.scheduled_downtime_depth() > 0); + _resources_host_insert.bind_value_as_bool( + 5, h.acknowledgement_type() != AckType::NONE); + _resources_host_insert.bind_value_as_bool( + 6, h.state_type() == Host_StateType_HARD); + _resources_host_insert.bind_value_as_u32(7, h.check_attempt()); + _resources_host_insert.bind_value_as_u32(8, h.max_check_attempts()); + _resources_host_insert.bind_value_as_u64( + 9, _cache_host_instance[h.host_id()]); + if (h.severity_id()) { + sid = _severity_cache[{h.severity_id(), 1}]; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "host {} with severity_id {} => uid = {}", + h.host_id(), h.severity_id(), sid); + } else + SPDLOG_LOGGER_INFO(log_v2::sql(), + "no host severity found in cache for host {}", + h.host_id()); + if (sid) + _resources_host_insert.bind_value_as_u64(10, sid); + else + _resources_host_insert.bind_null_u64(10); + _resources_host_insert.bind_value_as_str(11, name); + _resources_host_insert.bind_value_as_str(12, address); + _resources_host_insert.bind_value_as_str(13, alias); + _resources_host_insert.bind_value_as_str(14, parent_name); + _resources_host_insert.bind_value_as_str(15, notes_url); + _resources_host_insert.bind_value_as_str(16, notes); + _resources_host_insert.bind_value_as_str(17, action_url); + _resources_host_insert.bind_value_as_bool(18, h.notify()); + _resources_host_insert.bind_value_as_bool(19, h.passive_checks()); + _resources_host_insert.bind_value_as_bool(20, h.active_checks()); + _resources_host_insert.bind_value_as_u64(21, h.icon_id()); + + std::promise p; + std::future future = p.get_future(); + 
_mysql.run_statement_and_get_int( + _resources_host_insert, std::move(p), + database::mysql_task::LAST_INSERT_ID, conn); + _add_action(conn, actions::resources); + try { + res_id = future.get(); + _resource_cache.insert({{h.host_id(), 0}, res_id}); + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL(log_v2::sql(), + "SQL: unable to insert new host resource {}: {}", + h.host_id(), e.what()); + + std::promise promise_resource; + std::future future_resource = + promise_resource.get_future(); + _mysql.run_query_and_get_result( + fmt::format("SELECT resource_id FROM resources WHERE " + "parent_id=0 AND id={}", + h.host_id()), + std::move(promise_resource)); + try { + mysql_result res{future_resource.get()}; + if (_mysql.fetch_row(res)) { + auto r = + _resource_cache.insert({{h.host_id(), 0}, res.value_as_u64(0)}); + found = r.first; + SPDLOG_LOGGER_DEBUG( + log_v2::sql(), + "Host resource (host {}) found in database with id {}", + h.host_id(), found->second); + } else { + SPDLOG_LOGGER_CRITICAL( + log_v2::sql(), + "Could not insert host resource in database and no host " + "resource in database with id {}: {}", + h.host_id(), e.what()); + return 0; + } + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL(log_v2::sql(), + "No host resource in database with id {}: {}", + h.host_id(), e.what()); + return 0; + } + } + SPDLOG_LOGGER_DEBUG(log_v2::sql(), "insert resource {} for host{}", + res_id, h.host_id()); + } + if (res_id == 0) { + res_id = found->second; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), "update resource {} for host{}", + res_id, h.host_id()); + // UPDATE + _resources_host_update.bind_value_as_u32(0, h.state()); + _resources_host_update.bind_value_as_u32(1, + hst_ordered_status[h.state()]); + _resources_host_update.bind_value_as_u64_ext( + 2, h.last_state_change(), mapping::entry::invalid_on_zero); + _resources_host_update.bind_value_as_bool( + 3, h.scheduled_downtime_depth() > 0); + _resources_host_update.bind_value_as_bool( + 4, 
h.acknowledgement_type() != AckType::NONE); + _resources_host_update.bind_value_as_bool( + 5, h.state_type() == Host_StateType_HARD); + _resources_host_update.bind_value_as_u32(6, h.check_attempt()); + _resources_host_update.bind_value_as_u32(7, h.max_check_attempts()); + _resources_host_update.bind_value_as_u64( + 8, _cache_host_instance[h.host_id()]); + if (h.severity_id()) { + sid = _severity_cache[{h.severity_id(), 1}]; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "host {} with severity_id {} => uid = {}", + h.host_id(), h.severity_id(), sid); + } else + SPDLOG_LOGGER_INFO(log_v2::sql(), + "no host severity found in cache for host {}", + h.host_id()); + if (sid) + _resources_host_update.bind_value_as_u64(9, sid); + else + _resources_host_update.bind_null_u64(9); + _resources_host_update.bind_value_as_str(10, name); + _resources_host_update.bind_value_as_str(11, address); + _resources_host_update.bind_value_as_str(12, alias); + _resources_host_update.bind_value_as_str(13, parent_name); + _resources_host_update.bind_value_as_str(14, notes_url); + _resources_host_update.bind_value_as_str(15, notes); + _resources_host_update.bind_value_as_str(16, action_url); + _resources_host_update.bind_value_as_bool(17, h.notify()); + _resources_host_update.bind_value_as_bool(18, h.passive_checks()); + _resources_host_update.bind_value_as_bool(19, h.active_checks()); + _resources_host_update.bind_value_as_u64(20, h.icon_id()); + _resources_host_update.bind_value_as_u64(21, res_id); + + _mysql.run_statement(_resources_host_update, + database::mysql_error::store_host_resources, conn); + _add_action(conn, actions::resources); + } + + if (!_resources_tags_insert.prepared()) { + _resources_tags_insert = _mysql.prepare_query( + "INSERT INTO resources_tags (tag_id,resource_id) " + "VALUES(?,?)"); + } + if (!_resources_tags_remove.prepared()) + _resources_tags_remove = _mysql.prepare_query( + "DELETE FROM resources_tags WHERE resource_id=?"); + _finish_action(-1, actions::tags); + 
_resources_tags_remove.bind_value_as_u64(0, res_id); + _mysql.run_statement(_resources_tags_remove, + database::mysql_error::delete_resources_tags, conn); + for (auto& tag : h.tags()) { + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "add tag ({}, {}) for resource {} for host{}", + tag.id(), tag.type(), res_id, h.host_id()); + + auto it_tags_cache = _tags_cache.find({tag.id(), tag.type()}); + + if (it_tags_cache == _tags_cache.end()) { + SPDLOG_LOGGER_ERROR( + log_v2::sql(), + "SQL: could not find in cache the tag ({}, {}) for host " + "'{}': " + "trying to add it.", + tag.id(), tag.type(), h.host_id()); + if (!_tag_insert_update.prepared()) + _tag_insert_update = _mysql.prepare_query(_insert_or_update_tags); + _tag_insert_update.bind_value_as_u64(0, tag.id()); + _tag_insert_update.bind_value_as_u32(1, tag.type()); + _tag_insert_update.bind_value_as_str(2, "(unknown)"); + std::promise p; + std::future future = p.get_future(); + + _mysql.run_statement_and_get_int( + _tag_insert_update, std::move(p), + database::mysql_task::LAST_INSERT_ID, conn); + try { + uint64_t tag_id = future.get(); + it_tags_cache = + _tags_cache.insert({{tag.id(), tag.type()}, tag_id}).first; + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(log_v2::sql(), + "SQL: unable to insert new tag ({},{}): {}", + tag.id(), tag.type(), e.what()); + } + } + + if (it_tags_cache != _tags_cache.end()) { + _resources_tags_insert.bind_value_as_u64(0, it_tags_cache->second); + _resources_tags_insert.bind_value_as_u64(1, res_id); + SPDLOG_LOGGER_DEBUG( + log_v2::sql(), + "SQL: new relation between host (resource_id: {}, host_id: " + "{}) " + "and tag ({},{},{})", + res_id, h.host_id(), it_tags_cache->second, tag.id(), tag.type()); + _mysql.run_statement(_resources_tags_insert, + database::mysql_error::store_tags_resources_tags, + conn); + _add_action(conn, actions::resources_tags); + } + } + } else { + if (found != _resource_cache.end()) { + _resources_disable.bind_value_as_u64(0, found->second); + + 
_mysql.run_statement(_resources_disable, + database::mysql_error::clean_resources, conn); + _resource_cache.erase(found); + _add_action(conn, actions::resources); + } else { + SPDLOG_LOGGER_INFO( + log_v2::sql(), + "SQL: no need to remove host {}, it is not in database", h.host_id()); + } + } + return res_id; +} + /** * Process an adaptive host event. * @@ -2471,245 +2491,257 @@ void stream::_process_pb_service(const std::shared_ptr& d) { _check_and_update_index_cache(s); if (_store_in_resources) { - uint64_t res_id = 0; - auto found = _resource_cache.find({s.service_id(), s.host_id()}); - - if (s.enabled()) { - uint64_t sid = 0; - fmt::string_view name{misc::string::truncate( - s.display_name(), get_resources_col_size(resources_name))}; - fmt::string_view parent_name{misc::string::truncate( - s.host_name(), get_resources_col_size(resources_parent_name))}; - fmt::string_view notes_url{misc::string::truncate( - s.notes_url(), get_resources_col_size(resources_notes_url))}; - fmt::string_view notes{misc::string::truncate( - s.notes(), get_resources_col_size(resources_notes))}; - fmt::string_view action_url{misc::string::truncate( - s.action_url(), get_resources_col_size(resources_action_url))}; - - // INSERT - if (found == _resource_cache.end()) { - _resources_service_insert.bind_value_as_u64(0, s.service_id()); - _resources_service_insert.bind_value_as_u64(1, s.host_id()); - _resources_service_insert.bind_value_as_u32(2, s.type()); - if (s.internal_id()) - _resources_service_insert.bind_value_as_u64(3, s.internal_id()); - else - _resources_service_insert.bind_null_u64(3); - _resources_service_insert.bind_value_as_u32(4, s.state()); - _resources_service_insert.bind_value_as_u32( - 5, svc_ordered_status[s.state()]); - _resources_service_insert.bind_value_as_u64_ext( - 6, s.last_state_change(), mapping::entry::invalid_on_zero); - _resources_service_insert.bind_value_as_bool( - 7, s.scheduled_downtime_depth() > 0); - _resources_service_insert.bind_value_as_bool( - 8, 
s.acknowledgement_type() != AckType::NONE); - _resources_service_insert.bind_value_as_bool( - 9, s.state_type() == Service_StateType_HARD); - _resources_service_insert.bind_value_as_u32(10, s.check_attempt()); - _resources_service_insert.bind_value_as_u32(11, - s.max_check_attempts()); - _resources_service_insert.bind_value_as_u64( - 12, _cache_host_instance[s.host_id()]); - if (s.severity_id() > 0) { - sid = _severity_cache[{s.severity_id(), 0}]; - log_v2::sql()->debug( - "service ({}, {}) with severity_id {} => uid = {}", s.host_id(), - s.service_id(), s.severity_id(), sid); - } - if (sid) - _resources_service_insert.bind_value_as_u64(13, sid); - else - _resources_service_insert.bind_null_u64(13); - _resources_service_insert.bind_value_as_str(14, name); - _resources_service_insert.bind_value_as_str(15, parent_name); - _resources_service_insert.bind_value_as_str(16, notes_url); - _resources_service_insert.bind_value_as_str(17, notes); - _resources_service_insert.bind_value_as_str(18, action_url); - _resources_service_insert.bind_value_as_bool(19, s.notify()); - _resources_service_insert.bind_value_as_bool(20, s.passive_checks()); - _resources_service_insert.bind_value_as_bool(21, s.active_checks()); - _resources_service_insert.bind_value_as_u64(22, s.icon_id()); - - std::promise p; - std::future future = p.get_future(); - _mysql.run_statement_and_get_int( - _resources_service_insert, std::move(p), - database::mysql_task::LAST_INSERT_ID, conn); - _add_action(conn, actions::resources); - try { - res_id = future.get(); - _resource_cache.insert({{s.service_id(), s.host_id()}, res_id}); - } catch (const std::exception& e) { - log_v2::sql()->critical( - "SQL: unable to insert new service resource ({}, {}): {}", - s.host_id(), s.service_id(), e.what()); + _process_pb_service_in_resources(s, conn); + } + } else + SPDLOG_LOGGER_TRACE( + log_v2::sql(), + "SQL: service '{}' has no host ID, service ID nor hostname, probably " + "bam fake service", + s.description()); +} - 
std::promise promise_resource; - std::future future_resource = - promise_resource.get_future(); - _mysql.run_query_and_get_result( - fmt::format("SELECT resource_id FROM resources WHERE " - "parent_id={} AND id={}", - s.host_id(), s.service_id()), - std::move(promise_resource)); - try { - mysql_result res{future_resource.get()}; - if (_mysql.fetch_row(res)) { - auto r = _resource_cache.insert( - {{s.service_id(), s.host_id()}, res.value_as_u64(0)}); - found = r.first; - log_v2::sql()->debug( - "Service resource ({}, {}) found in database with id {}", - s.host_id(), s.service_id(), found->second); - } else { - SPDLOG_LOGGER_CRITICAL( - log_v2::sql(), - "Could not insert service resource in database and no " - "service resource in database with id ({},{}): {}", - s.host_id(), s.service_id(), e.what()); - return; - } - } catch (const std::exception& e) { - log_v2::sql()->critical( - "No service resource in database with id ({}, {}): {}", - s.host_id(), s.service_id(), e.what()); - return; - } - } - } - if (res_id == 0) { - res_id = found->second; - // UPDATE - _resources_service_update.bind_value_as_u32(0, s.type()); - if (s.internal_id()) - _resources_service_update.bind_value_as_u64(1, s.internal_id()); - else - _resources_service_update.bind_null_u64(1); - _resources_service_update.bind_value_as_u32(2, s.state()); - _resources_service_update.bind_value_as_u32( - 3, svc_ordered_status[s.state()]); - _resources_service_update.bind_value_as_u64_ext( - 4, s.last_state_change(), mapping::entry::invalid_on_zero); - _resources_service_update.bind_value_as_bool( - 5, s.scheduled_downtime_depth() > 0); - _resources_service_update.bind_value_as_bool( - 6, s.acknowledgement_type() != AckType::NONE); - _resources_service_update.bind_value_as_bool( - 7, s.state_type() == Service_StateType_HARD); - _resources_service_update.bind_value_as_u32(8, s.check_attempt()); - _resources_service_update.bind_value_as_u32(9, - s.max_check_attempts()); - 
_resources_service_update.bind_value_as_u64( - 10, _cache_host_instance[s.host_id()]); - if (s.severity_id() > 0) { - sid = _severity_cache[{s.severity_id(), 0}]; - log_v2::sql()->debug( - "service ({}, {}) with severity_id {} => uid = {}", s.host_id(), - s.service_id(), s.severity_id(), sid); +uint64_t stream::_process_pb_service_in_resources(const Service& s, + int32_t conn) { + uint64_t res_id = 0; + + auto found = _resource_cache.find({s.service_id(), s.host_id()}); + + if (s.enabled()) { + uint64_t sid = 0; + fmt::string_view name{misc::string::truncate( + s.display_name(), get_resources_col_size(resources_name))}; + fmt::string_view parent_name{misc::string::truncate( + s.host_name(), get_resources_col_size(resources_parent_name))}; + fmt::string_view notes_url{misc::string::truncate( + s.notes_url(), get_resources_col_size(resources_notes_url))}; + fmt::string_view notes{misc::string::truncate( + s.notes(), get_resources_col_size(resources_notes))}; + fmt::string_view action_url{misc::string::truncate( + s.action_url(), get_resources_col_size(resources_action_url))}; + + // INSERT + if (found == _resource_cache.end()) { + _resources_service_insert.bind_value_as_u64(0, s.service_id()); + _resources_service_insert.bind_value_as_u64(1, s.host_id()); + _resources_service_insert.bind_value_as_u32(2, s.type()); + if (s.internal_id()) + _resources_service_insert.bind_value_as_u64(3, s.internal_id()); + else + _resources_service_insert.bind_null_u64(3); + _resources_service_insert.bind_value_as_u32(4, s.state()); + _resources_service_insert.bind_value_as_u32( + 5, svc_ordered_status[s.state()]); + _resources_service_insert.bind_value_as_u64_ext( + 6, s.last_state_change(), mapping::entry::invalid_on_zero); + _resources_service_insert.bind_value_as_bool( + 7, s.scheduled_downtime_depth() > 0); + _resources_service_insert.bind_value_as_bool( + 8, s.acknowledgement_type() != AckType::NONE); + _resources_service_insert.bind_value_as_bool( + 9, s.state_type() == 
Service_StateType_HARD); + _resources_service_insert.bind_value_as_u32(10, s.check_attempt()); + _resources_service_insert.bind_value_as_u32(11, s.max_check_attempts()); + _resources_service_insert.bind_value_as_u64( + 12, _cache_host_instance[s.host_id()]); + if (s.severity_id() > 0) { + sid = _severity_cache[{s.severity_id(), 0}]; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "service ({}, {}) with severity_id {} => uid = {}", + s.host_id(), s.service_id(), s.severity_id(), sid); + } + if (sid) + _resources_service_insert.bind_value_as_u64(13, sid); + else + _resources_service_insert.bind_null_u64(13); + _resources_service_insert.bind_value_as_str(14, name); + _resources_service_insert.bind_value_as_str(15, parent_name); + _resources_service_insert.bind_value_as_str(16, notes_url); + _resources_service_insert.bind_value_as_str(17, notes); + _resources_service_insert.bind_value_as_str(18, action_url); + _resources_service_insert.bind_value_as_bool(19, s.notify()); + _resources_service_insert.bind_value_as_bool(20, s.passive_checks()); + _resources_service_insert.bind_value_as_bool(21, s.active_checks()); + _resources_service_insert.bind_value_as_u64(22, s.icon_id()); + + std::promise p; + std::future future = p.get_future(); + _mysql.run_statement_and_get_int( + _resources_service_insert, std::move(p), + database::mysql_task::LAST_INSERT_ID, conn); + _add_action(conn, actions::resources); + try { + res_id = future.get(); + _resource_cache.insert({{s.service_id(), s.host_id()}, res_id}); + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL( + log_v2::sql(), + "SQL: unable to insert new service resource ({}, {}): {}", + s.host_id(), s.service_id(), e.what()); + + std::promise promise_resource; + std::future future_resource = + promise_resource.get_future(); + _mysql.run_query_and_get_result( + fmt::format("SELECT resource_id FROM resources WHERE " + "parent_id={} AND id={}", + s.host_id(), s.service_id()), + std::move(promise_resource)); + try { + mysql_result 
res{future_resource.get()}; + if (_mysql.fetch_row(res)) { + auto r = _resource_cache.insert( + {{s.service_id(), s.host_id()}, res.value_as_u64(0)}); + found = r.first; + SPDLOG_LOGGER_DEBUG( + log_v2::sql(), + "Service resource ({}, {}) found in database with id {}", + s.host_id(), s.service_id(), found->second); + } else { + SPDLOG_LOGGER_CRITICAL( + log_v2::sql(), + "Could not insert service resource in database and no " + "service resource in database with id ({},{}): {}", + s.host_id(), s.service_id(), e.what()); + return 0; } - if (sid) - _resources_service_update.bind_value_as_u64(11, sid); - else - _resources_service_update.bind_null_u64(11); - _resources_service_update.bind_value_as_str(12, name); - _resources_service_update.bind_value_as_str(13, parent_name); - _resources_service_update.bind_value_as_str(14, notes_url); - _resources_service_update.bind_value_as_str(15, notes); - _resources_service_update.bind_value_as_str(16, action_url); - _resources_service_update.bind_value_as_bool(17, s.notify()); - _resources_service_update.bind_value_as_bool(18, s.passive_checks()); - _resources_service_update.bind_value_as_bool(19, s.active_checks()); - _resources_service_update.bind_value_as_u64(20, s.icon_id()); - _resources_service_update.bind_value_as_u64(21, res_id); - - _mysql.run_statement(_resources_service_update, - database::mysql_error::store_service, conn); - _add_action(conn, actions::resources); + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL( + log_v2::sql(), + "No service resource in database with id ({}, {}): {}", + s.host_id(), s.service_id(), e.what()); + return 0; } + } + } + if (res_id == 0) { + res_id = found->second; + // UPDATE + _resources_service_update.bind_value_as_u32(0, s.type()); + if (s.internal_id()) + _resources_service_update.bind_value_as_u64(1, s.internal_id()); + else + _resources_service_update.bind_null_u64(1); + _resources_service_update.bind_value_as_u32(2, s.state()); + 
_resources_service_update.bind_value_as_u32( + 3, svc_ordered_status[s.state()]); + _resources_service_update.bind_value_as_u64_ext( + 4, s.last_state_change(), mapping::entry::invalid_on_zero); + _resources_service_update.bind_value_as_bool( + 5, s.scheduled_downtime_depth() > 0); + _resources_service_update.bind_value_as_bool( + 6, s.acknowledgement_type() != AckType::NONE); + _resources_service_update.bind_value_as_bool( + 7, s.state_type() == Service_StateType_HARD); + _resources_service_update.bind_value_as_u32(8, s.check_attempt()); + _resources_service_update.bind_value_as_u32(9, s.max_check_attempts()); + _resources_service_update.bind_value_as_u64( + 10, _cache_host_instance[s.host_id()]); + if (s.severity_id() > 0) { + sid = _severity_cache[{s.severity_id(), 0}]; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "service ({}, {}) with severity_id {} => uid = {}", + s.host_id(), s.service_id(), s.severity_id(), sid); + } + if (sid) + _resources_service_update.bind_value_as_u64(11, sid); + else + _resources_service_update.bind_null_u64(11); + _resources_service_update.bind_value_as_str(12, name); + _resources_service_update.bind_value_as_str(13, parent_name); + _resources_service_update.bind_value_as_str(14, notes_url); + _resources_service_update.bind_value_as_str(15, notes); + _resources_service_update.bind_value_as_str(16, action_url); + _resources_service_update.bind_value_as_bool(17, s.notify()); + _resources_service_update.bind_value_as_bool(18, s.passive_checks()); + _resources_service_update.bind_value_as_bool(19, s.active_checks()); + _resources_service_update.bind_value_as_u64(20, s.icon_id()); + _resources_service_update.bind_value_as_u64(21, res_id); + + _mysql.run_statement(_resources_service_update, + database::mysql_error::store_service, conn); + _add_action(conn, actions::resources); + } - if (!_resources_tags_insert.prepared()) { - _resources_tags_insert = _mysql.prepare_query( - "INSERT INTO resources_tags (tag_id,resource_id) VALUES(?,?)"); + if 
(!_resources_tags_insert.prepared()) { + _resources_tags_insert = _mysql.prepare_query( + "INSERT INTO resources_tags (tag_id,resource_id) " + "VALUES(?,?)"); + } + if (!_resources_tags_remove.prepared()) + _resources_tags_remove = _mysql.prepare_query( + "DELETE FROM resources_tags WHERE resource_id=?"); + _finish_action(-1, actions::tags); + _resources_tags_remove.bind_value_as_u64(0, res_id); + _mysql.run_statement(_resources_tags_remove, + database::mysql_error::delete_resources_tags, conn); + for (auto& tag : s.tags()) { + auto it_tags_cache = _tags_cache.find({tag.id(), tag.type()}); + + if (it_tags_cache == _tags_cache.end()) { + SPDLOG_LOGGER_ERROR( + log_v2::sql(), + "SQL: could not find in cache the tag ({}, {}) for service " + "({},{}): trying to add it.", + tag.id(), tag.type(), s.host_id(), s.service_id()); + if (!_tag_insert_update.prepared()) + _tag_insert_update = _mysql.prepare_query(_insert_or_update_tags); + _tag_insert_update.bind_value_as_u64(0, tag.id()); + _tag_insert_update.bind_value_as_u32(1, tag.type()); + _tag_insert_update.bind_value_as_str(2, "(unknown)"); + std::promise p; + std::future future = p.get_future(); + _mysql.run_statement_and_get_int( + _tag_insert_update, std::move(p), + database::mysql_task::LAST_INSERT_ID, conn); + try { + uint64_t tag_id = future.get(); + it_tags_cache = + _tags_cache.insert({{tag.id(), tag.type()}, tag_id}).first; + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(log_v2::sql(), + "SQL: unable to insert new tag ({},{}): {}", + tag.id(), tag.type(), e.what()); } - if (!_resources_tags_remove.prepared()) - _resources_tags_remove = _mysql.prepare_query( - "DELETE FROM resources_tags WHERE resource_id=?"); - _finish_action(-1, actions::tags); - _resources_tags_remove.bind_value_as_u64(0, res_id); - _mysql.run_statement(_resources_tags_remove, - database::mysql_error::delete_resources_tags, - conn); - for (auto& tag : s.tags()) { - auto it_tags_cache = _tags_cache.find({tag.id(), tag.type()}); - - 
if (it_tags_cache == _tags_cache.end()) { - log_v2::sql()->error( - "SQL: could not find in cache the tag ({}, {}) for service " - "({},{}): trying to add it.", - tag.id(), tag.type(), s.host_id(), s.service_id()); - if (!_tag_insert.prepared()) - _tag_insert = _mysql.prepare_query( - "INSERT INTO tags (id,type,name) VALUES(?,?,?)"); - _tag_insert.bind_value_as_u64(0, tag.id()); - _tag_insert.bind_value_as_u32(1, tag.type()); - _tag_insert.bind_value_as_str(2, "(unknown)"); - std::promise p; - std::future future = p.get_future(); - _mysql.run_statement_and_get_int( - _tag_insert, std::move(p), database::mysql_task::LAST_INSERT_ID, - conn); - try { - uint64_t tag_id = future.get(); - it_tags_cache = - _tags_cache.insert({{tag.id(), tag.type()}, tag_id}).first; - } catch (const std::exception& e) { - log_v2::sql()->error("SQL: unable to insert new tag ({},{}): {}", - tag.id(), tag.type(), e.what()); - } - } + } - if (it_tags_cache != _tags_cache.end()) { - _resources_tags_insert.bind_value_as_u64(0, it_tags_cache->second); - _resources_tags_insert.bind_value_as_u64(1, res_id); - log_v2::sql()->debug( - "SQL: new relation between service (resource_id: {}, ({}, " - "{})) and tag ({},{})", - res_id, s.host_id(), s.service_id(), tag.id(), tag.type()); - _mysql.run_statement( - _resources_tags_insert, - database::mysql_error::store_tags_resources_tags, conn); - _add_action(conn, actions::resources_tags); - } else { - log_v2::sql()->error( - "SQL: could not find the tag ({}, {}) in cache for host '{}'", - tag.id(), tag.type(), s.service_id()); - } - } + if (it_tags_cache != _tags_cache.end()) { + _resources_tags_insert.bind_value_as_u64(0, it_tags_cache->second); + _resources_tags_insert.bind_value_as_u64(1, res_id); + SPDLOG_LOGGER_DEBUG( + log_v2::sql(), + "SQL: new relation between service (resource_id: {}, ({}, " + "{})) and tag ({},{})", + res_id, s.host_id(), s.service_id(), tag.id(), tag.type()); + _mysql.run_statement(_resources_tags_insert, + 
database::mysql_error::store_tags_resources_tags, + conn); + _add_action(conn, actions::resources_tags); } else { - if (found != _resource_cache.end()) { - _resources_disable.bind_value_as_u64(0, found->second); - - _mysql.run_statement(_resources_disable, - database::mysql_error::clean_resources, conn); - _resource_cache.erase(found); - _add_action(conn, actions::resources); - } else { - log_v2::sql()->info( - "SQL: no need to remove service ({}, {}), it is not in " - "database", - s.host_id(), s.service_id()); - } + SPDLOG_LOGGER_ERROR( + log_v2::sql(), + "SQL: could not find the tag ({}, {}) in cache for host '{}'", + tag.id(), tag.type(), s.service_id()); } } - } else - log_v2::sql()->trace( - "SQL: service '{}' has no host ID, service ID nor hostname, probably " - "bam fake service", - s.description()); -} + } else { + if (found != _resource_cache.end()) { + _resources_disable.bind_value_as_u64(0, found->second); + _mysql.run_statement(_resources_disable, + database::mysql_error::clean_resources, conn); + _resource_cache.erase(found); + _add_action(conn, actions::resources); + } else { + SPDLOG_LOGGER_INFO( + log_v2::sql(), + "SQL: no need to remove service ({}, {}), it is not in " + "database", + s.host_id(), s.service_id()); + } + } + return res_id; +} /** * Process an adaptive service event. * @@ -2889,7 +2921,7 @@ void stream::_check_and_update_index_cache(const Service& ss) { // Create the metric mapping. auto im{std::make_shared( - info.index_id, ss.host_id(), ss.service_id())}; + info.index_id, ss.host_id(), ss.service_id())}; multiplexing::publisher pblshr; pblshr.write(im); @@ -3326,14 +3358,8 @@ void stream::_process_tag(const std::shared_ptr& d) { _finish_action(-1, actions::tags); // Prepare queries. - if (!_tag_update.prepared()) - _tag_update = _mysql.prepare_query( - "UPDATE tags SET id=?,type=?,name=? 
WHERE " - "tag_id=?"); - if (!_tag_insert.prepared()) - _tag_insert = _mysql.prepare_query( - "INSERT INTO tags (id,type,name) " - "VALUES(?,?,?)"); + if (!_tag_insert_update.prepared()) + _tag_insert_update = _mysql.prepare_query(_insert_or_update_tags); if (!_tag_delete.prepared()) _tag_delete = _mysql.prepare_query("DELETE FROM resources_tags WHERE tag_id=?"); @@ -3341,54 +3367,36 @@ void stream::_process_tag(const std::shared_ptr& d) { // Processed object. auto s{static_cast(d.get())}; auto& tg = s->obj(); - uint64_t tag_id = _tags_cache[{tg.id(), tg.type()}]; int32_t conn = special_conn::tag % _mysql.connections_count(); switch (tg.action()) { case Tag_Action_ADD: - if (tag_id) { - log_v2::sql()->trace("SQL: add already existing tag {}", tg.id()); - _tag_update.bind_value_as_u64(0, tg.id()); - _tag_update.bind_value_as_u32(1, tg.type()); - _tag_update.bind_value_as_str(2, tg.name()); - _tag_update.bind_value_as_u64(3, tag_id); - _mysql.run_statement(_tag_update, database::mysql_error::store_tag, - conn); - } else { - log_v2::sql()->trace("SQL: add tag {}", tg.id()); - _tag_insert.bind_value_as_u64(0, tg.id()); - _tag_insert.bind_value_as_u32(1, tg.type()); - _tag_insert.bind_value_as_str(2, tg.name()); - std::promise p; - std::future future = p.get_future(); - _mysql.run_statement_and_get_int( - _tag_insert, std::move(p), database::mysql_task::LAST_INSERT_ID, - conn); - try { - tag_id = future.get(); - _tags_cache[{tg.id(), tg.type()}] = tag_id; - } catch (const std::exception& e) { - log_v2::sql()->error( - "unified sql: unable to insert new tag ({},{}): {}", tg.id(), - tg.type(), e.what()); - } + case Tag_Action_MODIFY: { + const char* debug_action = + tg.action() == Tag_Action_ADD ? 
"insert" : "update"; + SPDLOG_LOGGER_TRACE(log_v2::sql(), "SQL: {} tag {}", debug_action, + tg.id()); + _tag_insert_update.bind_value_as_u64(0, tg.id()); + _tag_insert_update.bind_value_as_u32(1, tg.type()); + _tag_insert_update.bind_value_as_str(2, tg.name()); + std::promise p; + std::future future = p.get_future(); + _mysql.run_statement_and_get_int( + _tag_insert_update, std::move(p), + database::mysql_task::LAST_INSERT_ID, conn); + try { + uint64_t tag_id = future.get(); + _tags_cache[{tg.id(), tg.type()}] = tag_id; + SPDLOG_LOGGER_TRACE(log_v2::sql(), "new tag ({}, {}, {}) {}", tag_id, + tg.id(), tg.type(), tg.name()); + + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(log_v2::sql(), + "unified sql: unable to {} tag ({},{}): {}", + debug_action, tg.id(), tg.type(), e.what()); } _add_action(conn, actions::tags); break; - case Tag_Action_MODIFY: - log_v2::sql()->trace("SQL: modify tag {}", tg.id()); - _tag_update.bind_value_as_u64(0, tg.id()); - _tag_update.bind_value_as_u32(1, tg.type()); - _tag_update.bind_value_as_str(2, tg.name()); - if (tag_id) { - _tag_update.bind_value_as_u64(3, tag_id); - _mysql.run_statement(_tag_update, database::mysql_error::store_tag, - conn); - _add_action(conn, actions::tags); - } else - log_v2::sql()->error( - "unified sql: unable to modify tag ({}, {}): not in cache", tg.id(), - tg.type()); - break; + } case Tag_Action_DELETE: { auto it = _tags_cache.find({tg.id(), tg.type()}); if (it != _tags_cache.end()) { diff --git a/engine/precomp_inc/precomp.hh b/engine/precomp_inc/precomp.hh index 844c52ad835..27b59558134 100644 --- a/engine/precomp_inc/precomp.hh +++ b/engine/precomp_inc/precomp.hh @@ -1,21 +1,21 @@ -/* -** Copyright 2022 Centreon -** -** This file is part of Centreon Engine. -** -** Centreon Engine is free software: you can redistribute it and/or -** modify it under the terms of the GNU General Public License version 2 -** as published by the Free Software Foundation. 
-** -** Centreon Engine is distributed in the hope that it will be useful, -** but WITHOUT ANY WARRANTY; without even the implied warranty of -** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -** General Public License for more details. -** -** You should have received a copy of the GNU General Public License -** along with Centreon Engine. If not, see -** . -*/ +/** + * Copyright 2022-2024 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . 
+ */ #ifndef CCE_PRECOMP_HH #define CCE_PRECOMP_HH @@ -64,6 +64,7 @@ #include #include +#include #include #include #include diff --git a/engine/src/anomalydetection.cc b/engine/src/anomalydetection.cc index 9f49b5af619..ab46931d2e0 100644 --- a/engine/src/anomalydetection.cc +++ b/engine/src/anomalydetection.cc @@ -1120,9 +1120,11 @@ void anomalydetection::init_thresholds() { try { t.open(_thresholds_file); } catch (const std::system_error& e) { - SPDLOG_LOGGER_ERROR(log_v2::config(), - "Fail to read thresholds file '{}' : {}", - _thresholds_file, e.code().message()); + if (!verify_config) { + SPDLOG_LOGGER_ERROR(log_v2::config(), + "Fail to read thresholds file '{}' : {}", + _thresholds_file, e.code().message()); + } return; } catch (const std::exception& e) { SPDLOG_LOGGER_ERROR(log_v2::config(), diff --git a/engine/src/configuration/object.cc b/engine/src/configuration/object.cc index 76fd084e27f..c151c628dc6 100644 --- a/engine/src/configuration/object.cc +++ b/engine/src/configuration/object.cc @@ -1,21 +1,21 @@ -/* -** Copyright 2011-2014 Merethis -** -** This file is part of Centreon Engine. -** -** Centreon Engine is free software: you can redistribute it and/or -** modify it under the terms of the GNU General Public License version 2 -** as published by the Free Software Foundation. -** -** Centreon Engine is distributed in the hope that it will be useful, -** but WITHOUT ANY WARRANTY; without even the implied warranty of -** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -** General Public License for more details. -** -** You should have received a copy of the GNU General Public License -** along with Centreon Engine. If not, see -** . -*/ +/** + * Copyright 2011-2014, 2024 Merethis + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . + */ #include "com/centreon/engine/configuration/object.hh" #include "com/centreon/engine/configuration/anomalydetection.hh" @@ -204,7 +204,7 @@ bool object::parse(std::string const& line) { key.assign(line, 0, pos); value.assign(line, pos + 1, std::string::npos); } - string::trim(value); + boost::algorithm::trim(value); if (!parse(key.c_str(), value.c_str())) return object::parse(key.c_str(), value.c_str()); return true; diff --git a/engine/src/string.cc b/engine/src/string.cc index 77fa2765d1b..6ed5c8b10d2 100644 --- a/engine/src/string.cc +++ b/engine/src/string.cc @@ -41,7 +41,7 @@ bool string::get_next_line(std::ifstream& stream, unsigned int& pos) { while (std::getline(stream, line, '\n')) { ++pos; - string::trim(line); + boost::algorithm::trim(line); if (!line.empty()) { char c(line[0]); if (c != '#' && c != ';' && c != '\x0') diff --git a/packaging/rpm/centreon-collect.spec b/packaging/rpm/centreon-collect.spec index 316f6e56274..f4e9b33e192 100644 --- a/packaging/rpm/centreon-collect.spec +++ b/packaging/rpm/centreon-collect.spec @@ -491,9 +491,10 @@ fi %{_exec_prefix}/lib/systemd/system/centengine.service %{_localstatedir}/log/centreon-engine/centengine.debug %{_localstatedir}/log/centreon-engine/centengine.log -%{_localstatedir}/log/centreon-engine/retention.dat %{_localstatedir}/log/centreon-engine/status.dat +%ghost +%{_localstatedir}/log/centreon-engine/retention.dat %changelog * Fri Dec 3 2021 David Boucher 22.04.0-1 diff --git a/resources/centreon_storage.sql b/resources/centreon_storage.sql index 1d31e093377..72a2576675b 100644 --- a/resources/centreon_storage.sql +++ 
b/resources/centreon_storage.sql @@ -765,7 +765,7 @@ DROP TABLE IF EXISTS `metrics`; CREATE TABLE `metrics` ( `metric_id` int(11) NOT NULL AUTO_INCREMENT, `index_id` bigint unsigned DEFAULT NULL, - `metric_name` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `metric_name` varchar(1021) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `data_source_type` enum('0','1','2','3') DEFAULT NULL, `unit_name` varchar(32) DEFAULT NULL, `current_value` float DEFAULT NULL, diff --git a/tests/bam/bam_pb.robot b/tests/bam/bam_pb.robot index d7adf35ffad..af3f30e1210 100644 --- a/tests/bam/bam_pb.robot +++ b/tests/bam/bam_pb.robot @@ -19,7 +19,7 @@ Test Teardown Save Logs If Failed *** Test Cases *** BAPBSTATUS [Documentation] With bbdo version 3.0.0, a BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. - [Tags] broker downtime engine bam + [Tags] broker downtime engine bam MON-35113 BAM Init @{svc} Set Variable ${{ [("host_16", "service_314")] }} @@ -58,6 +58,43 @@ BAPBSTATUS ... 
SELECT current_level, acknowledged, downtime, in_downtime, current_status FROM mod_bam WHERE name='test' Should Be Equal As Strings ${output} ((100.0, 0.0, 0.0, 0, 2),) + # check broker stats + ${res} Get Broker Stats central 1: 127.0.0.1:[0-9]+ 10 endpoint central-broker-master-input peers + Should Be True ${res} no central-broker-master-input.peers found in broker stat output + + ${res} Get Broker Stats central listening 10 endpoint central-broker-master-input state + Should Be True ${res} central-broker-master-input not listening + + ${res} Get Broker Stats central connected 10 endpoint centreon-bam-monitoring state + Should Be True ${res} central-bam-monitoring not connected + + ${res} Get Broker Stats central connected 10 endpoint centreon-bam-reporting state + Should Be True ${res} central-bam-reporting not connected + + Reload Engine + Reload Broker + + # check broker stats + ${res} Get Broker Stats central 1: 127.0.0.1:[0-9]+ 10 endpoint central-broker-master-input peers + Should Be True ${res} no central-broker-master-input.peers found in broker stat output + + ${res} Get Broker Stats central listening 10 endpoint central-broker-master-input state + Should Be True ${res} central-broker-master-input not listening + + ${res} Get Broker Stats central connected 10 endpoint centreon-bam-monitoring state + Should Be True ${res} central-bam-monitoring not connected + + ${res} Get Broker Stats central connected 10 endpoint centreon-bam-reporting state + Should Be True ${res} central-bam-reporting not connected + + # Little check of the GetBa gRPC command + ${result} Run Keyword And Return Status File Should Exist /tmp/output + Run Keyword If ${result} is True Remove File /tmp/output + Broker Get Ba 51001 1 /tmp/output + Wait Until Created /tmp/output + ${result} Grep File /tmp/output digraph + Should Not Be Empty ${result} /tmp/output does not contain the word 'digraph' + Stop Engine Kindly Stop Broker diff --git a/tests/broker-engine/anomaly-detection.robot 
b/tests/broker-engine/anomaly-detection.robot index 38e8a9617e8..82fe3afc128 100644 --- a/tests/broker-engine/anomaly-detection.robot +++ b/tests/broker-engine/anomaly-detection.robot @@ -35,6 +35,26 @@ ANO_NOFILE Stop Broker Stop Engine +ANO_NOFILE_VERIF_CONFIG_NO_ERROR + [Documentation] an anomaly detection without threshold file doesn't display error on config check + [Tags] broker engine anomaly MON-35578 + Config Engine ${1} ${50} ${20} + Create Anomaly Detection ${0} ${1} ${1} metric + Remove File /tmp/anomaly_threshold.json + Clear Retention + Create Directory ${ENGINE_LOG}/config0 + Start Process + ... /usr/sbin/centengine + ... -v + ... ${EtcRoot}/centreon-engine/config0/centengine.cfg + ... alias=e0 + ... stderr=${engineLog0} + ... stdout=${engineLog0} + ${result} Wait For Process e0 30 + Should Be Equal As Integers ${result.rc} 0 engine not gracefully stopped + ${content} Grep File ${engineLog0} Fail to read thresholds file + Should Be True len("""${content}""") < 2 anomalydetection error message must not be found + ANO_TOO_OLD_FILE [Documentation] an anomaly detection with an oldest threshold file must be in unknown state [Tags] broker engine anomaly diff --git a/tests/broker-engine/services-and-bulk-stmt.robot b/tests/broker-engine/services-and-bulk-stmt.robot index 59074d784ad..f08a7b6de14 100644 --- a/tests/broker-engine/services-and-bulk-stmt.robot +++ b/tests/broker-engine/services-and-bulk-stmt.robot @@ -7,6 +7,8 @@ Library OperatingSystem Library DateTime Library Collections Library DatabaseLibrary +Library String +Library Examples Library ../resources/Engine.py Library ../resources/Broker.py Library ../resources/Common.py @@ -277,6 +279,62 @@ EBPS2 Stop Engine Start Mysql +Services_and_bulks_${id} + [Documentation] One service is configured with one metric with a name of 150 to 1021 characters. 
+ [Tags] broker engine services unified_sql benchmark MON-32951 + Clear Metrics + Config Engine ${1} ${1} ${1} + # We want all the services to be passive to avoid parasite checks during our test. + ${random_string} Generate Random String ${metric_num_char} [LOWER] + Set Services passive ${0} service_.* + Config Broker central + Config Broker rrd + Config Broker module ${1} + Broker Config Add Item module0 bbdo_version 3.0.1 + Broker Config Add Item central bbdo_version 3.0.1 + Broker Config Log central core error + Broker Config Log central tcp error + Broker Config Log central sql debug + Config Broker Sql Output central unified_sql + Broker Config Source Log central 1 + + Config Broker Remove Rrd Output central + Clear Retention + Clear Db metrics + + ${start} Get Current Date + Start Broker + Start Engine + Broker Set Sql Manager Stats 51001 5 5 + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log with Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${start_1} Get Round Current Date + + Process Service Check result with metrics + ... host_1 + ... service_${1} + ... ${1} + ... warning${0} + ... 1 + ... config0 + ... ${random_string} + + ${content} Create List perfdata on connection + ${log} Catenate SEPARATOR= ${BROKER_LOG} /central-broker-master.log + ${result} Find In Log With Timeout ${log} ${start_1} ${content} 60 + Should Be True ${result} A message fail to handle a metric with ${metric_num_char} characters. + + ${metrics} Get Metrics For Service 1 ${random_string}0 + Should Not Be Equal ${metrics} ${None} no metric found for service + + Examples: id metric_num_char -- + ... 1 1020 + ... 
2 150 + *** Keywords *** Test Clean diff --git a/tests/broker-engine/services-increased.robot b/tests/broker-engine/services-increased.robot index 236548de8e1..bda45e7b218 100644 --- a/tests/broker-engine/services-increased.robot +++ b/tests/broker-engine/services-increased.robot @@ -59,7 +59,7 @@ EBNSVC1 Service_increased_huge_check_interval [Documentation] New services with high check interval at creation time. - [Tags] broker engine services protobuf + [Tags] broker engine services protobuf MON-32951 Config Engine ${1} ${10} ${10} Config Broker rrd Config Broker central @@ -140,7 +140,7 @@ Service_increased_huge_check_interval ${metrics} Get Metrics For Service ${new_service_id} - Should Not Be Equal ${metrics} None no metric found for service ${new_service_id} + Should Not Be Equal ${metrics} ${None} no metric found for service ${new_service_id} FOR ${m} IN @{metrics} ${result} Wait Until File Modified ${VarRoot}/lib/centreon/metrics/${m}.rrd ${start} diff --git a/tests/broker-engine/tags.robot b/tests/broker-engine/tags.robot index 12e14688885..2b1025b23ee 100644 --- a/tests/broker-engine/tags.robot +++ b/tests/broker-engine/tags.robot @@ -12,14 +12,14 @@ Library ../resources/specific-duplication.py Suite Setup Clean Before Suite Suite Teardown Clean After Suite -Test Setup Stop Processes -Test Teardown Save logs If Failed +Test Setup Init Test +Test Teardown Stop Engine Broker And Save Logs *** Test Cases *** BETAG1 [Documentation] Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Broker is started before. 
- [Tags] broker engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags MON-32811 Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -30,17 +30,22 @@ BETAG1 Broker Config Log central sql debug Clear Retention Start Broker + ${start} Get Current Date Start Engine - ${result}= check tag With Timeout tag20 3 30 - Should Be True ${result} msg=tag20 should be of type 3 - ${result}= check tag With Timeout tag1 0 30 - Should Be True ${result} msg=tag1 should be of type 0 - Stop Engine - Kindly Stop Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Tag With Timeout tag20 3 30 + Should Be True ${result} tag20 should be of type 3 + ${result} Check Tag With Timeout tag1 0 30 + Should Be True ${result} tag1 should be of type 0 BETAG2 [Documentation] Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. 
- [Tags] broker engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags MON-32811 Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -51,18 +56,23 @@ BETAG2 Broker Config Log central sql debug Clear Retention Sleep 1s + ${start} Get Current Date Start Engine Start Broker - ${result}= check tag With Timeout tag20 3 30 - Should Be True ${result} msg=tag20 should be of type 3 - ${result}= check tag With Timeout tag1 0 30 - Should Be True ${result} msg=tag1 should be of type 0 - Stop Engine - Kindly Stop Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Tag With Timeout tag20 3 30 + Should Be True ${result} tag20 should be of type 3 + ${result} Check Tag With Timeout tag1 0 30 + Should Be True ${result} tag1 should be of type 0 BEUTAG1 [Documentation] Engine is configured with some tags. When broker receives them through unified_sql stream, it stores them in the centreon_storage.tags table. Broker is started before. 
- [Tags] broker engine protobuf bbdo tags unified_sql + [Tags] broker engine protobuf bbdo tags unified_sql MON-32811 Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -70,24 +80,27 @@ BEUTAG1 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Start Broker + ${start} Get Current Date Start Engine - ${result}= check tag With Timeout tag20 3 30 - Should Be True ${result} msg=tag20 should be of type 3 - ${result}= check tag With Timeout tag1 0 30 - Should Be True ${result} msg=tag1 should be of type 0 - Stop Engine - Kindly Stop Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Tag With Timeout tag20 3 30 + Should Be True ${result} tag20 should be of type 3 + ${result} Check Tag With Timeout tag1 0 30 + Should Be True ${result} tag1 should be of type 0 BEUTAG2 [Documentation] Engine is configured with some tags. A new service is added with a tag. Broker should make the relations. 
- [Tags] broker engine protobuf bbdo tags unified_sql + [Tags] broker engine protobuf bbdo tags unified_sql MON-32811 Clear Db resources Config Engine ${1} Create Tags File ${0} ${20} @@ -96,9 +109,7 @@ BEUTAG2 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Output Set central central-broker-unified-sql connections_count 1 Broker Config Output Set central central-broker-unified-sql queries_per_transaction 1 Broker Config Output Set central central-broker-unified-sql read_timeout 1 @@ -107,24 +118,33 @@ BEUTAG2 Broker Config Log central sql error Clear Retention Start Broker + ${start} Get Current Date Start Engine - Sleep 1s - ${svc}= Create Service ${0} 1 1 + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${svc} Create Service ${0} 1 1 Add Tags To Services ${0} group_tags 4 [${svc}] Stop Engine + ${start} Get Current Date Start Engine Reload Broker - Sleep 2s - ${result}= check resources tags With Timeout 1 ${svc} servicegroup [4] 60 - Should Be True ${result} msg=New service should have a service group tag of id 4. - Stop Engine - Kindly Stop Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Resources Tags With Timeout 1 ${svc} servicegroup [4] 60 + Should Be True ${result} New service should have a service group tag of id 4. 
BEUTAG3 [Documentation] Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. - [Tags] broker engine protobuf bbdo tags unified_sql + [Tags] broker engine protobuf bbdo tags unified_sql MON-32811 Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -132,26 +152,29 @@ BEUTAG3 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Sleep 1s + ${start} Get Current Date Start Engine Start Broker - ${result}= check tag With Timeout tag20 3 30 - Should Be True ${result} msg=tag20 should be of type 3 - ${result}= check tag With Timeout tag1 0 30 - Should Be True ${result} msg=tag1 should be of type 0 - Stop Engine - Kindly Stop Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Tag With Timeout tag20 3 30 + Should Be True ${result} tag20 should be of type 3 + ${result} Check Tag With Timeout tag1 0 30 + Should Be True ${result} tag1 should be of type 0 BEUTAG4 [Documentation] Engine is configured with some tags. Group tags tag9, tag13 are set to services 1 and 3. Category tags tag3 and tag11 are added to services 1, 3, 5 and 6. The centreon_storage.resources and resources_tags tables are well filled. 
- [Tags] broker engine protobuf bbdo tags unified_sql - # Clear DB tags + [Tags] broker engine protobuf bbdo tags unified_sql MON-32811 + # Clear Db tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -161,30 +184,33 @@ BEUTAG4 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention + ${start} Get Current Date Start Engine Sleep 1s Start Broker - ${result}= check resources tags With Timeout 1 1 servicegroup [4, 5] 60 - Should Be True ${result} msg=Service (1, 1) should have servicegroup tag ids 4 and 5 - ${result}= check resources tags With Timeout 1 3 servicegroup [4, 5] 60 - Should Be True ${result} msg=Service (1, 3) should have servicegroup tag ids 4, 5 - ${result}= check resources tags With Timeout 1 3 servicecategory [2, 4] 60 - Should Be True ${result} msg=Service (1, 3) should have servicecategory tag ids 2, 4 - ${result}= check resources tags With Timeout 1 5 servicecategory [2, 4] 60 - Should Be True ${result} msg=Service (1, 5) should have servicecategory tag ids 2, 4 - Stop Engine - Kindly Stop Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
+ + ${result} Check Resources Tags With Timeout 1 1 servicegroup [4, 5] 60 + Should Be True ${result} Service (1, 1) should have servicegroup tag ids 4 and 5 + ${result} Check Resources Tags With Timeout 1 3 servicegroup [4, 5] 60 + Should Be True ${result} Service (1, 3) should have servicegroup tag ids 4, 5 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [2, 4] 60 + Should Be True ${result} Service (1, 3) should have servicecategory tag ids 2, 4 + ${result} Check Resources Tags With Timeout 1 5 servicecategory [2, 4] 60 + Should Be True ${result} Service (1, 5) should have servicecategory tag ids 2, 4 BEUTAG5 [Documentation] Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. - [Tags] broker engine protobuf bbdo tags - # Clear DB tags + [Tags] broker engine protobuf bbdo tags MON-32811 + # Clear Db tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -194,30 +220,33 @@ BEUTAG5 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Sleep 1s + ${start} Get Current Date Start Engine Start Broker - ${result}= check resources tags With Timeout 0 1 hostgroup [2,3] 60 - Should Be True ${result} msg=Host 1 should have hostgroup tags 2 and 3 - ${result}= check resources tags With Timeout 0 2 hostgroup [2,3] 60 - Should Be True ${result} msg=Host 2 should have hostgroup tags 2 and 3 - ${result}= check resources tags With Timeout 0 2 hostcategory [2, 3] 60 - Should Be True ${result} msg=Host 2 should have hostcategory tags 2 and 3 - ${result}= check resources tags With Timeout 0 3 
hostcategory [2, 3] 60 - Should Be True ${result} msg=Host 3 should have hostcategory tags 2 and 3 - Stop Engine - Kindly Stop Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2,3] 60 + Should Be True ${result} Host 1 should have hostgroup tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 2 hostgroup [2,3] 60 + Should Be True ${result} Host 2 should have hostgroup tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 2 hostcategory [2, 3] 60 + Should Be True ${result} Host 2 should have hostcategory tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 3 hostcategory [2, 3] 60 + Should Be True ${result} Host 3 should have hostcategory tags 2 and 3 BEUTAG6 [Documentation] Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.resources_tags table. Engine is started before. 
- [Tags] broker engine protobuf bbdo tags - # Clear DB tags + [Tags] broker engine protobuf bbdo tags MON-32811 + # Clear Db tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -229,29 +258,32 @@ BEUTAG6 Config Broker rrd Config Broker module ${1} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Sleep 1s + ${start} Get Current Date Start Engine Start Broker - ${result}= check resources tags With Timeout 0 1 hostgroup [2,4] 60 - Should Be True ${result} msg=Host 1 should have hostgroup tag_id 2 and 4 - ${result}= check resources tags With Timeout 0 1 hostcategory [1,5] 60 - Should Be True ${result} msg=Host 1 should have hostcategory tag_id 1 and 5 - ${result}= check resources tags With Timeout 1 1 servicegroup [2,4] 60 - Should Be True ${result} msg=Service (1, 1) should have servicegroup tag_id 2 and 4. - ${result}= check resources tags With Timeout 1 1 servicecategory [3,5] 60 - Should Be True ${result} msg=Service (1, 1) should have servicecategory tag_id 3 and 5. - Stop Engine - Kindly Stop Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
+ + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2,4] 60 + Should Be True ${result} Host 1 should have hostgroup tag_id 2 and 4 + ${result} Check Resources Tags With Timeout 0 1 hostcategory [1,5] 60 + Should Be True ${result} Host 1 should have hostcategory tag_id 1 and 5 + ${result} Check Resources Tags With Timeout 1 1 servicegroup [2,4] 60 + Should Be True ${result} Service (1, 1) should have servicegroup tag_id 2 and 4. + ${result} Check Resources Tags With Timeout 1 1 servicecategory [3,5] 60 + Should Be True ${result} Service (1, 1) should have servicecategory tag_id 3 and 5. BEUTAG7 - [Documentation] some services are configured and deleted with tags on two pollers. - [Tags] broker engine protobuf bbdo tags + [Documentation] Some services are configured with tags on two pollers. Then tags configuration is modified. + [Tags] broker engine protobuf bbdo tags unstable MON-32811 Config Engine ${2} Create Tags File ${0} ${20} Create Tags File ${1} ${20} @@ -267,25 +299,28 @@ BEUTAG7 Config Broker rrd Config Broker module ${2} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item module1 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention + ${start} Get Current Date Start Engine Start Broker - Sleep 5s - # We need to wait a little before reloading Engine - ${result}= check resources tags With Timeout 1 1 servicegroup [2,4] 60 - Should Be True ${result} msg=First step: Service (1, 1) should have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 26 502 servicecategory [2,4] 60 - Should Be True ${result} msg=First step: Service (26, 502) should have servicecategory tags 13, 9, 3 and 11. 
- ${result}= check resources tags With Timeout 26 502 servicegroup [3,5] 60 - Should Be True ${result} msg=First step: Service (26, 502) should have servicegroup tags 3 and 5. + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + # We check in the DB if the service (1,1) has well its servicegroup tags configured. + ${result} Check Resources Tags With Timeout 1 1 servicegroup [2,4] 60 + Should Be True ${result} First step: Service (1, 1) should have servicegroup tags 2 and 4 + + ${result} Check Resources Tags With Timeout 26 502 servicecategory [2,4] 60 + Should Be True ${result} First step: Service (26, 502) should have servicecategory tags 13, 9, 3 and 11. + ${result} Check Resources Tags With Timeout 26 502 servicegroup [3,5] 60 + Should Be True ${result} First step: Service (26, 502) should have servicegroup tags 3 and 5. Remove Tags From Services ${0} group_tags Remove Tags From Services ${0} category_tags @@ -294,21 +329,23 @@ BEUTAG7 Create Tags File ${0} ${18} Create Tags File ${1} ${18} Add Tags To Services ${1} group_tags 3,5 [505, 506, 507, 508] + ${start} Get Round Current Date Reload Engine Reload Broker - Sleep 3s - ${result}= check resources tags With Timeout 26 507 servicegroup [3,5] 60 - Should Be True ${result} msg=Second step: Service (26, 507) should have servicegroup tags 3 and 5 + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
- ${result}= check resources tags With Timeout 26 508 servicegroup [3,5] 60 - Should Be True ${result} msg=Second step: Service (26, 508) should have servicegroup tags 3 and 5 + ${result} Check Resources Tags With Timeout 26 507 servicegroup [3,5] 60 + Should Be True ${result} Second step: Service (26, 507) should have servicegroup tags 3 and 5 - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 26 508 servicegroup [3,5] 60 + Should Be True ${result} Second step: Service (26, 508) should have servicegroup tags 3 and 5 BEUTAG8 [Documentation] Services have tags provided by templates. - [Tags] broker engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags MON-32811 Config Engine ${2} Create Tags File ${0} ${40} Create Tags File ${1} ${40} @@ -333,38 +370,38 @@ BEUTAG8 Config Broker rrd Config Broker module ${2} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item module1 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention + ${start} Get Current Date Start Engine Start Broker - Sleep 5s - # We need to wait a little before reloading Engine - ${result}= check resources tags With Timeout 1 2 servicecategory [3,5] 60 - Should Be True ${result} msg=First step: Service (1, 2) should have servicecategory tags 3 and 5. - ${result}= check resources tags With Timeout 1 2 servicegroup [1] 60 - Should Be True ${result} msg=First step: Service (1, 2) should have servicegroup tag 1. 
- ${result}= check resources tags With Timeout 1 5 servicegroup [9] 60 - Should Be True ${result} msg=First step: Service (1, 5) should have servicegroup tag 9 + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. - ${result}= check resources tags With Timeout 26 502 servicegroup [1,4,5] 60 - Should Be True ${result} msg=First step: Service (26, 502) should have tags 1, 4 and 5 + # We need to wait a little before reloading Engine + ${result} Check Resources Tags With Timeout 1 2 servicecategory [3,5] 60 + Should Be True ${result} First step: Service (1, 2) should have servicecategory tags 3 and 5. + ${result} Check Resources Tags With Timeout 1 2 servicegroup [1] 60 + Should Be True ${result} First step: Service (1, 2) should have servicegroup tag 1. - ${result}= check resources tags With Timeout 26 503 servicegroup [7] 60 - Should Be True ${result} msg=First step: Service (26, 503) should have servicegroup tag 7 + ${result} Check Resources Tags With Timeout 1 5 servicegroup [9] 60 + Should Be True ${result} First step: Service (1, 5) should have servicegroup tag 9 - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 26 502 servicegroup [1,4,5] 60 + Should Be True ${result} First step: Service (26, 502) should have tags 1, 4 and 5 + + ${result} Check Resources Tags With Timeout 26 503 servicegroup [7] 60 + Should Be True ${result} First step: Service (26, 503) should have servicegroup tag 7 BEUTAG9 [Documentation] hosts have tags provided by templates. 
- [Tags] broker engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags MON-32811 Config Engine ${2} Create Tags File ${0} ${40} Create Tags File ${1} ${40} @@ -385,49 +422,49 @@ BEUTAG9 Config Broker rrd Config Broker module ${2} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item module1 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention Sleep 1s + ${start} Get Current Date Start Engine Start Broker - # We need to wait a little before reloading Engine - ${result}= check resources tags With Timeout 0 9 hostgroup [2] 60 - Should Be True ${result} msg=First step: resource 9 should have hostgroup tag with id=2 + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
- ${result}= check resources tags With Timeout 0 10 hostgroup [2] 60 - Should Be True ${result} msg=First step: resource 10 should have hostgroup tag with id=2 + # We need to wait a little before reloading Engine + ${result} Check Resources Tags With Timeout 0 9 hostgroup [2] 60 + Should Be True ${result} First step: resource 9 should have hostgroup tag with id=2 - ${result}= check resources tags With Timeout 0 11 hostgroup [6] 60 - Should Be True ${result} msg=First step: resource 11 should have hostgroup tag with id=6 + ${result} Check Resources Tags With Timeout 0 10 hostgroup [2] 60 + Should Be True ${result} First step: resource 10 should have hostgroup tag with id=2 - ${result}= check resources tags With Timeout 0 12 hostgroup [6] 60 - Should Be True ${result} msg=First step: resource 12 should have hostgroup tag with id=6 + ${result} Check Resources Tags With Timeout 0 11 hostgroup [6] 60 + Should Be True ${result} First step: resource 11 should have hostgroup tag with id=6 - ${result}= check resources tags With Timeout 0 30 hostgroup [8] 60 - Should Be True ${result} msg=First step: resource 30 should have hostgroup tag with id=10 + ${result} Check Resources Tags With Timeout 0 12 hostgroup [6] 60 + Should Be True ${result} First step: resource 12 should have hostgroup tag with id=6 - ${result}= check resources tags With Timeout 0 31 hostgroup [8] 60 - Should Be True ${result} msg=First step: resource 31 should have hostgroup tag with id=10 + ${result} Check Resources Tags With Timeout 0 30 hostgroup [8] 60 + Should Be True ${result} First step: resource 30 should have hostgroup tag with id=10 - ${result}= check resources tags With Timeout 0 32 hostgroup [9] 60 - Should Be True ${result} msg=First step: resource 32 should have hostgroup tag with id=14 + ${result} Check Resources Tags With Timeout 0 31 hostgroup [8] 60 + Should Be True ${result} First step: resource 31 should have hostgroup tag with id=10 - ${result}= check resources tags With Timeout 0 33 
hostgroup [9] 60 - Should Be True ${result} msg=First step: host 33 should have hostgroup tag with id=14 + ${result} Check Resources Tags With Timeout 0 32 hostgroup [9] 60 + Should Be True ${result} First step: resource 32 should have hostgroup tag with id=14 - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 0 33 hostgroup [9] 60 + Should Be True ${result} First step: host 33 should have hostgroup tag with id=14 BEUTAG10 [Documentation] some services are configured with tags on two pollers. Then tags are removed from some of them and in centreon_storage, we can observe resources_tags table updated. - [Tags] broker engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags MON-32811 Config Engine ${2} Create Tags File ${0} ${20} Create Tags File ${1} ${20} @@ -443,25 +480,29 @@ BEUTAG10 Config Broker rrd Config Broker module ${2} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item module1 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention + ${start} Get Current Date Start Engine Start Broker - ${result}= check resources tags With Timeout 1 4 servicegroup [2,4] 60 - Should Be True ${result} msg=First step: Service (1, 4) should have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 1 3 servicecategory [3,5] 60 - Should Be True ${result} msg=First step: Service (1, 3) should have servicecategory tags 3 and 5 - ${result}= check resources tags With Timeout 26 504 servicegroup [3,5] 60 - Should Be True ${result} msg=First step: Service (26, 504) should have servicegroup tags 3 and 5. 
- ${result}= check resources tags With Timeout 26 503 servicecategory [2,4] 60 - Should Be True ${result} msg=First step: Service (26, 503) should have servicecategory tags 2 and 4. + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Resources Tags With Timeout 1 4 servicegroup [2,4] 60 + Should Be True ${result} First step: Service (1, 4) should have servicegroup tags 2 and 4 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [3,5] 60 + Should Be True ${result} First step: Service (1, 3) should have servicecategory tags 3 and 5 + + ${result} Check Resources Tags With Timeout 26 504 servicegroup [3,5] 60 + Should Be True ${result} First step: Service (26, 504) should have servicegroup tags 3 and 5. + ${result} Check Resources Tags With Timeout 26 503 servicecategory [2,4] 60 + Should Be True ${result} First step: Service (26, 503) should have servicecategory tags 2 and 4. 
Remove Tags From Services ${0} group_tags Remove Tags From Services ${0} category_tags @@ -475,24 +516,21 @@ BEUTAG10 Add Tags To Services ${1} category_tags 2,4 [501, 502, 504] Reload Engine Reload Broker - ${result}= check resources tags With Timeout 1 4 servicegroup [2,4] 60 False - Should Be True ${result} msg=Second step: Service (1, 4) should not have servicegroup tags 2 and 4 + ${result} Check Resources Tags With Timeout 1 4 servicegroup [2,4] 60 False + Should Be True ${result} Second step: Service (1, 4) should not have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 1 3 servicecategory [3,5] 60 False - Should Be True ${result} msg=Second step: Service (1, 3) should not have servicecategory tags 3 and 5 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [3,5] 60 False + Should Be True ${result} Second step: Service (1, 3) should not have servicecategory tags 3 and 5 - ${result}= check resources tags With Timeout 26 504 servicegroup [3,5] 60 False - Should Be True ${result} msg=Second step: Service (26, 504) should not have servicegroup tags 3 and 5 + ${result} Check Resources Tags With Timeout 26 504 servicegroup [3,5] 60 False + Should Be True ${result} Second step: Service (26, 504) should not have servicegroup tags 3 and 5 - ${result}= check resources tags With Timeout 26 503 servicecategory [3,5] 60 False - Should Be True ${result} msg=Second step: Service (26, 503) should not have servicecategory tags 3 and 5 - - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 26 503 servicecategory [3,5] 60 False + Should Be True ${result} Second step: Service (26, 503) should not have servicecategory tags 3 and 5 BEUTAG11 [Documentation] some services are configured with tags on two pollers. Then several tags are removed, and we can observe resources_tags table updated. 
- [Tags] broker engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags MON-32811 Config Engine ${2} Create Tags File ${0} ${20} Create Tags File ${1} ${20} @@ -508,57 +546,59 @@ BEUTAG11 Config Broker rrd Config Broker module ${2} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item module1 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention + ${start} Get Current Date Start Engine Start Broker - ${result}= check resources tags With Timeout 1 4 servicegroup [2,4] 60 - Should Be True ${result} msg=First step: Service (1, 4) should have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 1 3 servicecategory [3,5] 60 - Should Be True ${result} msg=First step: Service (1, 3) should have servicecategory tags 3 and 5 - - ${result}= check resources tags With Timeout 26 504 servicegroup [3,5] 60 - Should Be True ${result} msg=First step: Service (26, 504) should have servicegroup tags 3 and 5. - ${result}= check resources tags With Timeout 26 503 servicecategory [2,4] 60 - Should Be True ${result} msg=First step: Service (26, 503) should have servicecategory tags 2 and 4. + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
+ + ${result} Check Resources Tags With Timeout 1 4 servicegroup [2,4] 60 + Should Be True ${result} First step: Service (1, 4) should have servicegroup tags 2 and 4 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [3,5] 60 + Should Be True ${result} First step: Service (1, 3) should have servicecategory tags 3 and 5 + + ${result} Check Resources Tags With Timeout 26 504 servicegroup [3,5] 60 + Should Be True ${result} First step: Service (26, 504) should have servicegroup tags 3 and 5. + ${result} Check Resources Tags With Timeout 26 503 servicecategory [2,4] 60 + Should Be True ${result} First step: Service (26, 503) should have servicecategory tags 2 and 4. + Remove Tags From Services ${0} group_tags Remove Tags From Services ${0} category_tags Remove Tags From Services ${1} group_tags Remove Tags From Services ${1} category_tags Create Tags File ${0} ${18} - Create Tags File ${1} ${18} Add Tags To Services ${0} group_tags 2,4 [1, 2, 3, 4] Add Tags To Services ${0} category_tags 3 [1, 2, 3, 4] Add Tags To Services ${1} group_tags 3,5 [501, 502, 503] Add Tags To Services ${1} category_tags 2,4 [501, 502, 504] + log to console toto0 Reload Engine + log to console toto1 Reload Broker - ${result}= check resources tags With Timeout 1 4 servicegroup [2,4] 60 - Should Be True ${result} msg=Second step: Service (1, 4) should not have servicegroup tags 2 and 4 - - ${result}= check resources tags With Timeout 1 3 servicecategory [5] 60 False - Should Be True ${result} msg=Second step: Service (1, 3) should not have servicecategory tags 5 + ${result} Check Resources Tags With Timeout 1 4 servicegroup [2,4] 60 + Should Be True ${result} Second step: Service (1, 4) should not have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 26 504 servicegroup [3,5] 60 False - Should Be True ${result} msg=Second step: Service (26, 504) should not have servicegroup tags 3 and 5 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [5] 
60 False + Should Be True ${result} Second step: Service (1, 3) should not have servicecategory tags 5 - ${result}= check resources tags With Timeout 26 503 servicecategory [3,5] 60 - Should Be True ${result} msg=Second step: Service (26, 503) should not have servicecategory tags 3 and 5 + ${result} Check Resources Tags With Timeout 26 504 servicegroup [3,5] 60 False + Should Be True ${result} Second step: Service (26, 504) should not have servicegroup tags 3 and 5 - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 26 503 servicecategory [3,5] 60 + Should Be True ${result} Second step: Service (26, 503) should not have servicecategory tags 3 and 5 BEUTAG12 [Documentation] Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. The tag6 and tag8 are removed and resources_tags is also well updated. - [Tags] broker engine protobuf bbdo tags - # Clear DB tags + [Tags] broker engine protobuf bbdo tags MON-32811 + # Clear Db tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -568,23 +608,28 @@ BEUTAG12 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Sleep 1s + ${start} Get Current Date Start Engine Start Broker - ${result}= check resources tags With Timeout 0 1 hostgroup [2,3] 60 - Should Be True ${result} msg=Host 1 should have hostgroup tags 2 and 3 - ${result}= check resources tags With Timeout 0 2 hostgroup [2,3] 60 - Should Be True ${result} msg=Host 2 should have hostgroup tags 2 and 3 - ${result}= check resources tags With Timeout 0 2 hostcategory [2, 3] 60 - 
Should Be True ${result} msg=Host 2 should have hostcategory tags 2 and 3 - ${result}= check resources tags With Timeout 0 3 hostcategory [2, 3] 60 - Should Be True ${result} msg=Host 3 should have hostcategory tags 2 and 3 + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2,3] 60 + Should Be True ${result} Host 1 should have hostgroup tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 2 hostgroup [2,3] 60 + Should Be True ${result} Host 2 should have hostgroup tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 2 hostcategory [2, 3] 60 + Should Be True ${result} Host 2 should have hostcategory tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 3 hostcategory [2, 3] 60 + Should Be True ${result} Host 3 should have hostcategory tags 2 and 3 Remove Tags From Hosts ${0} group_tags Remove Tags From Hosts ${0} category_tags @@ -594,16 +639,80 @@ BEUTAG12 Reload Engine Reload Broker - ${result}= check resources tags With Timeout 0 1 hostgroup [2,3] 60 False - Should Be True ${result} msg=Host 1 should not have hostgroup tags 2 nor 3 - ${result}= check resources tags With Timeout 0 2 hostgroup [2,3] 60 False - Should Be True ${result} msg=Host 2 should not have hostgroup tags 2 nor 3 - ${result}= check resources tags With Timeout 0 2 hostcategory [2,3] 60 False - Should Be True ${result} msg=Host 2 should not have hostgroup tags 2 nor 3 - ${result}= check resources tags With Timeout 0 3 hostcategory [2,3] 60 False - Should Be True ${result} msg=Host 3 should not have hostgroup tags 2 nor 3 - ${result}= check resources tags With Timeout 0 4 hostcategory [2,3] 60 False - Should Be True ${result} msg=Host 4 should not have hostgroup tags 2 nor 3 + ${result} 
Check Resources Tags With Timeout 0 1 hostgroup [2,3] 60 False + Should Be True ${result} Host 1 should not have hostgroup tags 2 nor 3 + ${result} Check Resources Tags With Timeout 0 2 hostgroup [2,3] 60 False + Should Be True ${result} Host 2 should not have hostgroup tags 2 nor 3 + ${result} Check Resources Tags With Timeout 0 2 hostcategory [2,3] 60 False + Should Be True ${result} Host 2 should not have hostgroup tags 2 nor 3 + ${result} Check Resources Tags With Timeout 0 3 hostcategory [2,3] 60 False + Should Be True ${result} Host 3 should not have hostgroup tags 2 nor 3 + ${result} Check Resources Tags With Timeout 0 4 hostcategory [2,3] 60 False + Should Be True ${result} Host 4 should not have hostgroup tags 2 nor 3 + +BEUTAG_REMOVE_HOST_FROM_HOSTGROUP + [Documentation] remove a host from hostgroup, reload, insert 2 host in the hostgroup must not make sql error + [Tags] broker engine tags MON-32811 + Clear Db tags + Config Engine ${1} + Create Tags File ${0} ${3} ${0} hostgroup + Config Engine Add Cfg File ${0} tags.cfg + Add Tags To Hosts ${0} group_tags 2 1 + Add Tags To Hosts ${0} group_tags 1 4 + Config Broker central + Config Broker rrd + Config Broker module + Config Broker Sql Output central unified_sql + Config BBDO3 1 + Broker Config Log module0 neb debug + Broker Config Log central sql trace + Broker Config Log central perfdata trace + Clear Retention + Sleep 1s + ${start} Get Current Date + Start Engine + Start Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
- Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2] 60 True + Should Be True ${result} Host 1 should have hostgroup tags 2 + + ${content} Create List unified_sql: end check_queue + ${result} Find In Log With Timeout ${centralLog} ${start} ${content} 60 + Should Be True ${result} A message unified_sql: end check_queue should be available. + + Engine Config Remove Service Host ${0} host_1 + Engine Config Remove Host 0 host_1 + Engine Config Remove Tag 0 2 + Reload Engine + + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2] 60 False + Should Be True ${result} Host 1 should not have hostgroup tags 2 + + # wait for commits + ${start} Get Current Date + ${content} Create List unified_sql: end check_queue + ${result} Find In Log With Timeout ${centralLog} ${start} ${content} 60 + Should Be True ${result} A message unified_sql: end check_queue should be available. + + Sleep 5 + + Create Tags File ${0} ${3} ${0} hostgroup + Add Tags To Hosts ${0} group_tags 2 [2,3] + Reload Engine + + ${result} Check Resources Tags With Timeout 0 2 hostgroup [2] 60 True + Should Be True ${result} Host 2 should have hostgroup tags 2 + + ${result} Check Resources Tags With Timeout 0 3 hostgroup [2] 60 True + Should Be True ${result} Host 3 should have hostgroup tags 2 + + +*** Keywords *** +Init Test + Stop Processes + Truncate Resource Host Service diff --git a/tests/engine/forced_checks.robot b/tests/engine/forced_checks.robot index 676fb196c2f..a0f84f45a22 100644 --- a/tests/engine/forced_checks.robot +++ b/tests/engine/forced_checks.robot @@ -275,3 +275,39 @@ EMACROS_NOTIF Stop Engine Kindly Stop Broker + + +EMACROS_SEMICOLON + [Documentation] Macros with a semicolon are used even if they contain a semicolon.
+ [Tags] engine external_cmd macros MON-35558 + Config Engine ${1} + Config Broker central + Config Broker rrd + Config Broker module ${1} + Engine Config Set Value ${0} log_legacy_enabled ${0} + Engine Config Set Value ${0} log_v2_enabled ${1} + Engine Config Set Value 0 log_level_checks trace True + Engine Config Set Value In Hosts 0 host_1 _KEY2 VAL1;val3; + Engine Config Change Command + ... 0 + ... \\d+ + ... /bin/echo "KEY2=$_HOSTKEY2$" + Clear Retention + ${start} Get Current Date + Start Engine + Start Broker + + ${content} Create List INITIAL HOST STATE: host_1; + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True + ... ${result} + ... An Initial host state on host_1 should be raised before we can start our external commands. + Schedule Forced Svc Check host_1 service_1 + Sleep 5s + + ${content} Create List KEY2=VAL1;val3; + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} VAL1;val3; not found in log. 
+ + Stop Engine + Kindly Stop Broker diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py index 90125cdaf40..bb1a02ec30a 100755 --- a/tests/resources/Broker.py +++ b/tests/resources/Broker.py @@ -1,14 +1,10 @@ from os import makedirs -from os.path import exists, dirname +from os.path import exists import pymysql.cursors import time import re import shutil import psutil -import socket -import sys -import time -from datetime import datetime from subprocess import getoutput import subprocess as subp from robot.api import logger @@ -500,7 +496,7 @@ def add_broker_crypto(json_dict, add_cert: bool, only_ca_cert: bool): json_dict["encryption"] = "yes" if (add_cert): json_dict["ca_certificate"] = "/tmp/ca_1234.crt" - if (only_ca_cert == False): + if not only_ca_cert: json_dict["public_cert"] = "/tmp/server_1234.crt" json_dict["private_key"] = "/tmp/server_1234.key" @@ -1195,7 +1191,7 @@ def get_not_existing_metrics(count: int): index = 1 retval = [] while len(retval) < count: - if not index in inter: + if index not in inter: retval.append(index) index += 1 return retval @@ -1581,7 +1577,6 @@ def remove_graphs_from_db(indexes, metrics, timeout=10): charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) - ids_db = [] with connection: with connection.cursor() as cursor: if len(indexes) > 0: @@ -1613,7 +1608,6 @@ def rebuild_rrd_graphs(port, indexes, timeout: int = TIMEOUT): time.sleep(1) with grpc.insecure_channel("127.0.0.1:{}".format(port)) as channel: stub = broker_pb2_grpc.BrokerStub(channel) - k = 0.0 idx = broker_pb2.IndexIds() idx.index_ids.extend(indexes) try: @@ -1636,7 +1630,6 @@ def rebuild_rrd_graphs_from_db(indexes): charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) - ids_db = [] with connection: with connection.cursor() as cursor: if len(indexes) > 0: @@ -1685,9 +1678,9 @@ def compare_rrd_average_value_with_grpc(metric, key, value: float): ) lst = res.split('\n') if len(lst) >= 2: - for l in lst: - if key in l: - last_update = 
int(l.split('=')[1]) + for line in lst: + if key in line: + last_update = int(line.split('=')[1]) logger.console(f"{key}: {last_update}") return last_update == value*60 else: @@ -2006,3 +1999,50 @@ def broker_get_ba(port: int, ba_id: int, output_file: str, timeout=TIMEOUT): except: logger.console("gRPC server not ready") return res + +def get_broker_stats(name: str, expected:str, timeout: int, *keys): + """! + read a value from broker stats + @param name central, module or rrd + @param expected: value expected (regexp) + @timeout delay to find key in stats + @param keys keys in json stats output + @return True if value found and matches expected + """ + + def json_get(json_dict, keys: tuple, index: int): + try: + key = keys[index] + if index == len(keys) -1: + return json_dict[key] + else: + return json_get(json_dict[key], keys, index + 1) + except: + return None + limit = time.time() + timeout + if name == 'central': + filename = "central-broker-master-stats.json" + elif name == 'module': + filename = "central-module-master-stats.json" + else: + filename = "central-rrd-master-stats.json" + r_expected = re.compile(expected) + while time.time() < limit: + retry = True + while retry and time.time() < limit: + retry = False + with open(f"{VAR_ROOT}/lib/centreon-broker/{filename}", "r") as f: + buf = f.read() + try: + conf = json.loads(buf) + except: + retry = True + time.sleep(1) + if conf is None: + continue + value = json_get(conf, keys, 0) + if value is not None and r_expected.match(value): + return True + time.sleep(5) + logger.console(f"key:{keys} value not expected: {value}") + return False diff --git a/tests/resources/Common.py b/tests/resources/Common.py index 45c63551f03..a797624a090 100644 --- a/tests/resources/Common.py +++ b/tests/resources/Common.py @@ -431,6 +431,26 @@ def set_command_status(cmd, status): f.close() +def truncate_resource_host_service(): + """!
+ clear resources_tags resources hosts and services tables + """ + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + autocommit=True, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + + with connection: + with connection.cursor() as cursor: + cursor.execute("DELETE FROM resources_tags") + cursor.execute("DELETE FROM resources") + cursor.execute("DELETE FROM hosts") + cursor.execute("DELETE FROM services") + + def check_service_resource_status_with_timeout(hostname: str, service_desc: str, status: int, timeout: int, state_type: str = "SOFT"): limit = time.time() + timeout while time.time() < limit: diff --git a/tests/resources/Engine.py b/tests/resources/Engine.py index 2fddffb5ef4..77d47948b83 100755 --- a/tests/resources/Engine.py +++ b/tests/resources/Engine.py @@ -377,7 +377,7 @@ def create_template_file(poller: int, typ: str, what: str, ids): ff.close() @staticmethod - def create_tags(poller: int, nb: int, offset: int): + def create_tags(poller: int, nb: int, offset: int, tag_type: str): tt = ["servicegroup", "hostgroup", "servicecategory", "hostcategory"] config_file = "{}/config{}/tags.cfg".format(CONF_DIR, poller) @@ -385,9 +385,13 @@ def create_tags(poller: int, nb: int, offset: int): content = "" tid = 0 for i in range(nb): - if i % 4 == 0: + if not tag_type: + if i % 4 == 0: + tid += 1 + typ = tt[i % 4] + else: + typ = tag_type tid += 1 - typ = tt[i % 4] content += """define tag {{ id {0} name tag{2} @@ -780,10 +784,9 @@ def engine_config_remove_service_host(idx: int, host: str): def engine_config_remove_host(idx: int, host: str): - filename = ETC_ROOT + "/centreon-engine/config{}/services.cfg".format(idx) - f = open(filename, "r") - lines = f.readlines() - f.close() + filename = f"{ETC_ROOT}/centreon-engine/config{idx}/hosts.cfg" + with open(filename, "r") as f: + lines = f.readlines() host_name = re.compile(r"^\s*host_name\s+" + host + "\s*$") host_begin = re.compile(r"^define 
host {$") @@ -1489,8 +1492,42 @@ def create_template_file(poller: int, typ: str, what: str, ids: list): engine.create_template_file(poller, typ, what, ids) -def create_tags_file(poller: int, nb: int, offset: int = 1): - engine.create_tags(poller, nb, offset) +def create_tags_file(poller: int, nb: int, offset: int = 1, tag_type: str = ""): + engine.create_tags(poller, nb, offset, tag_type) + + +def engine_config_remove_tag(poller: int, tag_id: int): + """! remove tags from tags.cfg where tag id = tag_id + @param poller poller index + @param tag_id id of the tag to remove + """ + filename = f"{CONF_DIR}/config{poller}/tags.cfg" + with open(filename, "r") as ff: + lines = ff.readlines() + + tag_name = re.compile(f"^\s*id\s+{tag_id}\s*$") + tag_begin = re.compile(r"^define tag {$") + tag_end = re.compile(r"^}$") + tag_begin_idx = 0 + while tag_begin_idx < len(lines): + if (tag_begin.match(lines[tag_begin_idx])): + for tag_line_idx in range(tag_begin_idx, len(lines)): + if (tag_name.match(lines[tag_line_idx])): + for end_tag_line in range(tag_line_idx, len(lines)): + if tag_end.match(lines[end_tag_line]): + del lines[tag_begin_idx:end_tag_line + 1] + break + break + elif tag_end.match(lines[tag_line_idx]): + tag_begin_idx = tag_line_idx + break + else: + tag_begin_idx = tag_begin_idx + 1 + + f = open(filename, "w") + f.writelines(lines) + f.close() + def config_engine_add_cfg_file(poller: int, cfg: str): @@ -1755,21 +1792,51 @@ def wrapper(*args): return wrapper -def process_service_check_result_with_metrics(hst: str, svc: str, state: int, output: str, metrics: int, config='config0'): +def process_service_check_result_with_metrics(hst: str, svc: str, state: int, output: str, metrics: int, config='config0', metric_name='metric'): now = int(time.time()) pd = [output + " | "] for m in range(metrics): v = math.sin((now + m) / 1000) * 5 - pd.append(f"metric{m}={v}") + pd.append(f"{metric_name}{m}={v}") + logger.trace(f"{metric_name}{m}={v}") full_output = " ".join(pd) 
process_service_check_result(hst, svc, state, full_output, config) +def process_service_check_result(hst: str, svc: str, state: int, output: str, config='config0', use_grpc=0, nb_check=1): + if use_grpc > 0: + port = 50001 + int(config[6:]) + with grpc.insecure_channel(f"127.0.0.1:{port}") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + for i in range(nb_check): + indexed_output = f"{output}_{i}" + stub.ProcessServiceCheckResult(engine_pb2.Check( + host_name=hst, svc_desc=svc, output=indexed_output, code=state)) + else: + now = int(time.time()) + with open(f"{VAR_ROOT}/lib/centreon-engine/{config}/rw/centengine.cmd", "w") as f: + for i in range(nb_check): + cmd = f"[{now}] PROCESS_SERVICE_CHECK_RESULT;{hst};{svc};{state};{output}_{i}\n" + f.write(cmd) + -def process_service_check_result(hst: str, svc: str, state: int, output: str, config='config0'): - now = int(time.time()) - with open(f"{VAR_ROOT}/lib/centreon-engine/{config}/rw/centengine.cmd", "w") as f: - cmd = f"[{now}] PROCESS_SERVICE_CHECK_RESULT;{hst};{svc};{state};{output}\n" - f.write(cmd) +@external_command +def acknowledge_service_problem(hst, service, typ='NORMAL'): + if typ == 'NORMAL': + logger.console('acknowledgement is normal') + sticky = 1 + elif typ == 'STICKY': + logger.console('acknowledgement is sticky') + sticky = 2 + else: + logger.console('acknowledgement type is none') + sticky = 0 + + return f"ACKNOWLEDGE_SVC_PROBLEM;{hst};{service};{sticky};0;0;admin;Service ({hst},{service}) acknowledged\n" + + +@external_command +def remove_service_acknowledgement(hst, service): + return f"REMOVE_SVC_ACKNOWLEDGEMENT;{hst};{service}\n" @external_command diff --git a/tests/update-doc.py b/tests/update-doc.py index 752ffe36b87..906afaa9e88 100755 --- a/tests/update-doc.py +++ b/tests/update-doc.py @@ -75,7 +75,7 @@ def parse_dir(d): On other rpm based distributions, you can try the following commands to initialize your robot tests: ``` -pip3 install -U robotframework 
robotframework-databaselibrary robotframework-httpctrl pymysql +pip3 install -U robotframework robotframework-databaselibrary robotframework-httpctrl robotframework-examples pymysql yum install python3-devel -y