diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 08a9bd35381..408ee0dfdef 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1,9 @@ -* @centreon/owners-cpp +* @centreon/owners-cpp -.github/** @centreon/owners-pipelines -packaging/** @centreon/owners-pipelines -selinux/** @centreon/owners-pipelines +.github/** @centreon/owners-pipelines +packaging/** @centreon/owners-pipelines +selinux/** @centreon/owners-pipelines -tests/** @centreon/owners-robot-e2e +stream-connectors/** @centreon/owners-lua + +tests/** @centreon/owners-robot-e2e diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index b51c1ae496e..30dcec46773 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -10,12 +10,16 @@ inputs: distrib: description: The package distrib required: true + major_version: + description: The major version + required: false version: description: The package version required: false release: description: The package release number required: false + default: "1" arch: description: The package architecture required: false @@ -53,6 +57,7 @@ runs: RPM_GPG_SIGNING_KEY_ID: ${{ inputs.rpm_gpg_signing_key_id }} RPM_GPG_SIGNING_PASSPHRASE: ${{ inputs.rpm_gpg_signing_passphrase }} run: | + export MAJOR_VERSION="${{ inputs.major_version }}" export VERSION="${{ inputs.version }}" export RELEASE="${{ inputs.release }}" export ARCH="${{ inputs.arch }}" @@ -68,6 +73,20 @@ runs: fi fi + MAJOR_LEFT=$( echo $MAJOR_VERSION | cut -d "." -f1 ) + MAJOR_RIGHT=$( echo $MAJOR_VERSION | cut -d "-" -f1 | cut -d "." -f2 ) + if [ "$MAJOR_RIGHT" = "04" ]; then + BUMP_MAJOR_LEFT="$MAJOR_LEFT" + BUMP_MAJOR_RIGHT="10" + else + BUMP_MAJOR_LEFT=$(( $MAJOR_LEFT + 1 )) + BUMP_MAJOR_RIGHT="04" + fi + + export NEXT_MAJOR_VERSION="$BUMP_MAJOR_LEFT.$BUMP_MAJOR_RIGHT" + + luaver=$(lua -e "print(string.sub(_VERSION, 5))" 2>/dev/null || echo 0) + export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" export RPM_SIGNING_KEY_ID="$RPM_GPG_SIGNING_KEY_ID" export NFPM_RPM_PASSPHRASE="$RPM_GPG_SIGNING_PASSPHRASE" @@ -76,9 +95,12 @@ runs: DIRNAME=$(dirname $FILE) BASENAME=$(basename $FILE) if [ -f $DIRNAME/env/.env.${{ inputs.distrib }} ]; then + set -o allexport source $DIRNAME/env/.env.${{ inputs.distrib }} + set +o allexport fi cd $DIRNAME + sed -i "s/@luaver@/$luaver/g" $BASENAME sed -i "s/@COMMIT_HASH@/${{ inputs.commit_hash }}/g" $BASENAME nfpm package --config $BASENAME --packager ${{ inputs.package_extension }} cd - diff --git a/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 b/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 new file mode 100644 index 00000000000..06885ef3bf0 --- /dev/null +++ b/.github/docker/Dockerfile.packaging-stream-connectors-nfpm-alma8 @@ -0,0 +1,19 @@ +ARG REGISTRY_URL + +FROM ${REGISTRY_URL}/almalinux:8 + +RUN bash -e <> conanfile.txt + [requires] + libcurl/8.0.1 + openssl/1.1.1t + zlib/1.2.13 + + [generators] + CMakeToolchain + + [options] + libcurl/*:with_ca_bundle=/etc/ssl/certs/ca-bundle.crt + libcurl/*:with_ca_fallback=False + libcurl/*:with_ca_path=/etc/ssl/certs/ + EOF + + conan install . 
--build=missing --deployer=full_deploy + + if [ "${{ matrix.package_extension }}" == "rpm" ]; then + sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.0.1/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/1.1.1t/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread#" Makefile + else + sed -i "s#^CURL_LIBS.*#CURL_LIBS=-Lfull_deploy/host/libcurl/8.0.1/Release/x86_64/lib -l:libcurl.a -Lfull_deploy/host/openssl/1.1.1t/Release/x86_64/lib -l:libssl.a -l:libcrypto.a -Lfull_deploy/host/zlib/1.2.13/Release/x86_64/lib -l:libz.a -lpthread -I/usr/include/lua5.3#" Makefile + fi + + make + + cd .. + + mkdir -p stream-connectors/dependencies/lua-curl/lua-curl/ + cp -p lua-curl-src/lcurl.so stream-connectors/dependencies/lua-curl/lua-curl/ + cp -rp lua-curl-src/src/lua/cURL stream-connectors/dependencies/lua-curl/lua-curl/ + cp -p lua-curl-src/src/lua/cURL.lua stream-connectors/dependencies/lua-curl/lua-curl/ + shell: bash + + - name: Update package name + run: | + if [ "${{ matrix.package_extension }}" == "rpm" ]; then + NAME="lua-curl" + else + NAME="lua5.3-curl" + fi + sed -i "s/@NAME@/$NAME/g" ./stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml + shell: bash + + - name: Package + uses: ./.github/actions/package + with: + nfpm_file_pattern: "stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml" + distrib: ${{ matrix.distrib }} + package_extension: ${{ matrix.package_extension }} + arch: amd64 + major_version: ${{ needs.get-version.outputs.version }} + version: ${{ needs.get-version.outputs.version }}.0 + release: "1" + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-curl-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-rpm: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish RPM packages + uses: ./.github/actions/delivery + with: + module_name: lua-curl + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-curl-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-deb: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye, bookworm, jammy] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish DEB packages + uses: ./.github/actions/delivery + with: + module_name: lua-curl + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-curl-${{ matrix.distrib }} + stability: ${{ 
needs.get-version.outputs.stability }} diff --git a/.github/workflows/lua-tz.yml b/.github/workflows/lua-tz.yml new file mode 100644 index 00000000000..45120b19bc3 --- /dev/null +++ b/.github/workflows/lua-tz.yml @@ -0,0 +1,135 @@ +name: lua-tz + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - stream-connectors/dependencies/lua-tz/** + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - stream-connectors/dependencies/lua-tz/** + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + + package: + needs: [get-version] + + strategy: + fail-fast: false + matrix: + distrib: [el8, el9, bullseye, bookworm, jammy] + include: + - package_extension: rpm + image: packaging-stream-connectors-nfpm-alma8 + distrib: el8 + - package_extension: rpm + image: packaging-stream-connectors-nfpm-alma9 + distrib: el9 + - package_extension: deb + image: packaging-stream-connectors-nfpm-bullseye + distrib: bullseye + - package_extension: deb + image: packaging-stream-connectors-nfpm-bookworm + distrib: bookworm + - package_extension: deb + image: packaging-stream-connectors-nfpm-jammy + distrib: jammy + + runs-on: ubuntu-22.04 + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Checkout luatz sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + repository: "daurnimator/luatz" + path: "luatz-src" + ref: "e49b496e112ae1f0efdec24fc1c6a6f978f68014" # v0.4-1 + + - name: Prepare packaging of lua-tz + run: cp -r luatz-src/luatz stream-connectors/dependencies/lua-tz/lua-tz + shell: bash + + - name: Package + uses: ./.github/actions/package + with: + nfpm_file_pattern: "stream-connectors/dependencies/lua-tz/packaging/*.yaml" + distrib: ${{ matrix.distrib }} + package_extension: ${{ matrix.package_extension }} + arch: all + major_version: ${{ needs.get-version.outputs.version }} + version: ${{ needs.get-version.outputs.version }}.0 + release: "1" + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-tz-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-rpm: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish RPM packages + uses: ./.github/actions/delivery + with: + module_name: lua-tz + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-tz-${{ matrix.distrib }} + stability: ${{ 
needs.get-version.outputs.stability }} + + deliver-deb: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye, bookworm, jammy] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish DEB packages + uses: ./.github/actions/delivery + with: + module_name: lua-tz + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-tz-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml index 621f2635e78..3bed775ecb1 100644 --- a/.github/workflows/package-collect.yml +++ b/.github/workflows/package-collect.yml @@ -3,7 +3,10 @@ name: Centreon collect packaging on: workflow_call: inputs: - version: + major_version: + required: true + type: string + minor_version: required: true type: string img_version: @@ -102,7 +105,7 @@ jobs: cd selinux for MODULE in "centreon-engine" "centreon-broker"; do cd $MODULE - sed -i "s/@VERSION@/${{ inputs.version }}/g" $MODULE.te + sed -i "s/@VERSION@/${{ inputs.major_version }}.${{ inputs.minor_version }}/g" $MODULE.te make -f /usr/share/selinux/devel/Makefile cd - done @@ -172,7 +175,8 @@ jobs: nfpm_file_pattern: "packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} - version: ${{ inputs.version }} + major_version: ${{ inputs.major_version }} + version: ${{ inputs.major_version }}.${{ inputs.minor_version }} release: ${{ inputs.release }} arch: ${{ matrix.arch }} commit_hash: ${{ inputs.commit_hash }} diff --git a/.github/workflows/robot-nightly.yml b/.github/workflows/robot-nightly.yml index 13db94f259a..e3458489a67 100644 --- a/.github/workflows/robot-nightly.yml +++ b/.github/workflows/robot-nightly.yml @@ -49,11 +49,12 @@ jobs: needs: [get-version] uses: ./.github/workflows/package-collect.yml with: - stability: ${{ needs.get-version.outputs.stability }} - version: ${{ needs.get-version.outputs.version }}.${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.version }} + minor_version: ${{ needs.get-version.outputs.patch }} img_version: ${{ needs.get-version.outputs.img_version }} release: ${{ needs.get-version.outputs.release }} commit_hash: ${{ github.sha }} + stability: ${{ needs.get-version.outputs.stability }} secrets: inherit robot-test: diff --git a/.github/workflows/stream-connectors-lib.yml b/.github/workflows/stream-connectors-lib.yml new file mode 100644 index 00000000000..a94b688b734 --- /dev/null +++ b/.github/workflows/stream-connectors-lib.yml @@ -0,0 +1,126 @@ +name: stream-connectors-lib + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - stream-connectors/packaging/connectors-lib/** + - stream-connectors/modules/centreon-stream-connectors-lib/** + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - stream-connectors/packaging/connectors-lib/** + - stream-connectors/modules/centreon-stream-connectors-lib/** + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml 
+ + package: + needs: [get-version] + + strategy: + fail-fast: false + matrix: + distrib: [el8, el9, bullseye, bookworm, jammy] + include: + - package_extension: rpm + image: packaging-stream-connectors-nfpm-alma8 + distrib: el8 + - package_extension: rpm + image: packaging-stream-connectors-nfpm-alma9 + distrib: el9 + - package_extension: deb + image: packaging-stream-connectors-nfpm-bullseye + distrib: bullseye + - package_extension: deb + image: packaging-stream-connectors-nfpm-bookworm + distrib: bookworm + - package_extension: deb + image: packaging-stream-connectors-nfpm-jammy + distrib: jammy + + runs-on: ubuntu-22.04 + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Package + uses: ./.github/actions/package + with: + nfpm_file_pattern: "stream-connectors/packaging/connectors-lib/*.yaml" + distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-version.outputs.version }} + version: ${{ needs.get-version.outputs.version }}.0 + release: "1" + package_extension: ${{ matrix.package_extension }} + arch: all + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-rpm: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish RPM packages + uses: ./.github/actions/delivery + with: + module_name: stream-connectors-lib + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-deb: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye, bookworm, jammy] + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish DEB packages + uses: ./.github/actions/delivery + with: + module_name: stream-connectors-lib + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} diff --git a/.github/workflows/stream-connectors.yml b/.github/workflows/stream-connectors.yml new file mode 100644 index 00000000000..bccec035214 --- /dev/null +++ b/.github/workflows/stream-connectors.yml @@ -0,0 +1,186 @@ +name: 
stream-connectors + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - stream-connectors/centreon-certified/** + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - stream-connectors/centreon-certified/** + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + + detect-changes: + runs-on: ubuntu-22.04 + outputs: + connectors: ${{ steps.list-connectors.outputs.connectors }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: dorny/paths-filter@0bc4621a3135347011ad047f9ecf449bf72ce2bd # v3.0.0 + id: filter + with: + base: ${{ github.ref }} + list-files: shell + filters: | + connectors: + - stream-connectors/centreon-certified/** + + - name: transform to directories + id: list-connectors + run: | + folders=() + for f in ${{ steps.filter.outputs.connectors_files }}; do + DIR_NAME=($(dirname $f)) + BASE_NAME=($(basename $DIR_NAME)) + echo "Adding $BASE_NAME to folders" + folders+=($BASE_NAME) + done + unique_folders=($(printf "%s\n" "${folders[@]}" | sort -u | tr '\n' ' ')) + echo "connectors=$(jq --compact-output --null-input '$ARGS.positional' --args -- ${unique_folders[@]})" >> $GITHUB_OUTPUT + shell: bash + + package: + if: ${{ needs.detect-changes.outputs.connectors != '[]' }} + needs: [get-version, detect-changes] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9, bullseye, bookworm, jammy] + connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} + include: + - distrib: el8 + image: packaging-stream-connectors-nfpm-alma8 + package_extension: rpm + - distrib: el9 + image: packaging-stream-connectors-nfpm-alma9 + package_extension: rpm + - distrib: bullseye + image: packaging-stream-connectors-nfpm-bullseye + package_extension: deb + - distrib: bookworm + image: packaging-stream-connectors-nfpm-bookworm + package_extension: deb + - distrib: jammy + image: packaging-stream-connectors-nfpm-jammy + package_extension: deb + + name: package ${{ matrix.distrib }} ${{ matrix.connector_path }} + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + steps: + + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Replace package and connector name variables + run: | + package_name="centreon-stream-connector-`basename ${{ matrix.connector_path }}`" + sed -i "s/@PACKAGE_NAME@/$package_name/g" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml + connector_name="`basename ${{ matrix.connector_path }}`" + sed -i "s/@CONNECTOR_NAME@/$connector_name/g" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml + shell: bash + + - name: Add specific dependencies + run: | + DEB_DEPENDENCIES="" + RPM_DEPENDENCIES="" + if [ "${{ matrix.connector_path }}" = "kafka" ]; then + DEB_DEPENDENCIES='librdkafka1,"lua-cffi (>= ${MAJOR_VERSION}~)","lua-cffi (<< ${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='librdkafka,"lua-cffi >= ${MAJOR_VERSION}","lua-cffi ${NEXT_MAJOR_VERSION}"' + elif [ "${{ matrix.connector_path }}" = "pagerduty" ]; then + DEB_DEPENDENCIES='"lua-tz (>= ${MAJOR_VERSION}~)","lua-tz (<< ${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='"lua-tz >= 
${MAJOR_VERSION}","lua-tz ${NEXT_MAJOR_VERSION}"' + elif [ "${{ matrix.connector_path }}" = "splunk" ]; then + DEB_DEPENDENCIES='"lua-tz (>= ${MAJOR_VERSION}~)","lua-tz (<< ${NEXT_MAJOR_VERSION}~)"' + RPM_DEPENDENCIES='"lua-tz >= ${MAJOR_VERSION}","lua-tz ${NEXT_MAJOR_VERSION}"' + fi + sed -i "s/@RPM_DEPENDENCIES@/$RPM_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml + sed -i "s/@DEB_DEPENDENCIES@/$DEB_DEPENDENCIES/g;" ./stream-connectors/packaging/connectors/centreon-stream-connectors.yaml + shell: bash + + - name: Export package version + id: package-version + run: echo "package_version=`date '+%Y%m%d'`" >> $GITHUB_OUTPUT + shell: bash + + - name: Package + uses: ./.github/actions/package + with: + nfpm_file_pattern: "stream-connectors/packaging/connectors/centreon-stream-connectors.yaml" + distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-version.outputs.version }} + version: ${{ needs.get-version.outputs.version }}.${{ steps.package-version.outputs.package_version }} + release: "1" + package_extension: ${{ matrix.package_extension }} + arch: all + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.connector_path }}-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-rpm: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, detect-changes, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [el8, el9] + connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} + name: deliver ${{ matrix.distrib }} ${{ matrix.connector_path }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish RPM packages + uses: ./.github/actions/delivery + with: + module_name: stream-connector-${{ matrix.connector_path }} + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.connector_path }}-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-deb: + if: ${{ contains(fromJson('["unstable", "testing", "stable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, detect-changes, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + distrib: [bullseye, bookworm, jammy] + connector_path: ${{ fromJson(needs.detect-changes.outputs.connectors) }} + name: deliver ${{ matrix.distrib }} ${{ matrix.connector_path }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish DEB packages + uses: ./.github/actions/delivery + with: + module_name: stream-connector-${{ matrix.connector_path }} + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.connector_path }}-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} diff --git a/stream-connectors/CONTRIBUTE.md b/stream-connectors/CONTRIBUTE.md new file mode 100644 index 
00000000000..22abb20d32d
--- /dev/null
+++ b/stream-connectors/CONTRIBUTE.md
@@ -0,0 +1,56 @@
+# Contribute to the Centreon Stream Connectors project
+
+## How to contribute
+
+There are many ways you can contribute to this project and everyone should be able to help in their own way.
+
+### For code lovers
+
+You can work on Stream Connectors:
+
+- Create a new stream connector
+- Update an existing stream connector
+- [Fix issues](https://github.com/centreon/centreon-stream-connector-scripts/issues)
+
+You can improve our Lua modules:
+
+- Add a new module
+  - Comment it
+  - Document it
+  - *optional* Provide usage examples
+- Update an existing module
+  - Update the documentation (if it changes the input and/or output of a method)
+  - Update usage examples if there are any and if they are impacted by the change
+
+### For everybody
+
+Since we are not all fond of code, there are still ways to be part of this project:
+
+- Open issues for bugs or feedback (or help people)
+- Update an already existing example or provide new ones
+
+## Code guidelines
+
+If you want to work on our Lua modules, you must follow the coding style provided by luarocks:
+[Coding style guidelines](https://github.com/luarocks/lua-style-guide)
+
+While these guidelines are mandatory for modules, they are not enforced on community-powered stream connector scripts.
+It is however recommended to follow them as much as possible.
+
+## Documentation
+
+When creating a module, you must comment your methods as follows:
+
+```lua
+--- This is a local function that does things
+-- @param first_name (string) the first name
+-- @param last_name (string) the last name
+-- @return age (number) the age of the person
+local function get_age(first_name, last_name)
+  -- some code --
+end
+```
+
+You should comment complicated or long code blocks to help people review your code.
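+
+For instance, a short comment stating the intent of a non-obvious block saves reviewers a lot of guesswork. A hypothetical snippet (the `events` list and its fields are illustrative only):
+
+```lua
+-- Keep only the most recent event per host: events can arrive out of
+-- order, so we compare timestamps instead of trusting insertion order.
+local latest = {}
+for _, event in ipairs(events) do
+  local known = latest[event.host_id]
+  if not known or known.last_check < event.last_check then
+    latest[event.host_id] = event
+  end
+end
+```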
+
+It is also required to create or update the module documentation, for a more casual read, to help people use your module in their stream connector.
diff --git a/stream-connectors/README.md b/stream-connectors/README.md
new file mode 100644
index 00000000000..c1aa110edf3
--- /dev/null
+++ b/stream-connectors/README.md
@@ -0,0 +1,54 @@
+# Centreon Stream Connectors
+
+The content of this repository has been moved to https://github.com/centreon/centreon-collect
+
+[![Contributors][contributors-shield]][contributors-url]
+[![Stars][stars-shield]][stars-url]
+[![Forks][forks-shield]][forks-url]
+[![Issues][issues-shield]][issues-url]
+
+Centreon stream connectors are Lua scripts that help you send your Centreon monitoring data to your favorite tools.
+
+## Stream connectors
+
+Available scripts
+
+Here is a list of the Centreon-powered scripts:
+
+| Software | Connectors | Documentation |
+| -------- | ---------- | ------------- |
+| BSM | [BSM Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/bsm) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-hp-bsm.html) |
+| ElasticSearch | [ElasticSearch Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/elasticsearch) | [Events documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-elastic-events.html), [Metrics documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-elastic-metrics.html) |
+| InfluxDB | [InfluxDB Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/influxdb) | WIP |
+| NDO | [NDO Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/ndo) | [Documentation](https://docs.centreon.com/current/en/integrations/stream-connectors/ndo.html) |
+| OMI | [OMI Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/omi) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-hp-omi.html) |
+| Opsgenie | [Opsgenie Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/opsgenie) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-opsgenie.html) |
+| PagerDuty | [PagerDuty Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/pagerduty) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-pagerduty-events.html) |
+| Prometheus | [Prometheus Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/prometheus) | WIP |
+| ServiceNow | [ServiceNow Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/servicenow) | [Documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-service-now-events.html) |
+| Signl4 | [Signl4 Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/signl4) | [Events documentation](https://docs.centreon.com/current/en/integrations/event-management/sc-signl4-events.html) |
+| Splunk | [Splunk Stream Connectors](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/splunk) | [Events documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-events.html), [Metrics documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-splunk-metrics.html) |
+| Warp10 | [Warp10 Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/warp10) | [Documentation](https://docs.centreon.com/current/en/integrations/data-analytics/sc-warp10.html) |
+| Kafka | [Kafka Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/centreon-certified/kafka) | [Documentation](https://docs.centreon.com/docs/integrations/data-analytics/sc-kafka-events/) |
+
+Here is a list of the community-powered scripts:
+
+| Software | Connectors | Documentation | Contributors | Organizations |
+| -------- | ---------- | ------------- | ------------ | ------------- |
+| Canopsis | [Canopsis Stream Connector](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/community-powered/canopsis) | [Documentation](https://github.com/centreon/centreon-stream-connector-scripts/tree/master/community-powered/canopsis/README.md) | [ppremont-capensis](https://github.com/ppremont-capensis) | [Capensis](https://www.capensis.fr/en/) |
+
+## Contribute
+
+If you wish to help us improve this project, feel free to read the [Contribute.md](https://github.com/centreon/centreon-stream-connector-scripts/blob/master/CONTRIBUTE.md) file.
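+
+As a starting point, every script in this repository implements the two entry points Centreon Broker expects from a stream connector. A minimal sketch (log path and messages are placeholders):
+
+```lua
+-- Broker calls init() once with the output configuration defined in the GUI,
+-- then write() for every monitoring event.
+function init(conf)
+  broker_log:set_parameters(1, "/var/log/centreon-broker/my-connector.log")
+  broker_log:info(1, "init: my-connector starting")
+end
+
+-- Returning true tells Broker the event has been handled.
+function write(event)
+  broker_log:info(3, "write: received " .. broker.json_encode(event))
+  return true
+end
+```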
+
+[contributors-shield]: https://img.shields.io/github/contributors/centreon/centreon-stream-connector-scripts?color=%2384BD00&label=CONTRIBUTORS&style=for-the-badge
+[stars-shield]: https://img.shields.io/github/stars/centreon/centreon-stream-connector-scripts?color=%23433b02a&label=STARS&style=for-the-badge
+[forks-shield]: https://img.shields.io/github/forks/centreon/centreon-stream-connector-scripts?color=%23009fdf&label=FORKS&style=for-the-badge
+[issues-shield]: https://img.shields.io/github/issues/centreon/centreon-stream-connector-scripts?color=%230072ce&label=ISSUES&style=for-the-badge
+
+[contributors-url]: https://github.com/centreon/centreon-stream-connector-scripts/graphs/contributors
+[forks-url]: https://github.com/centreon/centreon-stream-connector-scripts/network/members
+[stars-url]: https://github.com/centreon/centreon-stream-connector-scripts/stargazers
+[issues-url]: https://github.com/centreon/centreon-stream-connector-scripts/issues
diff --git a/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua
new file mode 100644
index 00000000000..d1f418f708d
--- /dev/null
+++ b/stream-connectors/centreon-certified/bsm/bsm_connector-apiv1.lua
@@ -0,0 +1,372 @@
+--
+-- Copyright 2024 Centreon
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- For more information: contact@centreon.com
+--
+-- To work, this script needs a Broker stream connector output configuration
+-- with the following information:
+--
+-- source_ci (string): name of the transmitter, usually the Centreon server name
+-- http_server_url (string): the full HTTP URL. Default: https://my.bsm.server:30005/bsmc/rest/events/ws-centreon/.
+-- http_proxy_string (string): the full proxy URL if needed to reach the BSM server. Default: empty.
+-- log_path (string): the log file to use
+-- log_level (number): the log level (0, 1, 2, 3) where 3 is the maximum level. 0 logs almost nothing. 1 logs only the beginning of the script and errors. 2 logs a reasonable amount of verbose messages. 3 logs almost everything possible, to be used only for debugging. Recommended value in production: 1.
+-- max_buffer_size (number): how many events to store before sending them to the server.
+-- max_buffer_age (number): flush the events when the specified time (in seconds) is reached (even if max_size is not reached).
+
+-- Libraries
+local curl = require "cURL"
+require("LuaXML")
+
+-- workaround https://github.com/centreon/centreon-broker/issues/201
+local previous_event = ""
+
+-- Useful functions
+local function ifnil(var, alt)
+  -- a falsy var (nil or false) falls back to the alternative value
+  if not var then
+    return alt
+  else
+    return var
+  end
+end
+
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == '' then
+    return alt
+  else
+    return var
+  end
+end
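+
+-- Note: broker_cache is only filled when centengine restarts and dumps its
+-- configuration, so the resolvers below may fall back to the raw ids until
+-- that has happened.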
+
+local function get_hostname(host_id)
+  local hostname = broker_cache:get_hostname(host_id)
+  if not hostname then
+    broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. Restarting centengine should fix this.")
+    hostname = host_id
+  end
+  return hostname
+end
+
+local function get_service_description(host_id, service_id)
+  local service = broker_cache:get_service_description(host_id, service_id)
+  if not service then
+    broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. " not found. Restarting centengine should fix this.")
+    service = service_id
+  end
+  return service
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+-- Constructor
+-- @param conf The table given by the init() function and returned from the GUI
+-- @return the new EventQueue
+--------------------------------------------------------------------------------
+
+function EventQueue.new(conf)
+  local retval = {
+    source_ci = "Centreon",
+    http_server_url = "https://my.bsm.server:30005/bsmc/rest/events/ws-centreon/",
+    http_proxy_string = "",
+    http_timeout = 10,
+    filter_type = "metric,status",
+    filter_hostgroups = "",
+    max_output_length = 1024,
+    max_buffer_size = 1,
+    max_buffer_age = 5,
+    skip_anon_events = 1
+  }
+  for i,v in pairs(conf) do
+    if retval[i] then
+      retval[i] = v
+      broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v)
+    end
+  end
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  -- Storing the allowed hostgroups in an array
+  retval.filter_hostgroups_array = {}
+  if retval.filter_hostgroups and retval.filter_hostgroups ~= "" then
+    for hg in string.gmatch(retval.filter_hostgroups, "([^,]+)") do
+      table.insert(retval.filter_hostgroups_array, hg)
+    end
+    broker_log:info(3, "EventQueue.new: Allowed hostgroups are: " .. table.concat(retval.filter_hostgroups_array, ' - '))
+  end
+  setmetatable(retval, EventQueue)
+  -- Internal data initialization
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
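+
+-- Note: events pile up in self.events and are only posted to BSM when
+-- write() sees the queue reach max_buffer_size or max_buffer_age;
+-- flush() then empties the buffer on success.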
+
+--------------------------------------------------------------------------------
+-- EventQueue:add method
+-- @param e An event
+--------------------------------------------------------------------------------
+
+function EventQueue:add(e)
+
+  local type = "host"
+  local hostname = "Meta"
+  if e.host_id then
+    hostname = get_hostname(e.host_id)
+    if hostname == e.host_id then
+      if self.skip_anon_events ~= 1 then
+        broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'")
+        return false
+      else
+        broker_log:info(1, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only")
+      end
+    end
+  end
+
+  local service_description = "host"
+  if e.service_id then
+    type = "service"
+    service_description = get_service_description(e.host_id, e.service_id)
+    if service_description == e.service_id then
+      if self.skip_anon_events ~= 1 then
+        broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'")
+      else
+        broker_log:info(1, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'")
+      end
+    end
+  elseif hostname == "Meta" then
+    service_description = e.output
+  end
+
+  -- Getting the host extended information
+  local xml_url = ''
+  local xml_notes = ''
+  local xml_service_severity = ''
+  local xml_host_severity = ''
+  if e.host_id then
+    xml_host_severity = "" .. ifnil(broker_cache:get_severity(e.host_id), '0') .. ""
+    if e.service_id then
+      xml_url = ifnil(broker_cache:get_notes_url(e.host_id, e.service_id), 'no notes url for this service')
+      xml_service_severity = "" .. ifnil(broker_cache:get_severity(e.host_id, e.service_id), '0') .. ""
+    else
+      xml_url = ifnil(broker_cache:get_action_url(e.host_id), 'no action url for this host')
+      xml_notes = "" .. ifnil(broker_cache:get_notes(e.host_id), 'OS not set') .. ""
+    end
+  end
+
+  -- Event to send
+  local event_to_send = ""
+
+  -- Host and Service Status
+  event_to_send = "" ..
+    "" .. service_description .. "" ..
+    "" .. string.match(e.output, "^(.*)\n") .. "" ..
+    "" .. e.state .. "" ..
+    "" .. e.last_update .. "" ..
+    "" .. hostname .. "" ..
+    xml_host_severity ..
+    xml_service_severity ..
+    xml_notes ..
+    "" .. xml_url .. "" ..
+    "" .. ifnil(self.source_ci, 'Centreon') .. "" ..
+    "" .. ifnil(e.host_id, '0') .. "" ..
+    "" .. ifnil(e.service_id, '0') .. "" ..
+    "" .. ifnil(e.scheduled_downtime_depth, '0') .. "" ..
+    ""
+
+  -- Appending to the event queue
+  self.events[#self.events + 1] = event_to_send
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush method
+-- Called when the max number of events or the max age is reached
+--------------------------------------------------------------------------------
+
+function EventQueue:flush()
+
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")
+
+  local http_post_data = ""
+  for _, xml_str in ipairs(self.events) do
+    http_post_data = http_post_data .. tostring(xml.eval(xml_str))
+  end
+
+  broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"")
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(self.http_server_url)
+    :setopt(curl.OPT_SSL_VERIFYPEER, 0)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.http_timeout)
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "Content-Type: text/xml",
+      }
+    )
+
+  -- setting the CURLOPT_PROXY
+  if self.http_proxy_string and self.http_proxy_string ~= "" then
+    broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'")
+    http_request:setopt(curl.OPT_PROXY, self.http_proxy_string)
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(http_post_data)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  if http_response_code == 202 or http_response_code == 200 then
+    broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code)
+    -- now that the data has been sent, we empty the events array
+    self.events = {}
+    retval = true
+  else
+    broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code .. " message is:\n\"" .. http_response_body .. "\"\n")
+  end
+
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  local log_level = 1
+  local log_path = "/var/log/centreon-broker/stream-connector-bsm.log"
+  for i,v in pairs(conf) do
+    if i == "log_level" then
+      log_level = v
+    end
+    if i == "log_path" then
+      log_path = v
+    end
+  end
+  broker_log:set_parameters(log_level, log_path)
+  broker_log:info(0, "init: Starting BSM StreamConnector (log level: " .. log_level .. ")")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = EventQueue.new(conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- Function write()
+function write(e)
+  broker_log:info(3, "write: Beginning function")
+
+  -- First, are there some old events waiting in the flush queue?
+  if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then
+    broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  -- Then we check whether the event queue is already full
+  if (#queue.events >= queue.max_buffer_size) then
+    broker_log:warning(1, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, flushing data.")
+    return queue:flush()
+  end
+
+  -- Here come the filters
+  -- Host Status/Service Status only
+  if not (e.category == 1 and (e.element == 24 or e.element == 14)) then
+    broker_log:info(3, "write: Neither host nor service status event. Dropping.")
+    return true
+  end
+
+  -- drop meta-services for now; log events that have no host_id and are not meta-services
+  if not e.host_id then
+    if not (e.output and e.output:find("Meta-Service") == 1) then
+      broker_log:error(1, "write: Event has no host_id: " .. broker.json_encode(e))
+    end
+    return true
+  end
+
+  -- workaround https://github.com/centreon/centreon-broker/issues/201
+  current_event = broker.json_encode(e)
+  broker_log:info(3, "write: Raw event: " .. current_event)
+
+  -- Ignore objects in downtime
+  if e.scheduled_downtime_depth ~= 0 then -- we keep only events in hard state and not in downtime (change of 18/02/2020 by Simon Bomm)
+    broker_log:info(3, "write: Scheduled downtime. Dropping.")
+    return true
+  end
+
+  -- Ignore SOFT
+  if e.state_type and e.state_type ~= 1 then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Not HARD state type. Dropping.")
+    return true
+  end
+
+  -- Ignore states different from previous hard state only
+  if e.last_hard_state_change and e.last_check and e.last_hard_state_change < e.last_check then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Last hard state change prior to last check => no state change. Dropping.")
+    return true
+  end
+
+  -- workaround https://github.com/centreon/centreon-broker/issues/201
+  if current_event == previous_event then
+    broker_log:info(3, "write: Duplicate event ignored.")
+    return true
+  end
+
+  -- Ignore pending states
+  if e.state and e.state == 4 then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Pending state ignored. Dropping.")
+    return true
+  end
+
+  -- The current event now becomes the previous
+  previous_event = current_event
+  -- Once all the filters have been passed successfully, we can add the current event to the queue
+  queue:add(e)
+
+  -- Then we check whether it is time to send the events to the receiver and flush
+  if (#queue.events >= queue.max_buffer_size) then
+    broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data")
+    return queue:flush()
+  end
+  broker_log:info(3, "write: Ending function")
+
+  return true
+end
diff --git a/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua
new file mode 100644
index 00000000000..911cabc7ce7
--- /dev/null
+++ b/stream-connectors/centreon-certified/capensis/canopsis2-events-apiv2.lua
@@ -0,0 +1,536 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Canopsis Connector Events
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require "cURL"
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    "canopsis_user",
+    "canopsis_password",
+    "canopsis_host"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/canopsis2-events.log"
+  local log_level = params.log_level or 1
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  -- overriding default parameters for this stream connector if the default values don't suit the basic needs
+  self.sc_params.params.canopsis_user = params.canopsis_user
+  self.sc_params.params.canopsis_password = params.canopsis_password
+  self.sc_params.params.connector = params.connector or "centreon-stream"
+  self.sc_params.params.connector_name_type = 
params.connector_name_type or "poller" + self.sc_params.params.connector_name = params.connector_name or "centreon-stream-central" + self.sc_params.params.canopsis_event_route = params.canopsis_event_route or "/api/v2/event" + self.sc_params.params.canopsis_downtime_route = params.canopsis_downtime_route or "/api/v2/pbehavior" + self.sc_params.params.canopsis_host = params.canopsis_host + self.sc_params.params.canopsis_port = params.canopsis_port or 8082 + self.sc_params.params.sending_method = params.sending_method or "api" + self.sc_params.params.sending_protocol = params.sending_protocol or "http" + self.sc_params.params.timezone = params.timezone or "Europe/Paris" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status,acknowledgement,downtime" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_params.params.send_mixed_events = 0 + self.sc_params.params.max_buffer_size = 1 + + if self.sc_params.params.connector_name_type ~= "poller" and self.sc_params.params.connector_name_type ~= "custom" then + self.sc_params.params.connector_name_type = "poller" + end + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file + and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) + then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " + .. tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.sc_flush:add_queue_metadata(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.downtime.id, {event_route = self.sc_params.params.canopsis_downtime_route}) + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end, + [elements.downtime.id] = function () return self:format_event_downtime() end, + [elements.acknowledgement.id] = function () return self:format_event_acknowledgement() end + }, + [categories.bam.id] = {} + } + + self.centreon_to_canopsis_state = { + [categories.neb.id] = { + [elements.host_status.id] = { + [0] = 0, + [1] = 3, + [2] = 2 + }, + [elements.service_status.id] = { + [0] = 0, + [1] = 1, + [2] = 3, + [3] = 2 + } + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue 
})
+  return self
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_accepted_event method
+----------------------------------------------------------------------------------
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  local template = self.sc_params.params.format_template[category][element]
+
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+  self.sc_event.event.formated_event = {}
+
+  if self.format_template and template ~= nil and template ~= "" then
+    self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true)
+  else
+    -- can't format the event if the stream connector does not handle this kind of event and it is not covered by a template file
+    if not self.format_event[category][element] then
+      self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+        .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+        .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+function EventQueue:list_servicegroups()
+  local servicegroups = {}
+
+  for _, sg in pairs(self.sc_event.event.cache.servicegroups) do
+    table.insert(servicegroups, sg.group_name)
+  end
+
+  return servicegroups
+end
+
+function EventQueue:list_hostgroups()
+  local hostgroups = {}
+
+  for _, hg in pairs(self.sc_event.event.cache.hostgroups) do
+    table.insert(hostgroups, hg.group_name)
+  end
+
+  return hostgroups
+end
+
+function EventQueue:get_connector_name()
+  -- use poller name as a connector name
+  if self.sc_params.params.connector_name_type == "poller" then
+    return tostring(self.sc_event.event.cache.poller)
+  end
+
+  return tostring(self.sc_params.params.connector_name)
+end
+
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    event_type = "check",
+    source_type = "component",
+    connector = self.sc_params.params.connector,
+    connector_name = self:get_connector_name(),
+    component = tostring(event.cache.host.name),
+    resource = "",
+    timestamp = event.last_check,
+    output = event.output,
+    state = self.centreon_to_canopsis_state[event.category][event.element][event.state],
+    -- extra information
+    hostgroups = self:list_hostgroups(),
+    notes_url = tostring(event.cache.host.notes_url),
+    action_url = tostring(event.cache.host.action_url)
+  }
+end
+
+function EventQueue:format_event_service()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    event_type = "check",
+    source_type = "resource",
+    connector = self.sc_params.params.connector,
+    connector_name = self:get_connector_name(),
+    component = tostring(event.cache.host.name),
+    resource = tostring(event.cache.service.description),
+    timestamp = event.last_check,
+    output = event.output,
+    state = self.centreon_to_canopsis_state[event.category][event.element][event.state],
+    -- extra information
+    servicegroups = self:list_servicegroups(),
+    notes_url = event.cache.service.notes_url,
+    action_url = event.cache.service.action_url,
+    hostgroups = self:list_hostgroups()
+  }
+end
+
+function 
EventQueue:format_event_acknowledgement() + local event = self.sc_event.event + local elements = self.sc_params.params.bbdo.elements + + self.sc_event.event.formated_event = { + event_type = "ack", + crecord_type = "ack", + author = event.author, + resource = "", + component = tostring(event.cache.host.name), + connector = self.sc_params.params.connector, + connector_name = self:get_connector_name(), + timestamp = event.entry_time, + output = event.comment_data, + origin = "centreon", + ticket = "", + state_type = 1, + ack_resources = false + } + + if event.service_id then + self.sc_event.event.formated_event['source_type'] = "resource" + self.sc_event.event.formated_event['resource'] = tostring(event.cache.service.description) + self.sc_event.event.formated_event['ref_rk'] = tostring(event.cache.service.description) + .. "/" .. tostring(event.cache.host.name) + self.sc_event.event.formated_event['state'] = self.centreon_to_canopsis_state[event.category] + [elements.service_status.id][event.state] + else + self.sc_event.event.formated_event['source_type'] = "component" + self.sc_event.event.formated_event['ref_rk'] = "undefined/" .. tostring(event.cache.host.name) + self.sc_event.event.formated_event['state'] = self.centreon_to_canopsis_state[event.category] + [elements.host_status.id][event.state] + end + + -- send ackremove + if event.deletion_time then + self.sc_event.event.formated_event['event_type'] = "ackremove" + self.sc_event.event.formated_event['crecord_type'] = "ackremove" + self.sc_event.event.formated_event['timestamp'] = event.deletion_time + end +end + +function EventQueue:format_event_downtime() + local event = self.sc_event.event + local elements = self.sc_params.params.bbdo.elements + local canopsis_downtime_id = "centreon-downtime-".. event.internal_id .. "-" .. event.entry_time + + if event.cancelled or event.deletion_time then + local metadata = { + event_route = self.sc_params.params.canopsis_downtime_route .. "/" .. canopsis_downtime_id, + method = "DELETE" + } + self:send_data({}, metadata) + else + self.sc_event.event.formated_event = { + _id = canopsis_downtime_id, + author = event.author, + name = canopsis_downtime_id, + tstart = event.start_time, + tstop = event.end_time, + type_ = "Maintenance", + reason = "Autre", + timezone = self.sc_params.params.timezone, + comments = { + { + ['author'] = event.author, + ['message'] = event.comment_data + } + }, + filter = { + ['$and'] = { + { + ['_id'] = "" + } + } + }, + exdate = {}, + } + + if event.service_id then + self.sc_event.event.formated_event['filter']['$and'][1]['_id'] = tostring(event.cache.service.description) + .. "/" .. tostring(event.cache.host.name) + else + self.sc_event.event.formated_event['filter']['$and'][1]['_id'] = tostring(event.cache.host.name) + end + end +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local next = next + + if next(self.sc_event.event.formated_event) ~= nil then + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. 
tostring(self.sc_params.params.reverse_element_mapping[category][element])) + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) + end + +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = event + else + table.insert(payload, event) + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + local params = self.sc_params.params + local url = params.sending_protocol .. "://" .. params.canopsis_user .. ":" .. params.canopsis_password + .. "@" .. params.canopsis_host .. ':' .. params.canopsis_port .. queue_metadata.event_route + payload = broker.json_encode(payload) + queue_metadata.headers = { + "content-length: " .. string.len(payload), + "content-type: application/json" + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. payload) + self.sc_logger:info("[EventQueue:send_data]: Canopsis address is: " .. tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username + .. ':' .. 
self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + if queue_metadata.method and queue_metadata.method == "DELETE" then + http_request:setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method) + else + http_request:setopt_postfields(payload) + end + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + + if http_response_code == 200 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " + .. tostring(http_response_code)) + retval = true + elseif http_response_code == 400 and string.match(http_response_body, "Trying to insert PBehavior with already existing _id") then + self.sc_logger:notice("[EventQueue:send_data]: Ignoring downtime with id: " .. tostring(payload._id) + .. ". Canopsis result: " .. tostring(http_response_body)) + self.sc_logger:info("[EventQueue:send_data]: duplicated downtime event: " .. tostring(data)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " + .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+
+-- the flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because the last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
diff --git a/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua
new file mode 100644
index 00000000000..6ee7b4bc446
--- /dev/null
+++ b/stream-connectors/centreon-certified/capensis/canopsis4-events-apiv2.lua
@@ -0,0 +1,551 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Canopsis Connector Events
+--------------------------------------------------------------------------------
+
+
+-- Libraries
+local curl = require "cURL"
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    "canopsis_authkey",
+    "canopsis_host"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/canopsis4-events.log"
+  local log_level = params.log_level or 1
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
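+  -- Editor's note (illustrative, hypothetical values): a minimal broker
+  -- configuration for this connector only needs the two mandatory parameters
+  -- checked just below, e.g.
+  --   canopsis_authkey = "0123456789abcdef"
+  --   canopsis_host    = "canopsis.mycompany.local"
+  -- every other parameter (port, routes, timezone, accepted elements, ...)
+  -- falls back to the defaults assigned a few lines further down.
+  if not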
self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.canopsis_authkey = params.canopsis_authkey + self.sc_params.params.connector = params.connector or "centreon-stream" + self.sc_params.params.connector_name_type = params.connector_name_type or "poller" + self.sc_params.params.connector_name = params.connector_name or "centreon-stream-central" + self.sc_params.params.canopsis_event_route = params.canopsis_event_route or "/api/v4/event" + self.sc_params.params.canopsis_downtime_route = params.canopsis_downtime_route or "/api/v4/bulk/pbehavior" + self.sc_params.params.canopsis_host = params.canopsis_host + self.sc_params.params.canopsis_port = params.canopsis_port or 8082 + self.sc_params.params.sending_method = params.sending_method or "api" + self.sc_params.params.sending_protocol = params.sending_protocol or "http" + self.sc_params.params.timezone = params.timezone or "Europe/Paris" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status,acknowledgement" + self.sc_params.params.use_severity_as_state = params.use_severity_as_state or 0 + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_params.params.send_mixed_events = 0 + + if self.sc_params.params.connector_name_type ~= "poller" and self.sc_params.params.connector_name_type ~= "custom" then + self.sc_params.params.connector_name_type = "poller" + end + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file + and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) + then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " + .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.sc_flush:add_queue_metadata(categories.neb.id, elements.host_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.service_status.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.acknowledgement.id, {event_route = self.sc_params.params.canopsis_event_route}) + self.sc_flush:add_queue_metadata(categories.neb.id, elements.downtime.id, {event_route = self.sc_params.params.canopsis_downtime_route}) + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end, + [elements.downtime.id] = function () return self:format_event_downtime() end, + [elements.acknowledgement.id] = function () return self:format_event_acknowledgement() end + }, + [categories.bam.id] = {} + } + + self.centreon_to_canopsis_state = { + [categories.neb.id] = { + [elements.host_status.id] = { + [0] = 0, + [1] = 3, + [2] = 2 + }, + [elements.service_status.id] = { + [0] = 0, + [1] = 1, + [2] = 3, + [3] = 2 + } + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +function EventQueue:list_servicegroups() + local servicegroups = {} + + for _, sg in pairs(self.sc_event.event.cache.servicegroups) do + table.insert(servicegroups, sg.group_name) + end + + return servicegroups +end + +function EventQueue:list_hostgroups() + local hostgroups = {} + + for _, hg in pairs(self.sc_event.event.cache.hostgroups) do + table.insert(hostgroups, hg.group_name) + end + + return hostgroups +end + +function EventQueue:get_state(event, severity) + -- return standard centreon state + if severity and self.sc_params.params.use_severity_as_state == 1 then + return severity + end + + return self.centreon_to_canopsis_state[event.category][event.element][event.state] +end + +function EventQueue:get_connector_name() + -- use poller name as a connector name + if self.sc_params.params.connector_name_type == "poller" then + return tostring(self.sc_event.event.cache.poller) + end + + return tostring(self.sc_params.params.connector_name) +end + +function EventQueue:format_event_host() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + event_type = "check", + source_type = "component", + connector = self.sc_params.params.connector, + connector_name = self:get_connector_name(), + component = tostring(event.cache.host.name), + resource = "", + output = event.short_output, + long_output = event.long_output, + state = self:get_state(event, event.cache.severity.host), + timestamp = event.last_check, + hostgroups = self:list_hostgroups(), + notes_url = tostring(event.cache.host.notes_url), + action_url = tostring(event.cache.host.action_url) + } +end + +function EventQueue:format_event_service() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + event_type = "check", + source_type = "resource", + connector = self.sc_params.params.connector, + connector_name = self:get_connector_name(), + component = tostring(event.cache.host.name), + resource = tostring(event.cache.service.description), + output = event.short_output, + long_output = event.long_output, + state = self:get_state(event, event.cache.severity.service), + timestamp = event.last_check, + servicegroups = self:list_servicegroups(), + notes_url = event.cache.service.notes_url, + action_url = event.cache.service.action_url, + hostgroups = self:list_hostgroups() + } +end + +function EventQueue:format_event_acknowledgement() + local event = self.sc_event.event + local elements = self.sc_params.params.bbdo.elements + + self.sc_event.event.formated_event = { + event_type = "ack", + author = event.author, + resource = "", + component = tostring(event.cache.host.name), + connector = self.sc_params.params.connector, + connector_name = self:get_connector_name(), + timestamp = event.entry_time, + output = event.comment_data, + long_output = event.comment_data + -- no available in api v4 ? + -- crecord_type = "ack", + -- origin = "centreon", + -- ticket = "", + -- state_type = 1, + -- ack_resources = false + } + + if event.service_id then + self.sc_event.event.formated_event['source_type'] = "resource" + self.sc_event.event.formated_event['resource'] = tostring(event.cache.service.description) + -- only with v2 api ? + -- self.sc_event.event.formated_event['ref_rk'] = tostring(event.cache.service.description) + -- .. "/" .. 
tostring(event.cache.host.name) + self.sc_event.event.formated_event['state'] = self.centreon_to_canopsis_state[event.category] + [elements.service_status.id][event.state] + else + self.sc_event.event.formated_event['source_type'] = "component" + -- only with v2 api ? + -- self.sc_event.event.formated_event['ref_rk'] = "undefined/" .. tostring(event.cache.host.name) + self.sc_event.event.formated_event['state'] = self.centreon_to_canopsis_state[event.category] + [elements.host_status.id][event.state] + end + + -- send ackremove + if event.deletion_time then + self.sc_event.event.formated_event['event_type'] = "ackremove" + -- only with v2 api ? + -- self.sc_event.event.formated_event['crecord_type'] = "ackremove" + -- self.sc_event.event.formated_event['timestamp'] = event.deletion_time + end +end + +function EventQueue:format_event_downtime() + local event = self.sc_event.event + local elements = self.sc_params.params.bbdo.elements + local downtime_name = "centreon-downtime-" .. event.internal_id .. "-" .. event.entry_time + + if event.cancelled or event.deletion_time then + local metadata = { + method = "DELETE", + event_route = "/api/v4/pbehaviors" + } + self:send_data({name = downtime_name}, metadata) + else + self.sc_event.event.formated_event = { + -- _id = canopsis_downtime_id, + author = event.author, + name = downtime_name, + tstart = event.start_time, + tstop = event.end_time, + type = "Maintenance", + reason = "Other", + timezone = self.sc_params.params.timezone, + comments = { + { + ['author'] = event.author, + ['message'] = event.comment_data + } + }, + entity_pattern = { + { + { + field = "name", + cond = { + type = "eq" + } + } + } + }, + exdates = {} + } + + if event.service_id then + self.sc_event.event.formated_event["entity_pattern"][1][1]["cond"]["value"] = tostring(event.cache.service.description) + .. "/" .. tostring(event.cache.host.name) + else + self.sc_event.event.formated_event["entity_pattern"][1][1]["cond"]["value"] = tostring(event.cache.host.name) + end + end +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local next = next + + if next(self.sc_event.event.formated_event) ~= nil then + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. 
tostring(self.sc_params.params.max_buffer_size))
+  end
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {table} nil on the first call, the payload being built afterwards
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {table} the payload with the new event appended
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = {event}
+  else
+    table.insert(payload, event)
+  end
+
+  return payload
+end
+
+function EventQueue:send_data(payload, queue_metadata)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+
+  local params = self.sc_params.params
+  local url = params.sending_protocol .. "://" .. params.canopsis_host .. ':' .. params.canopsis_port .. queue_metadata.event_route
+  payload = broker.json_encode(payload)
+  queue_metadata.headers = {
+    "content-length: " .. string.len(payload),
+    "content-type: application/json",
+    "x-canopsis-authkey: " .. tostring(self.sc_params.params.canopsis_authkey)
+  }
+
+  self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload)
+
+  -- write the payload in the logfile for testing purposes
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(payload))
+    return true
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. payload)
+  self.sc_logger:info("[EventQueue:send_data]: Canopsis address is: " .. tostring(url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
+
+  -- set the proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set the proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username
+        .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  if queue_metadata.method and queue_metadata.method == "DELETE" then
+    http_request:setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method)
+  end
+
+  http_request:setopt_postfields(payload)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+
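+  -- Editor's illustration (hypothetical host and key; route and port are the
+  -- connector defaults): with default parameters the request built above is
+  -- roughly equivalent to
+  --   curl -X POST "http://canopsis.mycompany.local:8082/api/v4/event" \
+  --     -H "content-type: application/json" \
+  --     -H "x-canopsis-authkey: 0123456789abcdef" \
+  --     -d '[{"event_type":"check","source_type":"component",...}]'
+  -- a pbehavior deletion goes through the same code path but with
+  -- OPT_CUSTOMREQUEST set to DELETE, as done above.
+  if http_response_code == 200 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is "
+      ..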
tostring(http_response_code)) + retval = true + elseif http_response_code == 400 and string.match(http_response_body, "Trying to insert PBehavior with already existing _id") then + self.sc_logger:notice("[EventQueue:send_data]: Ignoring downtime with id: " .. tostring(payload._id) + .. ". Canopsis result: " .. tostring(http_response_body)) + self.sc_logger:info("[EventQueue:send_data]: duplicated downtime event: " .. tostring(data)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " + .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end + diff --git a/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua new file mode 100644 index 00000000000..eb45ad6bdec --- /dev/null +++ b/stream-connectors/centreon-certified/datadog/datadog-events-apiv2.lua @@ -0,0 +1,360 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Datadog Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "api_key" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/datadog-events.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = 
true + end + + --params.max_buffer_size = 1 + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.api_key = params.api_key + self.sc_params.params.datadog_centreon_url = params.datadog_centreon_url or "http://yourcentreonaddress.local" + self.sc_params.params.datadog_event_endpoint = params.datadog_event_endpoint or "/api/v1/events" + self.sc_params.params.http_server_url = params.http_server_url or "https://api.datadoghq.com" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.state_to_alert_type_mapping = { + [categories.neb.id] = { + [elements.host_status.id] = { + [0] = "info", + [1] = "error", + [2] = "warning" + }, + [elements.service_status.id] = { + [0] = "info", + [1] = "warning", + [2] = "error", + [3] = "warning" + } + } + } + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. 
tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +function EventQueue:format_event_host() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + title = tostring(self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. event.cache.host.name), + text = event.output, + aggregation_key = "host_" .. tostring(event.host_id), + alert_type = self.state_to_alert_type_mapping[event.category][event.element][event.state], + host = tostring(event.cache.host.name), + date_happened = event.last_check + } +end + +function EventQueue:format_event_service() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + title = tostring(self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. event.cache.host.name .. ": " .. event.cache.service.description), + text = event.output, + aggregation_key = "service_" .. tostring(event.host_id) .. "_" .. tostring(event.service_id), + alert_type = self.state_to_alert_type_mapping[event.category][event.element][event.state], + host = tostring(event.cache.host.name), + date_happened = event.last_check + } +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. broker.json_encode(event) + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + local url = self.sc_params.params.http_server_url .. self.sc_params.params.datadog_event_endpoint + queue_metadata.headers = { + "content-type: application/json", + "DD-API-KEY:" .. 
self.sc_params.params.api_key
+  }
+
+  self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload)
+
+  -- write the payload in the logfile for testing purposes
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(payload))
+    return true
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload))
+  self.sc_logger:info("[EventQueue:send_data]: Datadog address is: " .. tostring(url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
+
+  -- set the proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set the proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(payload)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  -- https://docs.datadoghq.com/fr/api/latest/events/ anything other than 202 is an error
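+  -- Editor's illustration (hypothetical host and ids): for a DOWN host "srv-01"
+  -- with host_id 42, the JSON document posted to /api/v1/events looks roughly like
+  --   {"title":"DOWN srv-01","text":"CRITICAL - srv-01 is unreachable",
+  --    "aggregation_key":"host_42","alert_type":"error","host":"srv-01",
+  --    "date_happened":1700000000}
+  -- and Datadog acknowledges it with a 202 status code.
+  if http_response_code == 202 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: "
+      ..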
tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end diff --git a/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua new file mode 100644 index 00000000000..b308d787620 --- /dev/null +++ b/stream-connectors/centreon-certified/datadog/datadog-metrics-apiv2.lua @@ -0,0 +1,409 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Datadog Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = 
require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") +local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "api_key" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/datadog-metrics.log" + local log_level = params.log_level or 3 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + --params.max_buffer_size = 1 + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.api_key = params.api_key + self.sc_params.params.datadog_centreon_url = params.datadog_centreon_url or "http://yourcentreonaddress.local" + self.sc_params.params.datadog_metric_endpoint = params.datadog_metric_endpoint or "/api/v1/series" + self.sc_params.params.http_server_url = params.http_server_url or "https://api.datadoghq.com" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.max_buffer_size = params.max_buffer_size or 30 + self.sc_params.params.hard_only = params.hard_only or 0 + self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 + self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 + self.sc_params.params.metric_name_regex = params.metric_name_regex or "[^a-zA-Z0-9_%.]" + self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "_" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + } + } + + self.format_metric = { + [categories.neb.id] = { + [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end, + [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_accepted_event method +-------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is not a misconfiguration, you should create a format file to handle this kind of element")
+  else
+    self.format_event[category][element]()
+  end
+
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_host method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+  self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric ")
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_service method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_service()
+  self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric ")
+  local event = self.sc_event.event
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_host method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_host(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_host]: call format_metric ")
+  self:format_metric_event(metric)
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_service method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_service(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_service]: call format_metric ")
+  self:format_metric_event(metric)
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_event method
+-- @param metric {table} a single metric data
+-------------------------------------------------------------------------------
+function EventQueue:format_metric_event(metric)
+  self.sc_logger:debug("[EventQueue:format_metric]: start real format metric ")
+  local event = self.sc_event.event
+  self.sc_event.event.formated_event = {
+    host = tostring(event.cache.host.name),
+    metric = metric.metric_name,
+    points = {{event.last_check, metric.value}},
+    tags = self:build_metadata(metric)
+  }
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_metric]: end real format metric ")
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:build_metadata method
+-- @param metric {table} a single metric data
+-- @return tags {table} a table with formatted metadata
+--------------------------------------------------------------------------------
+function EventQueue:build_metadata(metric)
+  local tags = {}
+
+  -- add the service name to the tags
+  if self.sc_event.event.cache.service.description then
+    table.insert(tags, "service:" .. self.sc_event.event.cache.service.description)
+  end
+
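+  -- Editor's illustration (hypothetical values): for a "traffic_in" metric on
+  -- service "eth0-bandwidth" of host "srv-01", the series entry queued by
+  -- format_metric_event() above looks roughly like
+  --   { host = "srv-01", metric = "traffic_in",
+  --     points = {{1700000000, 1234.5}},
+  --     tags = { "service:eth0-bandwidth", "instance:eth0" } }
+
+  -- add the metric instance to the tags
+  if metric.instance ~= "" then
+    table.insert(tags, "instance:" ..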
metric.instance) + end + + -- add metric subinstances in tags + if metric.subinstance[1] then + for _, subinstance in ipairs(metric.subinstance) do + table.insert(tags, "subinstance:" .. subinstance) + end + end + + return tags +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = { + series = {event} + } + else + table.insert(payload.series, event) + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + local url = self.sc_params.params.http_server_url .. tostring(self.sc_params.params.datadog_metric_endpoint) + local payload_json = broker.json_encode(payload) + queue_metadata.headers = { + "content-type: application/json", + "DD-API-KEY:" .. self.sc_params.params.api_key + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload_json) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload_json)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload_json)) + self.sc_logger:info("[EventQueue:send_data]: Datadog address is: " .. tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER,queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. 
self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload_json) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + -- https://docs.datadoghq.com/fr/api/latest/events/ other than 202 is not good + if http_response_code == 202 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger) + queue.sc_event = queue.sc_metrics.sc_event + + if queue.sc_event:is_valid_category() then + if queue.sc_metrics:is_valid_bbdo_element() then + -- format event if it is validated + if queue.sc_metrics:is_valid_metric_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua new file mode 100644 index 00000000000..c4547613d51 --- /dev/null +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-events-apiv2.lua @@ -0,0 +1,350 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Elastic Connector Events +-------------------------------------------------------------------------------- + +-- Libraries +local curl = require("cURL") +local ltn12 = require("ltn12") +local mime = require("mime") + +-- Centreon lua core libraries +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- event_queue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "elastic_url", + "elastic_username", + "elastic_password", + "elastic_index_status" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/elastic-events-apiv2.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = 
true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + local http_post_metadata = { + ["index"] = { + ["_index"] = tostring((self.sc_params.params.elastic_index_status)) + } + } + + self.http_post_metadata = broker.json_encode(http_post_metadata) + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self + end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +--------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+function EventQueue:format_event_host()
+  self.sc_event.event.formated_event = {
+    event_type = "host",
+    timestamp = self.sc_event.event.last_check,
+    host = self.sc_event.event.cache.host.name,
+    output = self.sc_event.event.output,
+    status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state],
+    state = self.sc_event.event.state,
+    state_type = self.sc_event.event.state_type
+  }
+end
+
+function EventQueue:format_event_service()
+  self.sc_event.event.formated_event = {
+    event_type = "service",
+    timestamp = self.sc_event.event.last_check,
+    host = self.sc_event.event.cache.host.name,
+    service = self.sc_event.event.cache.service.description,
+    status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state],
+    state = self.sc_event.event.state,
+    state_type = self.sc_event.event.state_type,
+    output = self.sc_event.event.output
+  }
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {string} json encoded string
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {string} json encoded string
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = self.http_post_metadata .. '\n' .. broker.json_encode(event) .. '\n'
+  else
+    payload = payload .. self.http_post_metadata .. '\n' .. broker.json_encode(event) .. '\n'
+  end
+
+  return payload
+end
+
+function EventQueue:send_data(payload, queue_metadata)
+  self.sc_logger:debug("[EventQueue:send_data]: Starting to send data")
+
+  local url = self.sc_params.params.elastic_url .. "/_bulk"
+  queue_metadata.headers = {
+    "content-type: application/json;charset=UTF-8",
+    "content-length: " .. string.len(payload),
+    "Authorization: Basic " .. (mime.b64(self.sc_params.params.elastic_username .. ":" ..
self.sc_params.params.elastic_password))
+  }
+
+  self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload)
+
+  -- write payload in the logfile for test purpose
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:info("[send_data]: " .. tostring(payload))
+    return true
+  end
+
+  self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload))
+  self.sc_logger:info("[EventQueue:send_data]: Elastic URL is: " .. tostring(self.sc_params.params.elastic_url) .. "/_bulk")
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      -- use the connector logger (not the raw broker_log global) for consistency
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(payload)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  if http_response_code == 200 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " ..
tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_event:is_valid_element() then
+      -- format event if it is validated
+      if queue.sc_event:is_valid_event() then
+        queue:format_accepted_event()
+      end
+    --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
+
diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua
new file mode 100644
index 00000000000..d98ac8e0748
--- /dev/null
+++ b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv1.lua
@@ -0,0 +1,193 @@
+local elastic = {
+  rows = {}
+}
+
+--------------------------------------------------------------------------------
+-- Check if the desired index exists on the elasticsearch server
+-- @param socket the socket connected to the elasticsearch server
+--
+-- @return a boolean true on success, false otherwise.
+--------------------------------------------------------------------------------
+local function check_index(socket)
+  -- Ask for the index
+  socket:write('GET /centreon/_mapping?pretty HTTP/1.1\r\nHost: '
+    .. elastic.address .. ':' .. elastic.port
+    ..
'\r\nAccept: */*\r\n\r\n')
+  local answer = socket:read()
+  if string.match(answer, "HTTP/1.1 200 OK") then
+    return true
+  end
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- Initializes the mapping on the elasticsearch server
+-- @param socket the socket connected to the elasticsearch server
+--
+-- @return true on success, false otherwise
+--------------------------------------------------------------------------------
+local function init_index(socket)
+  broker_log:info(1, "init_index")
+  -- Initialize the index
+  local header = 'PUT /centreon?pretty HTTP/1.1\r\nHost: '
+    .. elastic.address .. ':' .. elastic.port
+    .. '\r\nAccept: */*\r\nContent-Type: application/json\r\n'
+  local content = [[{
+  "mappings": {
+    "metrics": {
+      "_all": { "enabled": false },
+      "properties": {
+        "host": { "type": "keyword" },
+        "metric": { "type": "keyword" },
+        "value": { "type": "double" },
+        "timestamp": { "type": "date" }
+      }
+    }
+  }
+}
+]]
+
+  header = header .. 'Content-Length: '
+    .. content:len() .. "\r\n\r\n" .. content
+  socket:write(header)
+  local answer = socket:read()
+  if answer:match("HTTP/1.1 200 OK") then
+    broker_log:info(1, "Index constructed")
+    return true
+  else
+    broker_log:info(1, "Index construction failed")
+    return false
+  end
+end
+
+--------------------------------------------------------------------------------
+-- Initialization of the module
+-- @param conf A table containing data entered by the user through the GUI
+--------------------------------------------------------------------------------
+function init(conf)
+  if conf['log-file'] then
+    elastic.log_file = conf['log-file']
+  else
+    elastic.log_file = '/tmp/elastic-centreon.log'
+    broker_log:info(2, "no 'log-file' value given, '/tmp/elastic-centreon.log' set by default")
+  end
+  broker_log:set_parameters(3, elastic.log_file)
+
+  if conf['elastic-address'] and conf['elastic-address'] ~= "" then
+    elastic.address = conf['elastic-address']
+  else
+    error("Unable to find the 'elastic-address' value of type 'string'")
+  end
+
+  if conf['elastic-port'] and conf['elastic-port'] ~= "" then
+    elastic.port = conf['elastic-port']
+  else
+    elastic.port = 9200
+    broker_log:info(2, "no 'elastic-port' value given, 9200 set by default")
+  end
+
+  if conf['max-row'] then
+    elastic.max_row = conf['max-row']
+  else
+    elastic.max_row = 10
+    broker_log:info(2, "no 'max-row' value given, 10 set by default")
+  end
+
+  elastic.socket = broker_tcp_socket.new()
+  elastic.socket:connect(elastic.address, elastic.port)
+
+  if not check_index(elastic.socket) then
+    broker_log:info(3, "Index missing")
+    if init_index(elastic.socket) then
+      broker_log:info(1, "Index constructed")
+    else
+      broker_log:error(1, "Index construction failed")
+      error("Index construction failed")
+    end
+  end
+  elastic.socket:close()
+end
+
+--------------------------------------------------------------------------------
+-- Called when the data limit count is reached.
+--------------------------------------------------------------------------------
+local function flush()
+  broker_log:info(2, "flush called with " .. #elastic.rows .. " data rows")
+  local retval = true
+  if #elastic.rows > 0 then
+    elastic.socket:connect(elastic.address, elastic.port)
+    local header = "POST /centreon/metrics/_bulk HTTP/1.1\r\nHost: "
+      .. elastic.address .. ":" .. elastic.port .. "\r\n"
+      .. "Accept: */*\r\n"
+      .. "Content-Type: application/json\r\n"
+
+    local data = ''
+    for k,v in pairs(elastic.rows) do
+      data = data ..
'{"index":{}}\n' .. broker.json_encode(v) .. '\n' + end + + header = header .. 'Content-Length: ' + .. data:len() .. "\r\n\r\n" .. data + broker_log:info(3, 'Data sent: ' .. header) + elastic.socket:write(header) + local answer = elastic.socket:read() + local ret + if answer:match("HTTP/1.1 200 OK") then + broker_log:info(2, "flush: " .. #elastic.rows .. " data successfully sent") + ret = true + else + broker_log:error(1, "Unable to write data on the server") + ret = false + end + if ret then + elastic.rows = {} + elastic.socket:close() + else + retval = false + end + end + return retval +end + +-------------------------------------------------------------------------------- +-- Function attached to the write event. +-------------------------------------------------------------------------------- +function write(d) + + local hostname = broker_cache:get_hostname(d.host_id) + if not hostname then + broker_log:error(1, "host name for id " .. d.host_id .. " unknown") + else + broker_log:info(3, tostring(d.ctime) + .. ' --- ' .. hostname .. ' ; ' + .. d.name .. ' ; ' .. tostring(d.value)) + + elastic.rows[#elastic.rows + 1] = { + timestamp = d.ctime * 1000, + host = hostname, + metric = d.name, + value = d.value + } + end + + if #elastic.rows >= elastic.max_row then + return flush() + end + return false +end + +-------------------------------------------------------------------------------- +-- The filter function. When it returns false, the write function is not +-- called. +-- @param category The event category +-- @param element The event sub-category. +-- +-- @return a boolean true when the event is accepted, false otherwise. +-------------------------------------------------------------------------------- +function filter(category, element) + if category == 3 and element == 1 then + return true + end + return false +end + diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua new file mode 100644 index 00000000000..5339b832893 --- /dev/null +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-metrics-apiv2.lua @@ -0,0 +1,780 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Elastic Connector Metrics +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local mime = require("mime") +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") +local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + 
+-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + -- "elastic_username", + -- "elastic_password", + "http_server_url" + } + + self.fail = false + self.last_fail_message_date = 0 + self.fail_message_counter = 0 + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/elastic-metrics.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.elastic_username = params.elastic_username or "" + self.sc_params.params.elastic_password = params.elastic_password or "" + self.sc_params.params.http_server_url = params.http_server_url + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.max_buffer_size = params.max_buffer_size or 30 + self.sc_params.params.hard_only = params.hard_only or 0 + self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 + self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 + self.sc_params.params.metric_name_regex = params.metric_name_regex or "[^a-zA-Z0-9_%.]" + self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "_" + + -- elastic search index parameters + self.sc_params.params.index_template_api_endpoint = params.index_template_api_endpoint or "/_index_template" + self.sc_params.params.index_name = params.index_name or "centreon-metrics" + self.sc_params.params.index_pattern = params.index_pattern or self.sc_params.params.index_name .. 
"*" + self.sc_params.params.index_priority = params.index_priority or 200 + self.sc_params.params.create_datastream_index_template = params.create_datastream_index_template or 1 + self.sc_params.params.update_datastream_index_template = params.update_datastream_index_template or 0 + + -- index dimensions parameters + self.sc_params.params.add_hostgroups_dimension = params.add_hostgroups_dimension or 1 + self.sc_params.params.add_poller_dimension = params.add_poller_dimension or 0 + self.sc_params.params.add_servicegroups_dimension = params.add_servicegroups_dimension or 0 + self.sc_params.params.add_min_max_dimension = params.add_min_max_dimension or 0 + self.sc_params.params.add_thresholds_dimension = params.add_thresholds_dimension or 0 + -- can't get geo coords from cache nor event + -- self.sc_params.params.add_geocoords_dimension = params.add_geocoords_dimension or 0 + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + local queue_metadata = { + endpoint = "/" .. self.sc_params.params.index_name .. "/_bulk", + method = "PUT" + } + + self.sc_flush.queues.global_queues_metadata = queue_metadata + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + } + } + + self.format_metric = { + [categories.neb.id] = { + [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end, + [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + self:build_index_template(self.sc_params.params) + self:handle_index(self.sc_params.params) + return self +end + +function EventQueue:build_index_template(params) + self.index_template_meta = { + description = "Timeseries index template for Centreon metrics", + created_by_centreon = true + } + + self.index_routing_path = { + "host_name", + "service_description", + "metric_name", + "metric_instance", + -- "metric.subinstances" + } + + self.elastic_index_template = { + index_patterns = {params.index_pattern}, + priority = params.index_priority, + _meta = self.index_template_meta, + template = { + settings = { + ["index.mode"] = "time_series" + }, + mappings = { + properties = { + ["host_name"] = { + type = "keyword", + time_series_dimension = true + }, + ["service_description"] = { + type = "keyword", + time_series_dimension = true + }, + ["metric_name"] = { + type = "keyword", + 
time_series_dimension = true
+          },
+          ["metric_unit"] = {
+            type = "keyword",
+            time_series_dimension = false
+          },
+          ["metric_instance"] = {
+            type = "keyword",
+            time_series_dimension = true
+          },
+          ["metric_subinstances"] = {
+            type = "keyword",
+            time_series_dimension = false
+          },
+          ["metric_value"] = {
+            type = "double",
+            -- the metric type must be the string "gauge": a bare `gauge` is a nil global and the field would be dropped
+            time_series_metric = "gauge"
+          },
+          ["@timestamp"] = {
+            type = "date",
+            format = "epoch_second"
+          }
+        }
+      }
+    }
+  }
+
+  -- add hostgroup property in the template
+  if params.add_hostgroups_dimension == 1 then
+    self.elastic_index_template.template.mappings.properties["host_groups"] = {
+      type = "keyword",
+      time_series_dimension = false
+    }
+
+    -- table.insert(self.index_routing_path, "host.groups")
+  end
+
+  -- add servicegroup property in the template
+  if params.add_servicegroups_dimension == 1 then
+    self.elastic_index_template.template.mappings.properties["service_groups"] = {
+      type = "keyword",
+      time_series_dimension = false
+    }
+
+    -- table.insert(self.index_routing_path, "service.groups")
+  end
+
+  -- add poller property in the template
+  if params.add_poller_dimension == 1 then
+    self.elastic_index_template.template.mappings.properties["poller"] = {
+      type = "keyword",
+      time_series_dimension = false
+    }
+
+    -- table.insert(self.index_routing_path, "poller")
+  end
+
+  -- add min and max properties in the template
+  if params.add_min_max_dimension == 1 then
+    self.elastic_index_template.template.mappings.properties["metric_min"] = {
+      type = "keyword",
+      time_series_dimension = false
+    }
+
+    self.elastic_index_template.template.mappings.properties["metric_max"] = {
+      type = "keyword",
+      time_series_dimension = false
+    }
+  end
+
+  -- add warning and critical threshold properties in the template
+  if params.add_thresholds_dimension == 1 then
+    self.elastic_index_template.template.mappings.properties["metric_warning_low"] = {
+      type = "keyword",
+      time_series_dimension = false
+    }
+
+    self.elastic_index_template.template.mappings.properties["metric_warning_high"] = {
+      type = "keyword",
+      time_series_dimension = false
+    }
+
+    self.elastic_index_template.template.mappings.properties["metric_critical_low"] = {
+      type = "keyword",
+      time_series_dimension = false
+    }
+
+    self.elastic_index_template.template.mappings.properties["metric_critical_high"] = {
+      type = "keyword",
+      time_series_dimension = false
+    }
+  end
+
+  self.elastic_index_template.template.settings["index.routing_path"] = self.index_routing_path
+  -- add geocoords property in the template
+  -- can't get geo coords from cache nor event
+  --[[
+    if params.add_geocoords_dimension == 1 then
+      self.elastic_index_template.mappings.properties["host.geocoords"] = {
+        type = "geo_point"
+      }
+    end
+  ]]--
+  self.sc_logger:notice("[EventQueue:build_index_template]: The following index template is going to be created: " .. self.sc_common:dumper(self.elastic_index_template))
+end
+
+function EventQueue:handle_index(params)
+  local index_state = self:check_index_template(params)
+
+  if (not index_state.is_created or not index_state.is_up_to_date) then
+    self.sc_logger:error("[EventQueue:handle_index]: It will not be possible to send data to elasticsearch because of an invalid index template structure")
+    self.fail = true
+  end
+end
+
+function EventQueue:check_index_template(params)
+  local metadata = {
+    method = "GET",
+    endpoint = params.index_template_api_endpoint .. "/" ..
params.index_name + } + local payload = nil + local index_state = { + is_created = false, + is_up_to_date = false, + } + + local return_code = self:send_data(payload, metadata) + + if return_code then + self.sc_logger:notice("[EventQueue:check_index_template]: Elasticsearch index template " .. tostring(params.index_name) .. " has been found") + index_state.is_created = true + index_state.is_up_to_date = self:validate_index_template(params) + return index_state + end + + if (not return_code and params.create_datastream_index_template == 1) then + self.sc_logger:notice("[EventQueue:check_index_template]: Elasticsearch index template " .. tostring(params.index_name) .. " has not been found" + .. ". Trying to create it because create_datastream_index_template parameter is set to 1...") + + if self:create_index_template(params) then + index_state.is_created = true + -- it has just been created so obviously, it is up to date + index_state.is_up_to_date = true + return index_state + end + end + + self.sc_logger:error("[EventQueue:check_index_template]: Elasticsearch index template " .. tostring(params.index_name) .. " has not been found" + .. " and could not be created.") + + return index_state +end + +function EventQueue:create_index_template(params) + local metadata = { + endpoint = params.index_template_api_endpoint .. "/" .. params.index_name, + method = "PUT" + } + + if not self:send_data(broker.json_encode(self.elastic_index_template), metadata) then + self.sc_logger:error("[EventQueue:create_index_template]: Index template " .. tostring(params.index_name) .. " could not be created." + .. ". Error is: " .. tostring(self.elastic_result)) + return false + end + + self.sc_logger:notice("[EventQueue:create_index_template]: Index template " .. tostring(params.index_name) .. " successfully created") + return true +end + +function EventQueue:validate_index_template(params) + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[EventQueue:validate_index_template]: send_data_test is set to 1, ignoring template validation") + return true + end + + local index_template_structure, error = broker.json_decode(self.elastic_result) + + if error then + self.sc_logger:error("[EventQueue:validate_index_template]: Could not decode json: " .. tostring(self.elastic_result) .. ". Error message is: " .. 
tostring(error))
+    return true
+  end
+
+  local required_index_mapping_properties = {
+    "host_name",
+    "service_description",
+    "metric_name",
+    "metric_unit",
+    "metric_value",
+    "metric_instance",
+    "metric_subinstances"
+  }
+
+  if params.add_hostgroups_dimension == 1 then
+    table.insert(required_index_mapping_properties, "host_groups")
+  end
+
+  if params.add_servicegroups_dimension == 1 then
+    table.insert(required_index_mapping_properties, "service_groups")
+  end
+
+  if params.add_min_max_dimension == 1 then
+    table.insert(required_index_mapping_properties, "metric_min")
+    table.insert(required_index_mapping_properties, "metric_max")
+  end
+
+  if params.add_thresholds_dimension == 1 then
+    table.insert(required_index_mapping_properties, "metric_warning_low")
+    table.insert(required_index_mapping_properties, "metric_warning_high")
+    table.insert(required_index_mapping_properties, "metric_critical_low")
+    table.insert(required_index_mapping_properties, "metric_critical_high")
+  end
+
+  -- can't get geo coords from cache nor event
+  --[[
+    if params.add_geocoords_dimension == 1 then
+      table.insert(required_index_mapping_properties, "host.geocoords")
+    end
+  ]]--
+
+  if params.add_poller_dimension == 1 then
+    table.insert(required_index_mapping_properties, "poller")
+  end
+
+  local return_code = true
+  local update_template = false
+
+  -- this double for loop only does two things: log every missing property in the index template for the sake of verbosity
+  -- and change the above flags
+  for _, index_information in ipairs(index_template_structure.index_templates) do
+    if index_information.name == params.index_name then
+      for _, index_mapping_property_name in pairs(required_index_mapping_properties) do
+        -- check if all the mappings are created in the index template
+        if not index_information.index_template.template.mappings.properties[index_mapping_property_name] then
+          if (params.update_datastream_index_template == 1 and index_information.index_template["_meta"].created_by_centreon) then
+            self.sc_logger:notice("[EventQueue:validate_index_template]: Elastic index template is not valid. Missing mapping property: "
+              .. tostring(index_mapping_property_name) .. ". Template is going to be automatically updated")
+            update_template = true
+          else
+            -- we do not return at the first missing property because we want to display all the missing ones in one go instead.
+            self.sc_logger:error("[EventQueue:validate_index_template]: Elastic index template is not valid. Missing mapping property: "
+              .. tostring(index_mapping_property_name))
+            return_code = false
+          end
+        end
+      end
+    end
+  end
+
+  if update_template then
+    self.sc_logger:notice("[EventQueue:validate_index_template]: Going to update index template with the following structure: " ..
self.sc_common:dumper(self.elastic_index_template)) + return_code = self:create_index_template(params) + end + + return return_code +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_accepted_event method +-------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[EventQueue:format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event_host method +-------------------------------------------------------------------------------- +function EventQueue:format_event_host() + local event = self.sc_event.event + self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric ") + self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event_service method +-------------------------------------------------------------------------------- +function EventQueue:format_event_service() + self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric ") + local event = self.sc_event.event + self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_metric_host method +-- @param metric {table} a single metric data +-------------------------------------------------------------------------------- +function EventQueue:format_metric_host(metric) + self.sc_logger:debug("[EventQueue:format_metric_host]: call format_metric host") + self:add_generic_information(metric) + self:add_generic_optional_information(metric) + self:add() +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_metric_service method +-- @param metric {table} a single metric data +-------------------------------------------------------------------------------- +function EventQueue:format_metric_service(metric) + self.sc_logger:debug("[EventQueue:format_metric_service]: call format_metric service") + + self:add_generic_information(metric) + self.sc_event.event.formated_event["service_description"] = tostring(self.sc_event.event.cache.service.description) + self:add_generic_optional_information(metric) + self:add_service_optional_information() + self:add() +end + +function EventQueue:add_generic_information(metric) + local event = self.sc_event.event + self.sc_event.event.formated_event = { + ["@timestamp"] = event.last_check, + ["host_name"] = tostring(event.cache.host.name), 
+ ["metric_name"] = tostring(metric.metric_name), + ["metric_value"] = metric.value, + ["metric_instance"] = metric.instance, + ["metric_subinstances"] = metric.subinstances, + ["metric_unit"] = metric.unit + } +end + +function EventQueue:add_generic_optional_information(metric) + local params = self.sc_event.params + local event = self.sc_event.event + + -- add hostgroups + if params.add_hostgroups_dimension == 1 then + local hostgroups = {} + + for _, hg_info in ipairs(event.cache.hostgroups) do + table.insert(hostgroups, hg_info.group_name) + end + + self.sc_event.event.formated_event["host_groups"] = hostgroups + end + + -- add poller + if params.add_poller_dimension == 1 then + self.sc_event.event.formated_event.poller = event.cache.poller + end + + -- add min and max + if params.add_min_max_dimension == 1 then + self.sc_event.event.formated_event.metric_min = self:handle_NaN(metric.min) + self.sc_event.event.formated_event.metric_max = self:handle_NaN(metric.max) + end + + -- add thresholds + if params.add_thresholds_dimension == 1 then + self.sc_event.event.formated_event.metric_warning_low = self:handle_NaN(metric.warning_low) + self.sc_event.event.formated_event.metric_warning_high = self:handle_NaN(metric.warning_high) + self.sc_event.event.formated_event.metric_critical_low = self:handle_NaN(metric.critical_low) + self.sc_event.event.formated_event.metric_critical_high = self:handle_NaN(metric.critical_high) + end +end + +function EventQueue:handle_NaN(value) + if value == value then + return value + end + + return nil +end + +function EventQueue:add_service_optional_information() + -- add servicegroups + if self.sc_params.params.add_servicegroups_dimension == 1 then + local servicegroups = {} + + for _, sg_info in ipairs(self.sc_event.event.cache.servicegroups) do + table.insert(servicegroups, sg_info.group_name) + end + + self.sc_event.event.formated_event["service_groups"] = servicegroups + end +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. "max is: " .. 
tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = '{"index":{}}\n' .. broker.json_encode(event) .. "\n" + else + payload = payload .. '{"index":{}}\n' .. broker.json_encode(event) .. "\n" + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local params = self.sc_params.params + local url = params.http_server_url .. queue_metadata.endpoint + queue_metadata.headers = { + "Content-type: application/json" + } + + if (params.elastic_username ~= "" and params.elastic_password ~= "") then + table.insert(queue_metadata.headers, "Authorization: Basic " .. mime.b64(params.elastic_username .. ":" .. params.elastic_password)) + end + + if payload or queue_metadata.method == "GET" then + -- write payload in the logfile for test purpose + if params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + return true + end + end + + self.sc_logger:info("[EventQueue:send_data]: Elastic address is: " .. tostring(url)) + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload, basic_auth) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, params.allow_insecure_connection) + :setopt(curl.OPT_SSL_VERIFYHOST, params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) + + -- set proxy address configuration + if (params.proxy_address ~= '') then + if (params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, params.proxy_address .. ':' .. params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (params.proxy_username ~= '') then + if (params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, params.proxy_username .. ':' .. params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + if queue_metadata.method and queue_metadata.method == "PUT" then + http_request:setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method) + end + + -- adding the HTTP POST data + if payload then + http_request:setopt_postfields(payload) + end + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- the gsub function is here to fix a bug with the broker method json_decode that crashes when a value is null. 
Internal issue: MON-20481 + self.elastic_result = string.gsub(http_response_body, "null", "false") + local decoded_elastic_result, error_json = broker.json_decode(self.elastic_result) + + if error_json then + self.sc_logger:error("[EventQueue:send_data]: Couldn't decode json from elasticsearch. Error is: " .. tostring(error_json) + .. ". Received json is: " .. tostring(http_response_body) .. ". Sent data is: " .. tostring(payload)) + return false + end + + if (http_response_code == 200 and not decoded_elastic_result.errors) then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + return true + end + + + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body) .. ". Sent data is: " .. tostring(payload)) + return false +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + if queue.fail_message_counter <= 3 and queue.last_fail_message_date + 30 < os.time(os.date("*t")) then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set or elastic index is not valid") + queue.last_fail_message_date = os.time(os.date("*t")) + queue.fail_message_counter = queue.fail_message_counter + 1 + elseif queue.fail_message_counter > 3 and queue.last_fail_message_date + 300 < os.time(os.date("*t")) then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set or elastic index is not valid") + queue.last_fail_message_date = os.time(os.date("*t")) + queue.fail_message_counter = queue.fail_message_counter + 1 + end + return false + end + + -- initiate event object + queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger) + queue.sc_event = queue.sc_metrics.sc_event + + if queue.sc_event:is_valid_category() then + if queue.sc_metrics:is_valid_bbdo_element() then + -- format event if it is validated + if queue.sc_metrics:is_valid_metric_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end diff --git a/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua b/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua new file mode 100644 index 00000000000..238dfce1295 --- /dev/null +++ b/stream-connectors/centreon-certified/elasticsearch/elastic-neb-apiv1.lua @@ -0,0 +1,345 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Elasticsearch Connector +-- Tested with versions +-- 7.1.1 +-- +-- References: +-- https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Prerequisites: +-- You need an elasticsearch server +-- You can install one with docker: +-- docker pull elasticsearch +-- docker run -p 9200:9200 -p 9300:9300 -v $PWD:/var/lib/elasticsearch -d elasticsearch +-- You need to create two indices: +-- curl -X PUT "http://elasticsearch/centreon_metric" -H 'Content-Type: application/json' +-- -d '{"mappings":{"properties":{"host":{"type":"keyword"},"service":{"type":"keyword"}, +-- "instance":{"type":"keyword"},"metric":{"type":"keyword"},"value":{"type":"double"}, +-- "min":{"type":"double"},"max":{"type":"double"},"uom":{"type":"text"}, +-- "type":{"type":"keyword"},"timestamp":{"type":"date","format":"epoch_second"}}}}' +-- curl -X PUT "http://elasticsearch/centreon_status" -H 'Content-Type: application/json' +-- -d '{"mappings":{"properties":{"host":{"type":"keyword"},"service":{"type":"keyword"}, +-- "output":{"type":"text"},"status":{"type":"keyword"},"state":{"type":"keyword"}, +-- "type":{"type":"keyword"},"timestamp":{"type":"date","format":"epoch_second"}}}}'' +-- +-- The Lua-socket and Lua-sec libraries are required by this script. 
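+--
+-- They are usually available through LuaRocks (an assumption, package names
+-- may differ on your distribution):
+--   luarocks install luasocket
+--   luarocks install luasec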
+-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Access to the data: +-- curl "http://elasticsearch/centreon_status/_search?pretty" +-------------------------------------------------------------------------------- + +local http = require("socket.http") +local https = require("ssl.https") +local ltn12 = require("ltn12") +local mime = require("mime") + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- EventQueue:flush method +-- Called when the max number of events or the max age are reached +-------------------------------------------------------------------------------- + +function EventQueue:flush() + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + local http_result_body = {} + local url = self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. + "/_bulk" + local http_post_data = "" + for _, raw_event in ipairs(self.events) do + if raw_event.status then + http_post_data = http_post_data .. '{"index":{"_index":"' .. self.elastic_index_status .. '"}}\n' .. + broker.json_encode(raw_event) .. '\n' + end + if raw_event.metric then + http_post_data = http_post_data .. '{"index":{"_index":"' .. self.elastic_index_metric .. '"}}\n' .. + broker.json_encode(raw_event) .. '\n' + end + end + broker_log:info(2, "EventQueue:flush: HTTP POST url: \"" .. url .. "\"") + for s in http_post_data:gmatch("[^\r\n]+") do + broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + end + + http.TIMEOUT = self.http_timeout + local req + if self.http_server_protocol == "http" then + req = http + else + req = https + end + local hr_result, hr_code, hr_header, hr_s = req.request{ + url = url, + method = "POST", + -- sink is where the request result's body will go + sink = ltn12.sink.table(http_result_body), + -- request body needs to be formatted as a LTN12 source + source = ltn12.source.string(http_post_data), + headers = { + -- mandatory for POST request with body + ["content-type"] = "application/x-ndjson", + ["content-length"] = string.len(http_post_data), + ["authorization"] = "Basic " .. (mime.b64(self.elastic_username .. ":" .. self.elastic_password)) + } + } + + -- Handling the return code + local retval = false + if hr_code == 200 then + broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. hr_code) + -- now that the data has been sent, we empty the events array + self.events = {} + retval = true + else + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: return code is " .. hr_code) + for i, v in ipairs(http_result_body) do + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: message line " .. i .. + " is \"" .. v .. 
"\"") + end + end + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:add method +-- @param e An event +-------------------------------------------------------------------------------- + +local previous_event = "" + +local function get_hostname(host_id) + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. + " not found. Restarting centengine should fix this.") + hostname = host_id + end + return hostname +end + +local function get_service_description(host_id, service_id) + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. + " not found. Restarting centengine should fix this.") + service = service_id + end + return service +end + +function EventQueue:add(e) + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + if current_event == previous_event then + broker_log:info(3, "EventQueue:add: Duplicate event ignored.") + return false + end + previous_event = current_event + + broker_log:info(3, "EventQueue:add: " .. current_event) + + local type = "host" + local hostname = get_hostname(e.host_id) + if hostname == e.host_id then + if self.skip_anon_events == 1 then return false end + end + + local service_description = "" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events == 1 then return false end + end + end + + if string.match(self.filter_type, "status") then + self.events[#self.events + 1] = { + timestamp = e.last_check, + host = hostname, + service = service_description, + output = string.match(e.output, "^(.*)\n"), + status = e.state, + state = e.state_type, + type = type + } + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: timestamp = " .. + self.events[#self.events].timestamp) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: host = " .. + self.events[#self.events].host) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: service = " .. + self.events[#self.events].service) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: output = " .. + self.events[#self.events].output) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: status = " .. + self.events[#self.events].status) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: status]: state = " .. + self.events[#self.events].state) + end + if string.match(self.filter_type, "metric") then + local perfdata, perfdata_err = broker.parse_perfdata(e.perfdata, true) + if perfdata_err then + broker_log:info(3, "EventQueue:add: No metric: " .. 
perfdata_err) + return false + end + + for m,v in pairs(perfdata) do + local instance = string.match(m, "(.*)#.*") + if not instance then + instance = "" + end + + local perfval = { + value = "", + min = "", + max = "", + uom = "" + } + for i,d in pairs(perfdata[m]) do + perfval[i] = d + end + self.events[#self.events + 1] = { + timestamp = e.last_check, + host = hostname, + service = service_description, + instance = instance, + metric = string.gsub(m, ".*#", ""), + value = perfval.value, + min = perfval.min, + max = perfval.max, + uom = perfval.uom, + type = type + } + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: timestamp = " .. + self.events[#self.events].timestamp) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: host = " .. + self.events[#self.events].host) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: service = " .. + self.events[#self.events].service) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: instance = " .. + self.events[#self.events].instance) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: metric = " .. + self.events[#self.events].metric) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: value = " .. + self.events[#self.events].value) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: min = " .. + self.events[#self.events].min) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: max = " .. + self.events[#self.events].max) + broker_log:info(3, "EventQueue:add: entry #" .. #self.events .. " [type: metric]: uom = " .. + self.events[#self.events].uom) + end + end + + -- then we check whether it is time to send the events to the receiver and flush + if #self.events >= self.max_buffer_size then + broker_log:info(2, "EventQueue:add: flushing because buffer size reached " .. self.max_buffer_size .. + " elements.") + local retval = self:flush() + return retval + elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age then + if #self.events > 0 then + broker_log:info(2, "EventQueue:add: flushing " .. #self.events .. " elements because buffer age reached " .. + (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.") + local retval = self:flush() + return retval + end + return false + else + return false + end +end + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue.new(conf) + local retval = { + http_server_address = "", + http_server_port = 9200, + http_server_protocol = "http", + http_timeout = 5, + elastic_username = "", + elastic_password = "", + elastic_index_metric = "centreon_metric", + elastic_index_status = "centreon_status", + filter_type = "metric,status", + max_buffer_size = 5000, + max_buffer_age = 30, + log_level = 0, -- already proceeded in init function + log_path = "", -- already proceeded in init function + skip_anon_events = 1 + } + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + if i == "elastic_password" then + v = string.gsub(v, ".", "*") + end + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. 
v)
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring parameter " .. i .. " => " .. v)
+    end
+  end
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  -- Internal data initialization
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  local log_level = 3
+  local log_path = "/var/log/centreon-broker/stream-connector-elastic-neb.log"
+  for i,v in pairs(conf) do
+    if i == "log_level" then
+      log_level = v
+    end
+    if i == "log_path" then
+      log_path = v
+    end
+  end
+  broker_log:set_parameters(log_level, log_path)
+  broker_log:info(2, "init: Beginning init() function")
+  queue = EventQueue.new(conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- write() function
+function write(e)
+  broker_log:info(3, "write: Beginning write() function")
+  local retval = queue:add(e)
+  broker_log:info(3, "write: Ending write() function, returning " .. tostring(retval))
+  -- return true to ask broker to clear its cache, false otherwise
+  return retval
+end
+
+-- filter() function
+-- return true if category is NEB and element is Host or Service status
+-- return false otherwise
+function filter(category, element)
+  return category == 1 and (element == 14 or element == 24)
+end
diff --git a/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua
new file mode 100644
index 00000000000..5ff02c7ec5b
--- /dev/null
+++ b/stream-connectors/centreon-certified/google/bigquery-events-apiv2.lua
@@ -0,0 +1,431 @@
+#!/usr/bin/lua
+
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_oauth = require("centreon-stream-connectors-lib.google.auth.oauth")
+local sc_bq = require("centreon-stream-connectors-lib.google.bigquery.bigquery")
+local curl = require("cURL")
+
+local EventQueue = {}
+
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    [1] = "dataset",
+    [2] = "key_file_path",
+    [3] = "api_key",
+    [4] = "scope_list"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log"
+  local log_level = params.log_level or 2
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  -- overriding default parameters for this stream connector if the default values don't suit the basic needs
+  params.accepted_categories = "neb,bam"
+  params.accepted_elements = 
"host_status,service_status,downtime,acknowledgement,ba_status" + self.sc_params.params.proxy_address = params.proxy_address + self.sc_params.params.proxy_port = params.proxy_port + self.sc_params.params.proxy_username = params.proxy_username + self.sc_params.params.proxy_password = params.proxy_password + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_params.params.__internal_ts_host_last_flush = os.time() + self.sc_params.params.__internal_ts_service_last_flush = os.time() + self.sc_params.params.__internal_ts_ack_last_flush = os.time() + self.sc_params.params.__internal_ts_dt_last_flush = os.time() + self.sc_params.params.__internal_ts_ba_last_flush = os.time() + + self.sc_params.params.host_table = params.host_table or "hosts" + self.sc_params.params.service_table = params.service_table or "services" + self.sc_params.params.ack_table = params.ack_table or "acknowledgements" + self.sc_params.params.downtime_table = params.downtime_table or "downtimes" + self.sc_params.params.ba_table = params.ba_table or "bas" + self.sc_params.params._sc_gbq_use_default_schemas = 1 + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + -- initiate EventQueue variables + self.events = { + [categories.neb.id] = {}, + [categories.bam.id] = {} + } + + self.events[categories.neb.id] = { + [elements.acknowledgement.id] = {}, + [elements.downtime.id] = {}, + [elements.host_status.id] = {}, + [elements.service_status.id] = {} + } + + self.events[categories.bam.id] = { + [elements.ba_status.id] = {} + } + + self.flush = { + [categories.neb.id] = {}, + [categories.bam.id] = {} + } + + self.flush[categories.neb.id] = { + [elements.acknowledgement.id] = function () return self:flush_ack() end, + [elements.downtime.id] = function () return self:flush_dt() end, + [elements.host_status.id] = function () return self:flush_host() end, + [elements.service_status.id] = function () return self:flush_service() end + } + + self.flush[categories.bam.id] = { + [elements.ba_status.id] = function () return self:flush_ba() end + } + + self.sc_params.params.google_bq_api_url = params.google_bq_api_url or "https://content-bigquery.googleapis.com/bigquery/v2" + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.sc_oauth = sc_oauth.new(self.sc_params.params, self.sc_common, self.sc_logger) -- , self.sc_common, self.sc_logger) + self.sc_bq = sc_bq.new(self.sc_params.params, self.sc_logger) + self.sc_bq:get_tables_schema() + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_event() + + self.sc_event.event.formated_event = {} + self.sc_event.event.formated_event.json = {} + + for column, value in pairs(self.sc_bq.schemas[self.sc_event.event.category][self.sc_event.event.element]) do + self.sc_event.event.formated_event.json[column] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) + end + + self:add() + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue 
+--------------------------------------------------------------------------------
+function EventQueue:add ()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  self.events[category][element][#self.events[category][element] + 1] = self.sc_event.event.formated_event
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush_host, flush stored host events
+-- Called when the max number of events or the max age are reached
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:flush_host ()
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.sc_logger:debug("EventQueue:flush: Concatenating all the host events as one string")
+
+  -- send stored events
+  retval = self:send_data(self.sc_params.params.host_table)
+
+  -- reset stored events list
+  self.events[categories.neb.id][elements.host_status.id] = {}
+
+  -- and update the timestamp
+  self.sc_params.params.__internal_ts_host_last_flush = os.time()
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush_service, flush stored service events
+-- Called when the max number of events or the max age are reached
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:flush_service ()
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.sc_logger:debug("EventQueue:flush: Concatenating all the service events as one string")
+
+  -- send stored events
+  retval = self:send_data(self.sc_params.params.service_table)
+
+  -- reset stored events list
+  self.events[categories.neb.id][elements.service_status.id] = {}
+
+  -- and update the timestamp
+  self.sc_params.params.__internal_ts_service_last_flush = os.time()
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush_ack, flush stored ack events
+-- Called when the max number of events or the max age are reached
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:flush_ack ()
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.sc_logger:debug("EventQueue:flush: Concatenating all the ack events as one string")
+
+  -- send stored events
+  retval = self:send_data(self.sc_params.params.ack_table)
+
+  -- reset stored events list
+  self.events[categories.neb.id][elements.acknowledgement.id] = {}
+
+  -- and update the timestamp
+  self.sc_params.params.__internal_ts_ack_last_flush = os.time()
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush_dt, flush stored downtime events
+-- Called when the max number of events or the max age are reached
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:flush_dt ()
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.sc_logger:debug("EventQueue:flush: Concatenating all the downtime events as one string")
+
+  -- send stored events
+  retval = self:send_data(self.sc_params.params.downtime_table)
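+  -- (as in the other flush_* methods above, the queue is reset and the
+  -- timestamp refreshed below even when send_data fails, so a failed
+  -- batch is dropped rather than retried)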
+
+  -- reset stored events list
+  self.events[categories.neb.id][elements.downtime.id] = {}
+
+  -- and update the timestamp
+  self.sc_params.params.__internal_ts_dt_last_flush = os.time()
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush_ba, flush stored BA events
+-- Called when the max number of events or the max age are reached
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:flush_ba ()
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.sc_logger:debug("EventQueue:flush: Concatenating all the BA events as one string")
+
+  -- send stored events
+  retval = self:send_data(self.sc_params.params.ba_table)
+
+  -- reset stored events list
+  self.events[categories.bam.id][elements.ba_status.id] = {}
+
+  -- and update the timestamp
+  self.sc_params.params.__internal_ts_ba_last_flush = os.time()
+
+  return retval
+end
+
+function EventQueue:flush_old_queues()
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+  local current_time = os.time()
+
+  -- flush old ack events (the age is logged before flushing, since flush_ack resets the timestamp)
+  if #self.events[categories.neb.id][elements.acknowledgement.id] > 0 and os.time() - self.sc_params.params.__internal_ts_ack_last_flush > self.sc_params.params.max_buffer_age then
+    self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_ack_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data")
+    self:flush_ack()
+  end
+
+  -- flush old downtime events
+  if #self.events[categories.neb.id][elements.downtime.id] > 0 and os.time() - self.sc_params.params.__internal_ts_dt_last_flush > self.sc_params.params.max_buffer_age then
+    self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_dt_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data")
+    self:flush_dt()
+  end
+
+  -- flush old host events
+  if #self.events[categories.neb.id][elements.host_status.id] > 0 and os.time() - self.sc_params.params.__internal_ts_host_last_flush > self.sc_params.params.max_buffer_age then
+    self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_host_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data")
+    self:flush_host()
+  end
+
+  -- flush old service events
+  if #self.events[categories.neb.id][elements.service_status.id] > 0 and os.time() - self.sc_params.params.__internal_ts_service_last_flush > self.sc_params.params.max_buffer_age then
+    self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_service_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data")
+    self:flush_service()
+  end
+
+  -- flush old BA events
+  if #self.events[categories.bam.id][elements.ba_status.id] > 0 and os.time() - self.sc_params.params.__internal_ts_ba_last_flush > self.sc_params.params.max_buffer_age then
+    self.sc_logger:debug("write: Queue max age (" .. os.time() - self.sc_params.params.__internal_ts_ba_last_flush .. "/" .. self.sc_params.params.max_buffer_age .. ") is reached, flushing data")
+    self:flush_ba()
+  end
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to external tool
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:send_data (table_name)
+  local data = {
+    rows = {}
+  }
+
+  -- concatenate all stored events in the data variable
+  for index, formated_event in ipairs(self.events[self.sc_event.event.category][self.sc_event.event.element]) do
+    data.rows[index] = formated_event
+  end
+
+  self.sc_logger:info("EventQueue:send_data: creating json: " .. tostring(broker.json_encode(data)))
+
+  -- output data to the tool we want
+  if self:call(broker.json_encode(data), table_name) then
+    return true
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:call, send the data where we want it to be
+-- @param data (string) the data we want to send
+-- @return true (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:call (data, table_name)
+  local res = ""
+  local headers = {
+    "Authorization: Bearer " .. self.sc_oauth:get_access_token(),
+    "Content-Type: application/json"
+  }
+  local url = self.sc_params.params.google_bq_api_url .. "/projects/" .. self.sc_oauth.key_table.project_id .. "/datasets/"
+    .. self.sc_params.params.dataset .. "/tables/" .. table_name .. "/insertAll?alt=json&key=" .. self.sc_params.params.api_key
+
+  -- initiate curl
+  local request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(function (response)
+      res = res .. response
+    end)
+
+  -- add postfields url params
+  if data then
+    request:setopt_postfields(data)
+  end
+
+  self.sc_logger:info("[EventQueue:call]: URL: " .. tostring(url))
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= "" and self.sc_params.params.proxy_address) then
+    if (self.sc_params.params.proxy_port ~= "" and self.sc_params.params.proxy_port) then
+      request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:call]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '' and self.sc_params.params.proxy_username) then
+    if (self.sc_params.params.proxy_password ~= '' and self.sc_params.params.proxy_password) then
+      request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:call]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- set up headers
+  request:setopt(curl.OPT_HTTPHEADER, headers)
+
+  -- run query
+  request:perform()
+  self.sc_logger:info("EventQueue:call: sending data: " .. tostring(data))
+
+  local code = request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  if code ~= 200 then
+    self.sc_logger:error("[EventQueue:call]: http code is: " .. tostring(code) .. ". 
Result is: " .. tostring(res))
+  end
+
+  return true
+end
+
+local queue
+
+function init(params)
+  queue = EventQueue.new(params)
+end
+
+function write(event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return true
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  -- drop event if wrong category
+  if not queue.sc_event:is_valid_category() then
+    return true
+  end
+
+  -- drop event if wrong element
+  if not queue.sc_event:is_valid_element() then
+    return true
+  end
+
+  -- First, are there some old events waiting in the flush queue?
+  queue:flush_old_queues()
+
+  -- Then we check that the event queue is not already full
+  if (#queue.events[queue.sc_event.event.category][queue.sc_event.event.element] >= queue.sc_params.params.max_buffer_size) then
+    queue.sc_logger:debug("write: Queue max size (" .. #queue.events[queue.sc_event.event.category][queue.sc_event.event.element] .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.")
+    queue.flush[queue.sc_event.event.category][queue.sc_event.event.element]()
+  end
+
+  -- drop event if it is not validated
+  if queue.sc_event:is_valid_event() then
+    queue:format_event()
+  else
+    return true
+  end
+
+  -- Then we check whether it is time to send the events to the receiver and flush
+  if (#queue.events[queue.sc_event.event.category][queue.sc_event.event.element] >= queue.sc_params.params.max_buffer_size) then
+    queue.sc_logger:debug("write: Queue max size (" .. #queue.events[queue.sc_event.event.category][queue.sc_event.event.element] .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached AFTER APPENDING AN EVENT, flushing data.")
+    queue.flush[queue.sc_event.event.category][queue.sc_event.event.element]()
+  end
+
+  return true
+end
diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua
new file mode 100644
index 00000000000..879ff852942
--- /dev/null
+++ b/stream-connectors/centreon-certified/influxdb/influxdb-metrics-apiv1.lua
@@ -0,0 +1,174 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker InfluxDB Connector
+-- Tested with versions
+-- 1.4.3
+--
+-- References: 
+-- https://docs.influxdata.com/influxdb/v1.4/write_protocols/line_protocol_tutorial/
+-- https://docs.influxdata.com/influxdb/v1.4/guides/writing_data/
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Prerequisites:
+-- You need an influxdb server
+-- You can install one with docker and these commands:
+-- docker pull influxdb
+-- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb
+-- You need to create a database
+-- curl http://<influxdb_server>:8086/query --data-urlencode "q=CREATE DATABASE mydb"
+--
+-- The Lua-socket library is required by this script. 
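+-- (lua-socket is typically available through the distribution's lua-socket
+-- package or via "luarocks install luasocket")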
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Access to the data:
+-- curl -G 'http://<influxdb_server>:8086/query?pretty=true' --data-urlencode "db=mydb" --data-urlencode "q=SELECT * from Cpu"
+--------------------------------------------------------------------------------
+
+local http = require("socket.http")
+local ltn12 = require("ltn12")
+
+--------------------------------------------------------------------------------
+-- event_queue class
+--------------------------------------------------------------------------------
+
+local event_queue = {
+  __internal_ts_last_flush = nil,
+  http_server_address = "",
+  http_server_port = 8086,
+  http_server_protocol = "http",
+  events = {},
+  influx_database = "mydb",
+  max_buffer_size = 5000,
+  max_buffer_age = 5
+}
+
+-- Constructor: event_queue:new
+function event_queue:new(o, conf)
+  o = o or {}
+  setmetatable(o, self)
+  self.__index = self
+  for i,v in pairs(conf) do
+    if self[i] and i ~= "events" and string.sub(i, 1, 11) ~= "__internal_" then
+      broker_log:info(1, "event_queue:new: getting parameter " .. i .. " => " .. v)
+      self[i] = v
+    else
+      broker_log:warning(1, "event_queue:new: ignoring parameter " .. i .. " => " .. v)
+    end
+  end
+  self.__internal_ts_last_flush = os.time()
+  broker_log:info(2, "event_queue:new: setting the internal timestamp to " .. self.__internal_ts_last_flush)
+  return o
+end
+
+-- Method: event_queue:flush
+-- Called when the max number of events or when the max age of buffer is reached
+function event_queue:flush()
+  broker_log:info(2, "event_queue:flush: Concatenating all the events as one string")
+  -- we concatenate all the events
+  local http_post_data = ""
+  local http_result_body = {}
+  for i, raw_event in ipairs(self.events) do
+    http_post_data = http_post_data .. raw_event
+  end
+  broker_log:info(2, "event_queue:flush: HTTP POST request \"" .. self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port .. "/write?db=" .. self.influx_database .. "\"")
+  broker_log:info(3, "event_queue:flush: HTTP POST data are: '" .. http_post_data .. "'")
+  local hr_result, hr_code, hr_header, hr_s = http.request{
+    url = self.http_server_protocol.."://"..self.http_server_address..":"..self.http_server_port.."/write?db="..self.influx_database,
+    method = "POST",
+    -- sink is where the request result's body will go
+    sink = ltn12.sink.table(http_result_body),
+    -- request body needs to be formatted as a LTN12 source
+    source = ltn12.source.string(http_post_data),
+    headers = {
+      -- mandatory for POST request with body
+      ["content-length"] = string.len(http_post_data)
+    }
+  }
+  -- Handling the return code
+  if hr_code == 204 then
+    broker_log:info(2, "event_queue:flush: HTTP POST request successful: return code is " .. hr_code)
+  else
+    broker_log:error(1, "event_queue:flush: HTTP POST request FAILED: return code is " .. hr_code)
+    for i, v in ipairs(http_result_body) do
+      broker_log:error(1, "event_queue:flush: HTTP POST request FAILED: message line " .. i .. " is \"" .. v .. "\"")
+    end
+  end
+
+  -- now that the data has been sent, we empty the events array
+  self.events = {}
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+end
+
+-- event_queue:add method
+function event_queue:add(e)
+  local metric = e.name
+  -- time is a reserved word in influxDB so it is renamed
+  if metric == "time" then
+    metric = "_"..metric
+  end
+  -- retrieve objects names instead of IDs
+  local host_name = broker_cache:get_hostname(e.host_id)
+  local service_description = broker_cache:get_service_description(e.host_id, e.service_id)
+  -- what if we could not get them from cache
+  if not host_name then
+    broker_log:warning(1, "event_queue:add: host_name for id " .. e.host_id .. " not found. Restarting centengine should fix this.")
+    host_name = e.host_id
+  end
+  if not service_description then
+    broker_log:warning(1, "event_queue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found. Restarting centengine should fix this.")
+    service_description = e.service_id
+  end
+  -- we finally append the event to the events table
+  broker_log:info(3, "event_queue:add: adding \"" .. service_description..",host="..host_name.." "..metric.."="..e.value.." "..e.ctime.."000000000\" to event list.")
+  self.events[#self.events + 1] = service_description..",host="..host_name.." "..metric.."="..e.value.." "..e.ctime.."000000000\n"
+
+  -- then we check whether it is time to send the events to the receiver and flush
+  if #self.events >= self.max_buffer_size then
+    broker_log:info(2, "event_queue:add: flushing because buffer size reached " .. self.max_buffer_size .. " elements.")
+    self:flush()
+    return true
+  elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age then
+    broker_log:info(2, "event_queue:add: flushing " .. #self.events .. " elements because buffer age reached " .. (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.")
+    self:flush()
+    return true
+  else
+    return false
+  end
+end
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+-- init() function
+function init(conf)
+  broker_log:set_parameters(1, "/var/log/centreon-broker/stream-connector-influxdb.log")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = event_queue:new(nil, conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- write() function
+function write(e)
+  broker_log:info(3, "write: Beginning write() function")
+  queue:add(e)
+  broker_log:info(3, "write: Ending write() function\n")
+  return true
+end
+
+-- filter() function
+-- return true if you want to handle this type of event (category, element)
+-- return false if you want to ignore them
+function filter(category, element)
+  if category == 3 and element == 1 then
+    return true
+  end
+  return false
+end
+
diff --git a/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua
new file mode 100644
index 00000000000..74ee93bb33d
--- /dev/null
+++ b/stream-connectors/centreon-certified/influxdb/influxdb-neb-apiv1.lua
@@ -0,0 +1,284 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker InfluxDB Connector
+-- Tested with versions
+-- 1.4.3, 1.7.4, 1.7.6
+--
+-- References: 
+-- https://docs.influxdata.com/influxdb/v1.4/write_protocols/line_protocol_tutorial/
+-- https://docs.influxdata.com/influxdb/v1.4/guides/writing_data/
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Prerequisites:
+-- You need an influxdb server
+-- You can install one with docker and these commands:
+-- docker pull influxdb
+-- docker run -p 8086:8086 -p 8083:8083 -v $PWD:/var/lib/influxdb -d influxdb
+-- You need to create a database
+-- curl http://<influxdb_server>:8086/query --data-urlencode "q=CREATE DATABASE mydb"
+-- You can optionally create a retention policy
+--
+-- The Lua-socket and Lua-sec libraries are required by this script. 
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Access to the data:
+-- curl -G 'http://<influxdb_server>:8086/query?pretty=true' --data-urlencode "db=mydb"
+--   --data-urlencode "q=SELECT * from Cpu"
+--------------------------------------------------------------------------------
+
+local http = require("socket.http")
+local https = require("ssl.https")
+local ltn12 = require("ltn12")
+local mime = require("mime")
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+-- flush() method
+-- Called when the max number of events or the max age are reached
+--------------------------------------------------------------------------------
+
+function EventQueue:flush()
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")
+  -- we concatenate all the events
+  local http_post_data = ""
+  local http_result_body = {}
+  for _, raw_event in ipairs(self.events) do
+    http_post_data = http_post_data .. raw_event
+  end
+  local url = self.http_server_protocol .. "://" .. self.http_server_address .. ":" .. self.http_server_port ..
+    "/write?db=" .. self.influx_database .. "&rp=" .. self.influx_retention_policy
+  broker_log:info(2, "EventQueue:flush: HTTP POST request \"" .. url .. "\"")
+  broker_log:info(3, "EventQueue:flush: HTTP POST data are: '" .. http_post_data .. "'")
+  http.TIMEOUT = self.http_timeout
+  local req
+  if self.http_server_protocol == "http" then
+    req = http
+  else
+    req = https
+  end
+  local hr_result, hr_code, hr_header, hr_s = req.request{
+    url = url,
+    method = "POST",
+    -- sink is where the request result's body will go
+    sink = ltn12.sink.table(http_result_body),
+    -- request body needs to be formatted as a LTN12 source
+    source = ltn12.source.string(http_post_data),
+    headers = {
+      -- mandatory for POST request with body
+      ["content-length"] = string.len(http_post_data),
+      ["authorization"] = "Basic " .. (mime.b64(self.influx_username .. ":" .. self.influx_password))
+    }
+  }
+  -- Handling the return code
+  local retval = false
+  if hr_code == 204 then
+    broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. hr_code)
+    -- now that the data has been sent, we empty the events array
+    self.events = {}
+    retval = true
+  else
+    broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: return code is " .. hr_code)
+    for i, v in ipairs(http_result_body) do
+      broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED: message line " .. i ..
+        " is \"" .. v .. "\"")
+    end
+  end
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add method
+-- @param e An event
+--------------------------------------------------------------------------------
+
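+-- For reference (illustrative example; assuming default parameters and
+-- skip_events_state = 1 so no centreon.state fields are appended): a service
+-- "Traffic In" on host "srv1" whose perfdata contains "traffic.in=12" with
+-- last_check 1700000000 is queued as the line-protocol message:
+--   Traffic,host=srv1,item=In traffic.in=12 1700000000000000000
+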
+function EventQueue:add(e)
+  broker_log:info(3, "EventQueue:add: " .. broker.json_encode(e))
+  -- let's get and verify we have perfdata
+  local perfdata, perfdata_err = broker.parse_perfdata(e.perfdata)
+  if perfdata_err then
+    broker_log:info(3, "EventQueue:add: No metric: " .. perfdata_err)
+    perfdata = {}
+  end
+  -- retrieve and store state for further processing
+  if self.skip_events_state == 0 and e.last_check ~= nil then
+    perfdata["centreon.state"] = e.state
+    perfdata["centreon.state_type"] = e.state_type
+  elseif perfdata_err then
+    return false
+  end
+  -- retrieve objects names instead of IDs
+  local host_name = broker_cache:get_hostname(e.host_id)
+  local service_description
+  if e.service_id then
+    service_description = broker_cache:get_service_description(e.host_id, e.service_id)
+  else
+    service_description = "host-latency"
+  end
+  -- what if we could not get them from cache
+  if not host_name then
+    broker_log:warning(1, "EventQueue:add: host_name for id " .. e.host_id ..
+      " not found. Restarting centengine should fix this.")
+    if self.skip_anon_events == 1 then
+      return false
+    end
+    host_name = e.host_id
+  end
+  if not service_description then
+    broker_log:warning(1, "EventQueue:add: service_description for id " .. e.host_id .. "." .. e.service_id ..
+      " not found. Restarting centengine should fix this.")
+    if self.skip_anon_events == 1 then
+      return false
+    end
+    service_description = e.service_id
+  end
+  -- message format: <measurement>[,<tag_key>=<tag_value>...]
+  --                 <field_key>=<field_value>[,<field_key>=<field_value>...] [unix-nano-timestamp]
+  -- some characters [ ,=] must be escaped, let's replace them by the replacement_character for better handling
+  -- backslash at the end of a tag value is not supported, let's also replace it
+  -- consider last space in service_description as a separator for an item tag
+  local item = ""
+  if string.find(service_description, " [^ ]+$") then
+    item = string.gsub(service_description, ".* ", "", 1)
+    item = ",item=" .. string.gsub(string.gsub(item, "[ ,=]", self.replacement_character), "\\$", self.replacement_character)
+    service_description = string.gsub(service_description, " +[^ ]+$", "", 1)
+  end
+  service_description = string.gsub(string.gsub(service_description, "[ ,=]", self.replacement_character), "\\$", self.replacement_character)
+  -- define messages from perfdata, transforming instance names to inst tags, which leads to one message per instance
+  -- consider new perfdata (dot-separated metric names) only (of course except for host-latency)
+  local instances = {}
+  local perfdate = e.last_check
+  for m,v in pairs(perfdata) do
+    local inst, metric = string.match(m, "(.+)#(.+)")
+    if not inst then
+      inst = ""
+      metric = m
+    else
+      inst = ",inst=" .. string.gsub(string.gsub(inst, "[ ,=]", self.replacement_character), "\\$", self.replacement_character)
+    end
+    if (not e.service_id and metric ~= "time") or string.match(metric, ".+[.].+") then
+      if not instances[inst] then
+        instances[inst] = self.measurement .. service_description .. ",host=" .. host_name .. item .. inst .. " "
+      end
+      instances[inst] = instances[inst] .. metric .. "=" .. v .. ","
+    elseif metric == "perfdate" then
+      perfdate = v
+    end
+  end
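+  -- each entry of "instances" now holds one line-protocol message with a
+  -- trailing comma after its last field; v:sub(1, -2) below strips that comma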
+  -- compute final messages to push
+  for _,v in pairs(instances) do
+    self.events[#self.events + 1] = v:sub(1, -2) .. " " .. perfdate .. "000000000" .. "\n"
+    broker_log:info(3, "EventQueue:add: adding " .. self.events[#self.events]:sub(1, -2))
+  end
+  -- then we check whether it is time to send the events to the receiver and flush
+  if #self.events >= self.max_buffer_size then
+    broker_log:info(2, "EventQueue:add: flushing because buffer size reached " .. self.max_buffer_size ..
+      " elements.")
+    local retval = self:flush()
+    return retval
+  elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age then
+    broker_log:info(2, "EventQueue:add: flushing " .. #self.events .. " elements because buffer age reached " ..
+      (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.")
+    local retval = self:flush()
+    return retval
+  else
+    return false
+  end
+end
+
+--------------------------------------------------------------------------------
+-- Constructor
+-- @param conf The table given by the init() function and returned from the GUI
+-- @return the new EventQueue
+--------------------------------------------------------------------------------
+
+function EventQueue.new(conf)
+  local retval = {
+    measurement = "",
+    http_server_address = "",
+    http_server_port = 8086,
+    http_server_protocol = "https",
+    http_timeout = 5,
+    influx_database = "mydb",
+    influx_retention_policy = "",
+    influx_username = "",
+    influx_password = "",
+    max_buffer_size = 5000,
+    max_buffer_age = 30,
+    skip_anon_events = 1,
+    skip_events_state = 0,
+    replacement_character = "_",
+    log_level = 0, -- already processed in the init function
+    log_path = "" -- already processed in the init function
+  }
+  for i,v in pairs(conf) do
+    if retval[i] then
+      retval[i] = v
+      if i == "influx_password" then
+        v = string.gsub(v, ".", "*")
+      end
+      broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+      if i == "measurement" then
+        retval[i] = retval[i] .. ",service="
+      end
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring parameter " .. i .. " => " .. v)
+    end
+  end
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  -- Internal data initialization
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  local log_level = 3
+  local log_path = "/var/log/centreon-broker/stream-connector-influxdb-neb.log"
+  for i,v in pairs(conf) do
+    if i == "log_level" then
+      log_level = v
+    end
+    if i == "log_path" then
+      log_path = v
+    end
+  end
+  broker_log:set_parameters(log_level, log_path)
+  broker_log:info(2, "init: Beginning init() function")
+  queue = EventQueue.new(conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- write() function
+function write(e)
+  broker_log:info(3, "write: Beginning write() function")
+  local retval = queue:add(e)
+  broker_log:info(3, "write: Ending write() function, returning " .. tostring(retval))
+  -- return true to ask broker to clear its cache, false otherwise
+  return retval
+end
+
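+-- (BBDO ids used below: category 1 is NEB; element 14 is host_status and
+-- 24 is service_status)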
+-- filter() function
+-- return true if you want to handle this type of event (category, element); here category NEB and element
+-- Host or Service status
+-- return false otherwise
+function filter(category, element)
+  return category == 1 and (element == 14 or element == 24)
+end
diff --git a/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua b/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua
new file mode 100644
index 00000000000..a0c61e1b756
--- /dev/null
+++ b/stream-connectors/centreon-certified/influxdb/influxdb2-metrics-apiv2.lua
@@ -0,0 +1,413 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker influxdb Connector Events
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require "cURL"
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics")
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    "bucket_id",
+    "bucket_api_key",
+    "org_name",
+    "http_server_url"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/influxdb2-metrics.log"
+  local log_level = params.log_level or 1
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  -- overriding default parameters for this stream connector if the default values don't suit the basic needs
+  self.sc_params.params.bucket_api_key = params.bucket_api_key
+  self.sc_params.params.bucket_id = params.bucket_id
+  self.sc_params.params.org_name = params.org_name
+  self.sc_params.params.http_server_url = params.http_server_url
+  self.sc_params.params.influxdb2_api_endpoint = params.influxdb2_api_endpoint or "/api/v2/write"
+  self.sc_params.params.influxdb2_precision = 
params.influxdb2_precision or "s" -- can be ms, s, us, ns [default] + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + -- according to https://docs.influxdata.com/influxdb/cloud/write-data/best-practices/optimize-writes/#batch-writes best practice is 5000 lines + self.sc_params.params.max_buffer_size = params.max_buffer_size or 5000 + self.sc_params.params.hard_only = params.hard_only or 0 + self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 + self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 + -- https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#special-characters + self.sc_params.params.metric_name_regex = params.metric_name_regex or "([, =])" + self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "\\%1" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + } + } + + self.format_metric = { + [categories.neb.id] = { + [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end, + [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_accepted_event method +-------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is not a misconfiguration, you should create a format file to handle this kind of element")
+  else
+    self.format_event[category][element]()
+  end
+
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+--- escape_special_characters: escape influxdb2 characters according to https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#special-characters
+-- @param text (string) the string that probably contains special characters
+-- @return (string) the string with escaped special characters
+function EventQueue:escape_special_characters(text)
+  local params = self.sc_params.params
+  return string.gsub(tostring(text), params.metric_name_regex, params.metric_replacement_character)
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_host method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+  self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric ")
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event_service method
+--------------------------------------------------------------------------------
+function EventQueue:format_event_service()
+  self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric ")
+  local event = self.sc_event.event
+  self.sc_metrics:build_metric(self.format_metric[event.category][event.element])
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_host method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_host(metric)
+  self.sc_logger:debug("[EventQueue:format_metric_host]: start format_metric host")
+  self.sc_event.event.formated_event = metric.metric_name .. ",type=host," .. self:build_generic_tags(metric) .. " value=" .. metric.value .. " " .. self.sc_event.event.last_check
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_metric_host]: end format_metric host")
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_metric_service method
+-- @param metric {table} a single metric data
+--------------------------------------------------------------------------------
+function EventQueue:format_metric_service(metric)
+  local params = self.sc_params.params
+  self.sc_logger:debug("[EventQueue:format_metric_service]: start format_metric service")
+  self.sc_event.event.formated_event = metric.metric_name .. ",type=service,service.name="
+    .. self:escape_special_characters(self.sc_event.event.cache.service.description)
+    .. "," .. self:build_generic_tags(metric) .. " value=" .. metric.value .. " " .. 
self.sc_event.event.last_check + self:add() + self.sc_logger:debug("[EventQueue:format_metric_service]: end format_metric service") +end + +-------------------------------------------------------------------------------- +---- EventQueue:build_tags method +-- @param metric {table} a single metric data +-- @return tags {table} a table with formated metadata +-------------------------------------------------------------------------------- +function EventQueue:build_generic_tags(metric) + local event = self.sc_event.event + local tags = 'host.name=' .. event.cache.host.name .. ',poller=' .. self:escape_special_characters(event.cache.poller) + + -- add metric instance in tags + if metric.instance ~= "" then + tags = tags .. ',metric.instance=' .. self:escape_special_characters(metric.instance) + end + + if metric.uom ~= "" then + tags = tags .. ',metric.unit=' .. metric.uom + end + + -- add metric subinstances in tags + if metric.subinstance[1] then + for subinstance_name, subinstance_value in ipairs(metric.subinstance) do + tags = tags .. ',' .. self.sc_common:trim(subinstance_name, "_") .. '=' .. self:escape_special_characters(subinstance_value) + end + end + + return tags +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = event + else + payload = payload .. "\n" .. event + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local params = self.sc_params.params + + local url = params.http_server_url .. tostring(params.influxdb2_api_endpoint) + .. "?bucket=" .. tostring(params.bucket_id) .. "&org=" .. tostring(params.org_name) + .. "&precision=" .. tostring(params.influxdb2_precision) + + queue_metadata.headers = { + "content-type: text/plain; charset=utf-8", + "accept: application/json", + "Authorization: Token " .. 
tostring(params.bucket_api_key) + } + + self.sc_logger:log_curl_command(url, queue_metadata, params, payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Influxdb address is: " .. tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER,queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + -- https://docs.influxdata.com/influxdb/cloud/api/#operation/PostWrite other than 204 is not good + if http_response_code == 204 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. 
tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- init() function
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger)
+  queue.sc_event = queue.sc_metrics.sc_event
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_metrics:is_valid_bbdo_element() then
+      -- format event if it is validated
+      if queue.sc_metrics:is_valid_metric_event() then
+        queue:format_accepted_event()
+      end
+    -- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because the last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
\ No newline at end of file
diff --git a/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
new file mode 100644
index 00000000000..05b0513b4ec
--- /dev/null
+++ b/stream-connectors/centreon-certified/kafka/kafka-events-apiv2.lua
@@ -0,0 +1,310 @@
+#!/usr/bin/lua
+
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+local kafka_config = 
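+-- The rdkafka modules below wrap the librdkafka bindings shipped with the lib. A minimal
+-- producer lifecycle, sketched from the calls used later in this file (broker address,
+-- topic name and payload are placeholders):
+--   local config = kafka_config.new()
+--   local producer = kafka_producer.new(config)
+--   producer:brokers_add("kafka1:9092")
+--   local topic = kafka_topic.new(producer, "my_topic", kafka_topic_config.new())
+--   producer:produce(topic, -1, '{"host":"srv1"}')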
require("centreon-stream-connectors-lib.rdkafka.config") +local kafka_producer = require("centreon-stream-connectors-lib.rdkafka.producer") +local kafka_topic_config = require("centreon-stream-connectors-lib.rdkafka.topic_config") +local kafka_topic = require("centreon-stream-connectors-lib.rdkafka.topic") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + -- listing madantory parameters + local mandatory_parameters = { + [1] = "topic", + [2] = "brokers" + } + + -- initiate EventQueue variables + self.events = {} + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/kafka-stream-connector.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + self.sc_kafka_config = kafka_config.new() + self.sc_kafka_topic_config = kafka_topic_config.new() + + -- initiate parameters dedicated to this stream connector + self.sc_params.params.kafka_partition_ua = -1 + self.sc_params.params.topic = params.topic + self.sc_params.params.brokers = params.brokers + self.sc_params.params.centreon_name = params.centreon_name + + -- overriding default parameters for this stream connector + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- handle kafka params + self.sc_params:get_kafka_params(self.sc_kafka_config, params) + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- SEGFAULT ON EL8 (only usefull for debugging) + -- self.sc_kafka_config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end) + -- self.sc_kafka_config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end) + + -- initiate a kafka producer + self.sc_kafka_producer = kafka_producer.new(self.sc_kafka_config) + + -- add kafka brokers to the producer + local kafka_brokers = self.sc_common:split(self.sc_params.params.brokers, ',') + for index, broker in ipairs(kafka_brokers) do + self.sc_kafka_producer:brokers_add(broker) + end + + -- add kafka topic config + self.sc_kafka_topic_config["auto.commit.enable"] = "true" + self.sc_kafka_topic = kafka_topic.new(self.sc_kafka_producer, self.sc_params.params.topic, self.sc_kafka_topic_config) + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file() + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_host_status() end, + [elements.service_status.id] = function () return self:format_service_status() end + }, + [categories.bam.id] = function () return self:format_ba_status() end + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + for index, value in pairs(template) do + self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) + end + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+
+  return true
+end
+
+function EventQueue:format_host_status()
+  self.sc_event.event.formated_event = {
+    host = tostring(self.sc_event.event.cache.host.name),
+    state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state],
+    output = self.sc_common:ifnil_or_empty(string.gsub(self.sc_event.event.output, '\\', "_"), "no output"),
+  }
+end
+
+function EventQueue:format_service_status()
+  self.sc_event.event.formated_event = {
+    host = tostring(self.sc_event.event.cache.host.name),
+    service = tostring(self.sc_event.event.cache.service.description),
+    state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state],
+    output = self.sc_common:ifnil_or_empty(string.gsub(self.sc_event.event.output, '\\', "_"), "no output")
+  }
+end
+
+function EventQueue:format_ba_status()
+  self.sc_event.event.formated_event = {
+    ba = tostring(self.sc_event.event.cache.ba.ba_name),
+    state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state]
+  }
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {string} json encoded string
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {string} json encoded string
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = broker.json_encode(event)
+  else
+    payload = payload .. ',' .. broker.json_encode(event)
+  end
+
+  return payload
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to external tool
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:send_data(payload, queue_metadata)
+
+  -- write payload in the logfile for test purpose
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. 
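+    -- Sketch of what the payload looks like at this point, given that build_payload above
+    -- joins events with a comma (values are hypothetical): a comma-separated sequence of
+    -- JSON objects, not a JSON array:
+    --   {"host":"srv1","state":"DOWN","output":"..."},{"host":"srv2","state":"UP","output":"..."}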
tostring(payload)) + return true + end + + self.sc_logger:info("EventQueue:send_data: creating json: " .. tostring(payload)) + + -- output data to the tool we want + if self:call(payload) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue:call send the data where we want it to be +-- @param data (string) the data we want to send +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:call (data) + self.sc_kafka_producer:produce(self.sc_kafka_topic, self.sc_params.params.kafka_partition_ua, data) + + return true +end + +local queue + +function init(params) + queue = EventQueue.new(params) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end diff --git a/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua new file mode 100644 index 00000000000..9e5c20d4235 --- /dev/null +++ b/stream-connectors/centreon-certified/logstash/logstash-events-apiv2.lua @@ -0,0 +1,345 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker logstash Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "http_server_url", + "port" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/logstash-events.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.http_server_url 
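+  -- Note: send_data below builds the target as http_server_url .. ":" .. port and issues
+  -- a PUT request, so with hypothetical values http_server_url = "http://logstash.local"
+  -- and port = 8080, events end up at PUT http://logstash.local:8080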
= params.http_server_url + self.sc_params.params.port = params.port + self.sc_params.params.username = params.username or "" + self.sc_params.params.password = params.password or "" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file() + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + for index, value in pairs(template) do + self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) + end + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +function EventQueue:format_event_host() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + title = self.sc_params.params.status_mapping[event.category][event.element][event.state] .. ": " .. 
tostring(event.cache.host.name), + state = self.sc_params.params.status_mapping[event.category][event.element][event.state], + hostname = tostring(event.cache.host.name), + output = event.output, + poller = event.cache.poller, + event_timestamp = event.last_check + } +end + +function EventQueue:format_event_service() + local event = self.sc_event.event + self.sc_event.event.formated_event = { + title = self.sc_params.params.status_mapping[event.category][event.element][event.state] .. ": " .. tostring(event.cache.host.name) .. ", " .. tostring(event.cache.service.description), + state = self.sc_params.params.status_mapping[event.category][event.element][event.state], + hostname = tostring(event.cache.host.name), + service = tostring(event.cache.service.description), + output = event.output, + poller = event.cache.poller, + event_timestamp = event.last_check + } +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. broker.json_encode(event) + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + local url = self.sc_params.params.http_server_url .. ":" .. self.sc_params.params.port + queue_metadata.headers = {"accept: application/json"} + queue_metadata.method = "PUT" + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Logstash address is: " .. tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. 
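+        -- libcurl invokes this write callback once per received chunk, so the closure
+        -- accumulates the chunks into http_response_body for the logging done below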
tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_CUSTOMREQUEST, queue_metadata.method) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + if (self.sc_params.params.username ~= '') then + if (self.sc_params.params.password ~= '') then + http_request:setopt(curl.OPT_USERPWD, self.sc_params.params.username .. ":" .. self.sc_params.params.password) + else + self.sc_logger:error("[EventQueue:send_data]: basic auth username is configured but no password has been provided") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 200 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end diff --git a/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua b/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua new file mode 100644 index 00000000000..91f22e49bc4 --- /dev/null +++ b/stream-connectors/centreon-certified/ndo/ndo-module-apiv1.lua @@ -0,0 +1,332 @@ +local ndo = {} + +ndo.api = { + NDO_API_PROTOVERSION = 2, + NDO_API_STARTCONFIGDUMP = 900, + NDO_API_ENDCONFIGDUMP = 901, + NDO_API_ENDDATA = 999, + NDO_API_ENDDATADUMP = 1000, + NDO_API_LOGENTRY = 100, + NDO_API_PROCESSDATA = 200, + NDO_API_TIMEDEVENTDATA = 201, + NDO_API_LOGDATA = 202, + NDO_API_SYSTEMCOMMANDDATA = 203, + NDO_API_EVENTHANDLERDATA = 204, + NDO_API_NOTIFICATIONDATA = 205, + NDO_API_SERVICECHECKDATA = 206, + NDO_API_HOSTCHECKDATA = 207, + NDO_API_COMMENTDATA = 208, + NDO_API_DOWNTIMEDATA = 209, + NDO_API_FLAPPINGDATA = 210, + NDO_API_PROGRAMSTATUSDATA = 211, + NDO_API_HOSTSTATUSDATA = 212, + NDO_API_SERVICESTATUSDATA = 213, + NDO_API_ADAPTIVEPROGRAMDATA = 214, + NDO_API_ADAPTIVEHOSTDATA = 215, + NDO_API_ADAPTIVESERVICEDATA = 216, + NDO_API_EXTERNALCOMMANDDATA = 217, + NDO_API_AGGREGATEDSTATUSDATA = 218, + NDO_API_RETENTIONDATA = 219, + NDO_API_CONTACTNOTIFICATIONDATA = 220, + NDO_API_CONTACTNOTIFICATIONMETHODDATA = 221, + NDO_API_ACKNOWLEDGEMENTDATA = 222, + NDO_API_STATECHANGEDATA = 223, + NDO_API_CONTACTSTATUSDATA = 224, + NDO_API_ADAPTIVECONTACTDATA = 225, + NDO_API_MAINCONFIGFILEVARIABLES = 300, + NDO_API_RESOURCECONFIGFILEVARIABLES = 301, + NDO_API_CONFIGVARIABLES = 302, + NDO_API_RUNTIMEVARIABLES = 303, + NDO_API_HOSTDEFINITION = 400, + NDO_API_HOSTGROUPDEFINITION = 401, + NDO_API_SERVICEDEFINITION = 402, + NDO_API_SERVICEGROUPDEFINITION = 403, + NDO_API_HOSTDEPENDENCYDEFINITION = 404, + NDO_API_SERVICEDEPENDENCYDEFINITION = 405, + NDO_API_HOSTESCALATIONDEFINITION = 406, + NDO_API_SERVICEESCALATIONDEFINITION = 407, + NDO_API_COMMANDDEFINITION = 408, + NDO_API_TIMEPERIODDEFINITION = 409, + NDO_API_CONTACTDEFINITION = 410, + NDO_API_CONTACTGROUPDEFINITION = 411, + NDO_API_HOSTEXTINFODEFINITION = 412, + NDO_API_SERVICEEXTINFODEFINITION = 413, + NDO_API_ACTIVEOBJECTSLIST = 414 +} + +ndo.data = { + NDO_DATA_NONE = 0, + NDO_DATA_TYPE = 1, + NDO_DATA_FLAGS = 2, + NDO_DATA_ATTRIBUTES = 3, + NDO_DATA_TIMESTAMP = 4, + NDO_DATA_ACKAUTHOR = 5, + 
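+  -- These numeric tokens mirror the NDO protocol's data attribute IDs; the output module
+  -- (ndo-output-apiv1.lua below) uses them to map each event field onto its protocol
+  -- attribute, so the values must stay aligned with the NDO protocol definition.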
NDO_DATA_ACKDATA = 6, + NDO_DATA_ACKNOWLEDGEMENTTYPE = 7, + NDO_DATA_ACTIVEHOSTCHECKSENABLED = 8, + NDO_DATA_ACTIVESERVICECHECKSENABLED = 9, + NDO_DATA_AUTHORNAME = 10, + NDO_DATA_CHECKCOMMAND = 11, + NDO_DATA_CHECKTYPE = 12, + NDO_DATA_COMMANDARGS = 13, + NDO_DATA_COMMANDLINE = 14, + NDO_DATA_COMMANDSTRING = 15, + NDO_DATA_COMMANDTYPE = 16, + NDO_DATA_COMMENT = 17, + NDO_DATA_COMMENTID = 18, + NDO_DATA_COMMENTTIME = 19, + NDO_DATA_COMMENTTYPE = 20, + NDO_DATA_CONFIGFILENAME = 21, + NDO_DATA_CONFIGFILEVARIABLE = 22, + NDO_DATA_CONFIGVARIABLE = 23, + NDO_DATA_CONTACTSNOTIFIED = 24, + NDO_DATA_CURRENTCHECKATTEMPT = 25, + NDO_DATA_CURRENTNOTIFICATIONNUMBER = 26, + NDO_DATA_CURRENTSTATE = 27, + NDO_DATA_DAEMONMODE = 28, + NDO_DATA_DOWNTIMEID = 29, + NDO_DATA_DOWNTIMETYPE = 30, + NDO_DATA_DURATION = 31, + NDO_DATA_EARLYTIMEOUT = 32, + NDO_DATA_ENDTIME = 33, + NDO_DATA_ENTRYTIME = 34, + NDO_DATA_ENTRYTYPE = 35, + NDO_DATA_ESCALATED = 36, + NDO_DATA_EVENTHANDLER = 37, + NDO_DATA_EVENTHANDLERENABLED = 38, + NDO_DATA_EVENTHANDLERSENABLED = 39, + NDO_DATA_EVENTHANDLERTYPE = 40, + NDO_DATA_EVENTTYPE = 41, + NDO_DATA_EXECUTIONTIME = 42, + NDO_DATA_EXPIRATIONTIME = 43, + NDO_DATA_EXPIRES = 44, + NDO_DATA_FAILUREPREDICTIONENABLED = 45, + NDO_DATA_FIXED = 46, + NDO_DATA_FLAPDETECTIONENABLED = 47, + NDO_DATA_FLAPPINGTYPE = 48, + NDO_DATA_GLOBALHOSTEVENTHANDLER = 49, + NDO_DATA_GLOBALSERVICEEVENTHANDLER = 50, + NDO_DATA_HASBEENCHECKED = 51, + NDO_DATA_HIGHTHRESHOLD = 52, + NDO_DATA_HOST = 53, + NDO_DATA_ISFLAPPING = 54, + NDO_DATA_LASTCOMMANDCHECK = 55, + NDO_DATA_LASTHARDSTATE = 56, + NDO_DATA_LASTHARDSTATECHANGE = 57, + NDO_DATA_LASTHOSTCHECK = 58, + NDO_DATA_LASTHOSTNOTIFICATION = 59, + NDO_DATA_LASTLOGROTATION = 60, + NDO_DATA_LASTSERVICECHECK = 61, + NDO_DATA_LASTSERVICENOTIFICATION = 62, + NDO_DATA_LASTSTATECHANGE = 63, + NDO_DATA_LASTTIMECRITICAL = 64, + NDO_DATA_LASTTIMEDOWN = 65, + NDO_DATA_LASTTIMEOK = 66, + NDO_DATA_LASTTIMEUNKNOWN = 67, + NDO_DATA_LASTTIMEUNREACHABLE = 68, + NDO_DATA_LASTTIMEUP = 69, + NDO_DATA_LASTTIMEWARNING = 70, + NDO_DATA_LATENCY = 71, + NDO_DATA_LOGENTRY = 72, + NDO_DATA_LOGENTRYTIME = 73, + NDO_DATA_LOGENTRYTYPE = 74, + NDO_DATA_LOWTHRESHOLD = 75, + NDO_DATA_MAXCHECKATTEMPTS = 76, + NDO_DATA_MODIFIEDHOSTATTRIBUTE = 77, + NDO_DATA_MODIFIEDHOSTATTRIBUTES = 78, + NDO_DATA_MODIFIEDSERVICEATTRIBUTE = 79, + NDO_DATA_MODIFIEDSERVICEATTRIBUTES = 80, + NDO_DATA_NEXTHOSTCHECK = 81, + NDO_DATA_NEXTHOSTNOTIFICATION = 82, + NDO_DATA_NEXTSERVICECHECK = 83, + NDO_DATA_NEXTSERVICENOTIFICATION = 84, + NDO_DATA_NOMORENOTIFICATIONS = 85, + NDO_DATA_NORMALCHECKINTERVAL = 86, + NDO_DATA_NOTIFICATIONREASON = 87, + NDO_DATA_NOTIFICATIONSENABLED = 88, + NDO_DATA_NOTIFICATIONTYPE = 89, + NDO_DATA_NOTIFYCONTACTS = 90, + NDO_DATA_OBSESSOVERHOST = 91, + NDO_DATA_OBSESSOVERHOSTS = 92, + NDO_DATA_OBSESSOVERSERVICE = 93, + NDO_DATA_OBSESSOVERSERVICES = 94, + NDO_DATA_OUTPUT = 95, + NDO_DATA_PASSIVEHOSTCHECKSENABLED = 96, + NDO_DATA_PASSIVESERVICECHECKSENABLED = 97, + NDO_DATA_PERCENTSTATECHANGE = 98, + NDO_DATA_PERFDATA = 99, + NDO_DATA_PERSISTENT = 100, + NDO_DATA_PROBLEMHASBEENACKNOWLEDGED = 101, + NDO_DATA_PROCESSID = 102, + NDO_DATA_PROCESSPERFORMANCEDATA = 103, + NDO_DATA_PROGRAMDATE = 104, + NDO_DATA_PROGRAMNAME = 105, + NDO_DATA_PROGRAMSTARTTIME = 106, + NDO_DATA_PROGRAMVERSION = 107, + NDO_DATA_RECURRING = 108, + NDO_DATA_RETRYCHECKINTERVAL = 109, + NDO_DATA_RETURNCODE = 110, + NDO_DATA_RUNTIME = 111, + NDO_DATA_RUNTIMEVARIABLE = 112, + NDO_DATA_SCHEDULEDDOWNTIMEDEPTH = 113, + NDO_DATA_SERVICE 
= 114, + NDO_DATA_SHOULDBESCHEDULED = 115, + NDO_DATA_SOURCE = 116, + NDO_DATA_STARTTIME = 117, + NDO_DATA_STATE = 118, + NDO_DATA_STATECHANGE = 119, + NDO_DATA_STATECHANGETYPE = 120, + NDO_DATA_STATETYPE = 121, + NDO_DATA_STICKY = 122, + NDO_DATA_TIMEOUT = 123, + NDO_DATA_TRIGGEREDBY = 124, + NDO_DATA_LONGOUTPUT = 125, + NDO_DATA_ACTIONURL = 126, + NDO_DATA_COMMANDNAME = 127, + NDO_DATA_CONTACTADDRESS = 128, + NDO_DATA_CONTACTALIAS = 129, + NDO_DATA_CONTACTGROUP = 130, + NDO_DATA_CONTACTGROUPALIAS = 131, + NDO_DATA_CONTACTGROUPMEMBER = 132, + NDO_DATA_CONTACTGROUPNAME = 133, + NDO_DATA_CONTACTNAME = 134, + NDO_DATA_DEPENDENCYTYPE = 135, + NDO_DATA_DEPENDENTHOSTNAME = 136, + NDO_DATA_DEPENDENTSERVICEDESCRIPTION = 137, + NDO_DATA_EMAILADDRESS = 138, + NDO_DATA_ESCALATEONCRITICAL = 139, + NDO_DATA_ESCALATEONDOWN = 140, + NDO_DATA_ESCALATEONRECOVERY = 141, + NDO_DATA_ESCALATEONUNKNOWN = 142, + NDO_DATA_ESCALATEONUNREACHABLE = 143, + NDO_DATA_ESCALATEONWARNING = 144, + NDO_DATA_ESCALATIONPERIOD = 145, + NDO_DATA_FAILONCRITICAL = 146, + NDO_DATA_FAILONDOWN = 147, + NDO_DATA_FAILONOK = 148, + NDO_DATA_FAILONUNKNOWN = 149, + NDO_DATA_FAILONUNREACHABLE = 150, + NDO_DATA_FAILONUP = 151, + NDO_DATA_FAILONWARNING = 152, + NDO_DATA_FIRSTNOTIFICATION = 153, + NDO_DATA_HAVE2DCOORDS = 154, + NDO_DATA_HAVE3DCOORDS = 155, + NDO_DATA_HIGHHOSTFLAPTHRESHOLD = 156, + NDO_DATA_HIGHSERVICEFLAPTHRESHOLD = 157, + NDO_DATA_HOSTADDRESS = 158, + NDO_DATA_HOSTALIAS = 159, + NDO_DATA_HOSTCHECKCOMMAND = 160, + NDO_DATA_HOSTCHECKINTERVAL = 161, + NDO_DATA_HOSTCHECKPERIOD = 162, + NDO_DATA_HOSTEVENTHANDLER = 163, + NDO_DATA_HOSTEVENTHANDLERENABLED = 164, + NDO_DATA_HOSTFAILUREPREDICTIONENABLED = 165, + NDO_DATA_HOSTFAILUREPREDICTIONOPTIONS = 166, + NDO_DATA_HOSTFLAPDETECTIONENABLED = 167, + NDO_DATA_HOSTFRESHNESSCHECKSENABLED = 168, + NDO_DATA_HOSTFRESHNESSTHRESHOLD = 169, + NDO_DATA_HOSTGROUPALIAS = 170, + NDO_DATA_HOSTGROUPMEMBER = 171, + NDO_DATA_HOSTGROUPNAME = 172, + NDO_DATA_HOSTMAXCHECKATTEMPTS = 173, + NDO_DATA_HOSTNAME = 174, + NDO_DATA_HOSTNOTIFICATIONCOMMAND = 175, + NDO_DATA_HOSTNOTIFICATIONINTERVAL = 176, + NDO_DATA_HOSTNOTIFICATIONPERIOD = 177, + NDO_DATA_HOSTNOTIFICATIONSENABLED = 178, + NDO_DATA_ICONIMAGE = 179, + NDO_DATA_ICONIMAGEALT = 180, + NDO_DATA_INHERITSPARENT = 181, + NDO_DATA_LASTNOTIFICATION = 182, + NDO_DATA_LOWHOSTFLAPTHRESHOLD = 183, + NDO_DATA_LOWSERVICEFLAPTHRESHOLD = 184, + NDO_DATA_MAXSERVICECHECKATTEMPTS = 185, + NDO_DATA_NOTES = 186, + NDO_DATA_NOTESURL = 187, + NDO_DATA_NOTIFICATIONINTERVAL = 188, + NDO_DATA_NOTIFYHOSTDOWN = 189, + NDO_DATA_NOTIFYHOSTFLAPPING = 190, + NDO_DATA_NOTIFYHOSTRECOVERY = 191, + NDO_DATA_NOTIFYHOSTUNREACHABLE = 192, + NDO_DATA_NOTIFYSERVICECRITICAL = 193, + NDO_DATA_NOTIFYSERVICEFLAPPING = 194, + NDO_DATA_NOTIFYSERVICERECOVERY = 195, + NDO_DATA_NOTIFYSERVICEUNKNOWN = 196, + NDO_DATA_NOTIFYSERVICEWARNING = 197, + NDO_DATA_PAGERADDRESS = 198, + NDO_DATA_PARALLELIZESERVICECHECK = 199, + NDO_DATA_PARENTHOST = 200, + NDO_DATA_PROCESSHOSTPERFORMANCEDATA = 201, + NDO_DATA_PROCESSSERVICEPERFORMANCEDATA = 202, + NDO_DATA_RETAINHOSTNONSTATUSINFORMATION = 203, + NDO_DATA_RETAINHOSTSTATUSINFORMATION = 204, + NDO_DATA_RETAINSERVICENONSTATUSINFORMATION = 205, + NDO_DATA_RETAINSERVICESTATUSINFORMATION = 206, + NDO_DATA_SERVICECHECKCOMMAND = 207, + NDO_DATA_SERVICECHECKINTERVAL = 208, + NDO_DATA_SERVICECHECKPERIOD = 209, + NDO_DATA_SERVICEDESCRIPTION = 210, + NDO_DATA_SERVICEEVENTHANDLER = 211, + NDO_DATA_SERVICEEVENTHANDLERENABLED = 212, + 
NDO_DATA_SERVICEFAILUREPREDICTIONENABLED = 213, + NDO_DATA_SERVICEFAILUREPREDICTIONOPTIONS = 214, + NDO_DATA_SERVICEFLAPDETECTIONENABLED = 215, + NDO_DATA_SERVICEFRESHNESSCHECKSENABLED = 216, + NDO_DATA_SERVICEFRESHNESSTHRESHOLD = 217, + NDO_DATA_SERVICEGROUPALIAS = 218, + NDO_DATA_SERVICEGROUPMEMBER = 219, + NDO_DATA_SERVICEGROUPNAME = 220, + NDO_DATA_SERVICEISVOLATILE = 221, + NDO_DATA_SERVICENOTIFICATIONCOMMAND = 222, + NDO_DATA_SERVICENOTIFICATIONINTERVAL = 223, + NDO_DATA_SERVICENOTIFICATIONPERIOD = 224, + NDO_DATA_SERVICENOTIFICATIONSENABLED = 225, + NDO_DATA_SERVICERETRYINTERVAL = 226, + NDO_DATA_SHOULDBEDRAWN = 227, + NDO_DATA_STALKHOSTONDOWN = 228, + NDO_DATA_STALKHOSTONUNREACHABLE = 229, + NDO_DATA_STALKHOSTONUP = 230, + NDO_DATA_STALKSERVICEONCRITICAL = 231, + NDO_DATA_STALKSERVICEONOK = 232, + NDO_DATA_STALKSERVICEONUNKNOWN = 233, + NDO_DATA_STALKSERVICEONWARNING = 234, + NDO_DATA_STATUSMAPIMAGE = 235, + NDO_DATA_TIMEPERIODALIAS = 236, + NDO_DATA_TIMEPERIODNAME = 237, + NDO_DATA_TIMERANGE = 238, + NDO_DATA_VRMLIMAGE = 239, + NDO_DATA_X2D = 240, + NDO_DATA_X3D = 241, + NDO_DATA_Y2D = 242, + NDO_DATA_Y3D = 243, + NDO_DATA_Z3D = 244, + NDO_DATA_CONFIGDUMPTYPE = 245, + NDO_DATA_FIRSTNOTIFICATIONDELAY = 246, + NDO_DATA_HOSTRETRYINTERVAL = 247, + NDO_DATA_NOTIFYHOSTDOWNTIME = 248, + NDO_DATA_NOTIFYSERVICEDOWNTIME = 249, + NDO_DATA_CANSUBMITCOMMANDS = 250, + NDO_DATA_FLAPDETECTIONONUP = 251, + NDO_DATA_FLAPDETECTIONONDOWN = 252, + NDO_DATA_FLAPDETECTIONONUNREACHABLE = 253, + NDO_DATA_FLAPDETECTIONONOK = 254, + NDO_DATA_FLAPDETECTIONONWARNING = 255, + NDO_DATA_FLAPDETECTIONONUNKNOWN = 256, + NDO_DATA_FLAPDETECTIONONCRITICAL = 257, + NDO_DATA_DISPLAYNAME = 258, + NDO_DATA_DEPENDENCYPERIOD = 259, + NDO_DATA_MODIFIEDCONTACTATTRIBUTE = 260, + NDO_DATA_MODIFIEDCONTACTATTRIBUTES = 261, + NDO_DATA_CUSTOMVARIABLE = 262, + NDO_DATA_HASBEENMODIFIED = 263, + NDO_DATA_CONTACT = 264, + NDO_DATA_LASTSTATE = 265, + NDO_DATA_INSTANCE = 266, + NDO_DATA_HOSTID = 267, + NDO_DATA_SERVICEID = 268, + NDO_DATA_LASTUPDATE = 269, + NDO_DATA_ACTUALENDTIME = 270, + NDO_DATA_ACTUALSTARTTIME = 271 +} + +return ndo diff --git a/stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua b/stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua new file mode 100644 index 00000000000..40a1849d2a1 --- /dev/null +++ b/stream-connectors/centreon-certified/ndo/ndo-output-apiv1.lua @@ -0,0 +1,723 @@ +local socket = require "socket" + +-- Specifying where is the module to load +package.path = package.path .. 
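+-- Note: the appended package.path entry contains no "?" placeholder, so Lua's loader
+-- uses it as a literal filename; any module name that falls through to this entry
+-- (here, require "ndo" below) loads /usr/share/centreon-broker/lua/ndo-module.lua directly.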
";/usr/share/centreon-broker/lua/ndo-module.lua" +local NDO = require "ndo" + +local ndo = { + [65537] = { + id = 1, + ndo_api_id = NDO.api.NDO_API_ACKNOWLEDGEMENTDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_AUTHORNAME, tag = "author" }, + { ndo_data = NDO.data.NDO_DATA_COMMENT, tag = "comment_data" }, + { ndo_data = NDO.data.NDO_DATA_EXPIRATIONTIME, tag = "deletion_time" }, + { ndo_data = NDO.data.NDO_DATA_TIMESTAMP, tag = "entry_time" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNAME, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_STICKY, tag = "sticky" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYCONTACTS, tag = "notify_contacts" }, + { ndo_data = NDO.data.NDO_DATA_PERSISTENT, tag = "persistent_comment" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_STATE, tag = "state" }, + } + }, + [65538] = { + id = 2, + ndo_api_id = NDO.api.NDO_API_COMMENTDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_AUTHORNAME, tag = "author" }, + { ndo_data = NDO.data.NDO_DATA_COMMENTTYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "deletion_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "entry_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTYPE, tag = "entry_type" }, + { ndo_data = NDO.data.NDO_DATA_EXPIRATIONTIME, tag = "expire_time" }, + { ndo_data = NDO.data.NDO_DATA_EXPIRES, tag = "expires" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNAME, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_COMMENTID, tag = "internal_id" }, + { ndo_data = NDO.data.NDO_DATA_PERSISTENT, tag = "persistent" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_SOURCE, tag = "source" }, + { ndo_data = NDO.data.NDO_DATA_COMMENT, tag = "data" }, + } + }, + [65539] = { + id = 3, + ndo_api_id = NDO.api.NDO_API_RUNTIMEVARIABLES, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENMODIFIED, tag = "modified" }, + { ndo_data = NDO.data.NDO_DATA_CONFIGFILENAME, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "update_time" }, + { ndo_data = NDO.data.NDO_DATA_TYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "value" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVEHOSTCHECKSENABLED, tag = "default_value" }, + } + }, + [65540] = { + id = 4, + ndo_api_id = NDO.api.NDO_API_CONFIGVARIABLES, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENMODIFIED, tag = "modified" }, + { ndo_data = NDO.data.NDO_DATA_CONFIGFILENAME, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "update_time" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "value" }, + } + }, + [65541] = { + id = 5, + ndo_api_id = NDO.api.NDO_API_DOWNTIMEDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_ACTUALENDTIME, tag = "actual_end_time" }, + { ndo_data = NDO.data.NDO_DATA_ACTUALSTARTTIME, tag = "actual_start_time" }, + { ndo_data = NDO.data.NDO_DATA_AUTHORNAME, tag = "author" }, + { ndo_data = NDO.data.NDO_DATA_DOWNTIMETYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_EXPIRATIONTIME, tag = "deletion_time" }, + 
{ ndo_data = NDO.data.NDO_DATA_DURATION, tag = "duration" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "end_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "entry_time" }, + { ndo_data = NDO.data.NDO_DATA_FIXED, tag = "fixed" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNAME, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_DOWNTIMEID, tag = "internal_id" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_STARTTIME, tag = "start_time" }, + { ndo_data = NDO.data.NDO_DATA_TRIGGEREDBY, tag = "triggered_by" }, + { ndo_data = NDO.data.NDO_DATA_X3D, tag = "cancelled" }, + { ndo_data = NDO.data.NDO_DATA_Y3D, tag = "started" }, + { ndo_data = NDO.data.NDO_DATA_COMMENT, tag = "comment_data" }, + } + }, + [65542] = { + id = 6, + ndo_api_id = NDO.api.NDO_API_EVENTHANDLERDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_EARLYTIMEOUT, tag = "early_timeout" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "end_time" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = NDO.data.NDO_DATA_TYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_RETURNCODE, tag = "return_code" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_STARTTIME, tag = "start_time" }, + { ndo_data = NDO.data.NDO_DATA_STATE, tag = "state" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_TIMEOUT, tag = "timeout" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDARGS, tag = "command_args" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDLINE, tag = "command_line" }, + { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + } + }, + [65543] = { + id = 7, + ndo_api_id = NDO.api.NDO_API_FLAPPINGDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_COMMENTTIME, tag = "comment_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTIME, tag = "event_time" }, + { ndo_data = NDO.data.NDO_DATA_ENTRYTYPE, tag = "event_type" }, + { ndo_data = NDO.data.NDO_DATA_TYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_HIGHTHRESHOLD, tag = "high_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_COMMENTID, tag = "internal_comment_id" }, + { ndo_data = NDO.data.NDO_DATA_LOWTHRESHOLD, tag = "low_threshold" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONREASON, tag = "reason_type" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + } + }, + [65544] = { + id = 8, + ndo_api_id = NDO.api.NDO_API_HOSTDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "acknowledgement_type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIONURL, tag = "action_url" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVEHOSTCHECKSENABLED, tag = "active_checks" }, + { ndo_data = NDO.data.NDO_DATA_HOSTADDRESS, tag = "address" }, + { ndo_data = NDO.data.NDO_DATA_HOSTALIAS, tag = "alias" }, + { ndo_data = NDO.data.NDO_DATA_HOSTFRESHNESSCHECKSENABLED, tag = "check_freshness" }, + { ndo_data = NDO.data.NDO_DATA_NORMALCHECKINTERVAL, tag = "check_interval" }, + { ndo_data = NDO.data.NDO_DATA_HOSTCHECKPERIOD, tag = "check_period" }, + { ndo_data = NDO.data.NDO_DATA_CHECKTYPE, tag = "check_type" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTCHECKATTEMPT, tag = "check_attempt" }, + { ndo_data = 
NDO.data.NDO_DATA_CURRENTNOTIFICATIONNUMBER, tag = "notification_number" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTSTATE, tag = "state" }, + { ndo_data = 0, tag = "default_active_checks" }, + { ndo_data = 0, tag = "default_event_handler_enabled" }, + { ndo_data = 0, tag = "default_failure_prediction" }, + { ndo_data = 0, tag = "default_flap_detection" }, + { ndo_data = 0, tag = "default_notify" }, + { ndo_data = 0, tag = "default_passive_checks" }, + { ndo_data = 0, tag = "default_process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_DISPLAYNAME, tag = "display_name" }, + { ndo_data = NDO.data.NDO_DATA_X3D, tag = "enabled" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLER, tag = "event_handler" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handler_enabled" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" }, + { ndo_data = NDO.data.NDO_DATA_FIRSTNOTIFICATIONDELAY, tag = "first_notification_delay" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONDOWN, tag = "flap_detection_on_down" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONUNREACHABLE, tag = "flap_detection_on_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONUP, tag = "flap_detection_on_up" }, + { ndo_data = NDO.data.NDO_DATA_HOSTFRESHNESSTHRESHOLD, tag = "freshness_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENCHECKED, tag = "checked" }, + { ndo_data = NDO.data.NDO_DATA_HIGHHOSTFLAPTHRESHOLD, tag = "high_flap_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNAME, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_ICONIMAGE, tag = "icon_image" }, + { ndo_data = NDO.data.NDO_DATA_ICONIMAGEALT, tag = "icon_image_alt" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_ISFLAPPING, tag = "flapping" }, + { ndo_data = NDO.data.NDO_DATA_LASTHOSTCHECK, tag = "last_check" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATE, tag = "last_hard_state" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATECHANGE, tag = "last_hard_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTHOSTNOTIFICATION, tag = "last_notification" }, + { ndo_data = NDO.data.NDO_DATA_LASTSTATECHANGE, tag = "last_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEDOWN, tag = "last_time_down" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUNREACHABLE, tag = "last_time_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUP, tag = "last_time_up" }, + { ndo_data = NDO.data.NDO_DATA_LASTUPDATE, tag = "last_update" }, + { ndo_data = NDO.data.NDO_DATA_LATENCY, tag = "latency" }, + { ndo_data = NDO.data.NDO_DATA_LOWHOSTFLAPTHRESHOLD, tag = "low_flap_threshold" }, + { ndo_data = NDO.data.NDO_DATA_MAXCHECKATTEMPTS, tag = "max_check_attempts" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDHOSTATTRIBUTES, tag = "modified_attributes" }, + { ndo_data = NDO.data.NDO_DATA_NEXTHOSTCHECK, tag = "next_check" }, + { ndo_data = NDO.data.NDO_DATA_NEXTHOSTNOTIFICATION, tag = "next_host_notification" }, + { ndo_data = NDO.data.NDO_DATA_NOMORENOTIFICATIONS, tag = "no_more_notifications" }, + { ndo_data = NDO.data.NDO_DATA_NOTES, tag = "notes" }, + { ndo_data = NDO.data.NDO_DATA_NOTESURL, tag = "notes_url" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNOTIFICATIONINTERVAL, tag = "notification_interval" }, + { ndo_data = 
NDO.data.NDO_DATA_HOSTNOTIFICATIONPERIOD, tag = "notification_period" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notify" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTDOWN, tag = "notify_on_down" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTDOWNTIME, tag = "notify_on_downtime" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTFLAPPING, tag = "notify_on_flapping" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTRECOVERY, tag = "notify_on_recovery" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYHOSTUNREACHABLE, tag = "notify_on_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERHOST, tag = "obsess_over_host" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVEHOSTCHECKSENABLED, tag = "passive_checks" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_PROBLEMHASBEENACKNOWLEDGED, tag = "acknowledged" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_RETAINHOSTNONSTATUSINFORMATION, tag = "retain_nonstatus_information" }, + { ndo_data = NDO.data.NDO_DATA_RETAINHOSTSTATUSINFORMATION, tag = "retain_status_information" }, + { ndo_data = NDO.data.NDO_DATA_RETRYCHECKINTERVAL, tag = "retry_interval" }, + { ndo_data = NDO.data.NDO_DATA_SCHEDULEDDOWNTIMEDEPTH, tag = "scheduled_downtime_depth" }, + { ndo_data = NDO.data.NDO_DATA_SHOULDBESCHEDULED, tag = "should_be_scheduled" }, + { ndo_data = NDO.data.NDO_DATA_STALKHOSTONDOWN, tag = "stalk_on_down" }, + { ndo_data = NDO.data.NDO_DATA_STALKHOSTONUNREACHABLE, tag = "stalk_on_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_STALKHOSTONUP, tag = "stalk_on_up" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_STATUSMAPIMAGE, tag = "statusmap_image" }, + { ndo_data = NDO.data.NDO_DATA_CHECKCOMMAND, tag = "check_command" }, + { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + { ndo_data = NDO.data.NDO_DATA_PERFDATA, tag = "perfdata" }, + } + }, + [65545] = { + id = 9, + ndo_api_id = NDO.api.NDO_API_HOSTCHECKDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDLINE, tag = "command_line" }, + } + }, + [65546] = { + id = 10, + ndo_api_id = NDO.api.NDO_API_HOSTDEPENDENCYDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_DEPENDENCYPERIOD, tag = "dependency_period" }, + { ndo_data = NDO.data.NDO_DATA_DEPENDENTHOSTNAME, tag = "dependent_host_id" }, + { ndo_data = NDO.data.NDO_DATA_HOSTFAILUREPREDICTIONOPTIONS, tag = "execution_failure_options" }, + { ndo_data = NDO.data.NDO_DATA_INHERITSPARENT, tag = "inherits_parent" }, + { ndo_data = NDO.data.NDO_DATA_HOSTNOTIFICATIONCOMMAND, tag = "notification_failure_options" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + } + }, + [65547] = { + id = 11, + ndo_api_id = NDO.api.NDO_API_HOSTGROUPDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_HOSTID, tag = "hostgroup_id" }, + } + }, + [65548] = { + id = 12, + ndo_api_id = NDO.api.NDO_API_HOSTGROUPMEMBERDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_HOSTGROUPNAME, tag = "hostgroup_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "host_id" }, + } + }, + [65549] = { + id = 13, + ndo_api_id = NDO.api.NDO_API_HOSTPARENT, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "child_id" }, + { ndo_data = NDO.data.NDO_DATA_PARENTHOST, tag = "parent_id" }, + } + }, + [65550] = { + id = 14, + ndo_api_id = 
NDO.api.NDO_API_HOSTSTATUSDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "acknowledgement_type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVEHOSTCHECKSENABLED, tag = "active_checks" }, + { ndo_data = NDO.data.NDO_DATA_NORMALCHECKINTERVAL, tag = "check_interval" }, + { ndo_data = NDO.data.NDO_DATA_HOSTCHECKPERIOD, tag = "check_period" }, + { ndo_data = NDO.data.NDO_DATA_CHECKTYPE, tag = "check_type" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTCHECKATTEMPT, tag = "check_attempt" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTNOTIFICATIONNUMBER, tag = "notification_number" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTSTATE, tag = "state" }, + { ndo_data = NDO.data.NDO_DATA_X3D, tag = "enabled" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLER, tag = "event_handler" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handler_enabled" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENCHECKED, tag = "checked" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_ISFLAPPING, tag = "flapping" }, + { ndo_data = NDO.data.NDO_DATA_LASTHOSTCHECK, tag = "last_check" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATE, tag = "last_hard_state" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATECHANGE, tag = "last_hard_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTHOSTNOTIFICATION, tag = "last_notification" }, + { ndo_data = NDO.data.NDO_DATA_LASTSTATECHANGE, tag = "last_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEDOWN, tag = "last_time_down" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUNREACHABLE, tag = "last_time_unreachable" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUP, tag = "last_time_up" }, + { ndo_data = NDO.data.NDO_DATA_LASTUPDATE, tag = "last_update" }, + { ndo_data = NDO.data.NDO_DATA_LATENCY, tag = "latency" }, + { ndo_data = NDO.data.NDO_DATA_MAXCHECKATTEMPTS, tag = "max_check_attempts" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDHOSTATTRIBUTES, tag = "modified_attributes" }, + { ndo_data = NDO.data.NDO_DATA_NEXTHOSTCHECK, tag = "next_check" }, + { ndo_data = NDO.data.NDO_DATA_NEXTHOSTNOTIFICATION, tag = "next_host_notification" }, + { ndo_data = NDO.data.NDO_DATA_NOMORENOTIFICATIONS, tag = "no_more_notifications" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notify" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERHOST, tag = "obsess_over_host" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVEHOSTCHECKSENABLED, tag = "passive_checks" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_PROBLEMHASBEENACKNOWLEDGED, tag = "acknowledged" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_RETRYCHECKINTERVAL, tag = "retry_interval" }, + { ndo_data = NDO.data.NDO_DATA_SCHEDULEDDOWNTIMEDEPTH, tag = "scheduled_downtime_depth" }, + { ndo_data = NDO.data.NDO_DATA_SHOULDBESCHEDULED, tag = "should_be_scheduled" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_CHECKCOMMAND, tag = "check_command" }, + { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + { ndo_data = NDO.data.NDO_DATA_PERFDATA, tag = "perfdata" }, + } + }, + [65551] = { + id = 15, + ndo_api_id = 
NDO.api.NDO_API_PROCESSDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_STATE, tag = "engine" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_PROGRAMNAME, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_RUNTIME, tag = "running" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSID, tag = "pid" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "end_time" }, + { ndo_data = NDO.data.NDO_DATA_PROGRAMSTARTTIME, tag = "start_time" }, + { ndo_data = NDO.data.NDO_DATA_PROGRAMVERSION, tag = "version" }, + } + }, + [65552] = { + id = 16, + ndo_api_id = NDO.api.NDO_API_PROGRAMSTATUSDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_ACTIVEHOSTCHECKSENABLED, tag = "active_host_checks" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "active_service_checks" }, + { ndo_data = NDO.data.NDO_DATA_HOSTADDRESS, tag = "address" }, + { ndo_data = NDO.data.NDO_DATA_HOSTFRESHNESSCHECKSENABLED, tag = "check_hosts_freshness" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFRESHNESSCHECKSENABLED, tag = "check_services_freshness" }, + { ndo_data = NDO.data.NDO_DATA_DAEMONMODE, tag = "daemon_mode" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "description" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handlers" }, + { ndo_data = NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_LASTSTATE, tag = "last_alive" }, + { ndo_data = NDO.data.NDO_DATA_LASTCOMMANDCHECK, tag = "last_command_check" }, + { ndo_data = NDO.data.NDO_DATA_LASTLOGROTATION, tag = "last_log_rotation" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDHOSTATTRIBUTES, tag = "modified_host_attributes" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDSERVICEATTRIBUTES, tag = "modified_service_attributes" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notifications" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERHOST, tag = "obsess_over_hosts" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERSERVICE, tag = "obsess_over_services" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVEHOSTCHECKSENABLED, tag = "passive_host_checks" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVESERVICECHECKSENABLED, tag = "passive_service_checks" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_GLOBALHOSTEVENTHANDLER, tag = "global_host_event_handler" }, + { ndo_data = NDO.data.NDO_DATA_GLOBALSERVICEEVENTHANDLER, tag = "global_service_event_handler" }, + } + }, + [65553] = { + id = 17, + ndo_api_id = NDO.api.NDO_API_COMMANDDEFINITION, + key = { + { ndo_data = 1, tag = "args" }, + { ndo_data = 2, tag = "filename" }, + { ndo_data = 3, tag = "instance_id" }, + { ndo_data = 4, tag = "loaded" }, + { ndo_data = 5, tag = "should_be_loaded" }, + } + }, + [65554] = { + id = 18, + ndo_api_id = NDO.api.NDO_API_NOTIFICATIONDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_CONTACTSNOTIFIED, tag = "contacts_notified" }, + { ndo_data = NDO.data.NDO_DATA_ENDTIME, tag = "end_time" }, + { ndo_data = NDO.data.NDO_DATA_ESCALATED, tag = "escalated" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONTYPE, tag = "type" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONREASON, tag = "reason_type" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = 
NDO.data.NDO_DATA_STARTTIME, tag = "start_time" }, + { ndo_data = NDO.data.NDO_DATA_STATE, tag = "state" }, + { ndo_data = NDO.data.NDO_DATA_ACKAUTHOR, tag = "ack_author" }, + { ndo_data = NDO.data.NDO_DATA_ACKDATA, tag = "ack_data" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDNAME, tag = "command_name" }, + { ndo_data = NDO.data.NDO_DATA_CONTACTNAME, tag = "contact_name" }, + { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + } + }, + [65555] = { + id = 19, + ndo_api_id = NDO.api.NDO_API_SERVICEDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "acknowledgement_type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIONURL, tag = "action_url" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "active_checks" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFRESHNESSCHECKSENABLED, tag = "check_freshness" }, + { ndo_data = NDO.data.NDO_DATA_NORMALCHECKINTERVAL, tag = "check_interval" }, + { ndo_data = NDO.data.NDO_DATA_SERVICECHECKPERIOD, tag = "check_period" }, + { ndo_data = NDO.data.NDO_DATA_CHECKTYPE, tag = "check_type" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTCHECKATTEMPT, tag = "check_attempt" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTNOTIFICATIONNUMBER, tag = "notification_number" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTSTATE, tag = "state" }, + { ndo_data = 0, tag = "default_active_checks" }, + { ndo_data = 0, tag = "default_event_handler_enabled" }, + { ndo_data = 0, tag = "default_failure_prediction" }, + { ndo_data = 0, tag = "default_flap_detection" }, + { ndo_data = 0, tag = "default_notify" }, + { ndo_data = 0, tag = "default_passive_checks" }, + { ndo_data = 0, tag = "default_process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_DISPLAYNAME, tag = "display_name" }, + { ndo_data = NDO.data.NDO_DATA_X3D, tag = "enabled" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLER, tag = "event_handler" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handler_enabled" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFAILUREPREDICTIONOPTIONS, tag = "failure_prediction_options" }, + { ndo_data = NDO.data.NDO_DATA_FIRSTNOTIFICATIONDELAY, tag = "first_notification_delay" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONCRITICAL, tag = "flap_detection_on_critical" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONOK, tag = "flap_detection_on_ok" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONUNKNOWN, tag = "flap_detection_on_unknown" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONONWARNING, tag = "flap_detection_on_warning" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFRESHNESSTHRESHOLD, tag = "freshness_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENCHECKED, tag = "checked" }, + { ndo_data = NDO.data.NDO_DATA_HIGHSERVICEFLAPTHRESHOLD, tag = "high_flap_threshold" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_ICONIMAGE, tag = "icon_image" }, + { ndo_data = NDO.data.NDO_DATA_ICONIMAGEALT, tag = "icon_image_alt" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_ISFLAPPING, tag = "flapping" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEISVOLATILE, tag = "volatile" }, + { ndo_data = NDO.data.NDO_DATA_LASTSERVICECHECK, tag = "last_check" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATE, tag = 
"last_hard_state" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATECHANGE, tag = "last_hard_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTSERVICENOTIFICATION, tag = "last_notification" }, + { ndo_data = NDO.data.NDO_DATA_LASTSTATECHANGE, tag = "last_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMECRITICAL, tag = "last_time_critical" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEOK, tag = "last_time_ok" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUNKNOWN, tag = "last_time_unknown" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEWARNING, tag = "last_time_warning" }, + { ndo_data = NDO.data.NDO_DATA_LASTUPDATE, tag = "last_update" }, + { ndo_data = NDO.data.NDO_DATA_LATENCY, tag = "latency" }, + { ndo_data = NDO.data.NDO_DATA_LOWSERVICEFLAPTHRESHOLD, tag = "low_flap_threshold" }, + { ndo_data = NDO.data.NDO_DATA_MAXCHECKATTEMPTS, tag = "max_check_attempts" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDSERVICEATTRIBUTES, tag = "modified_attributes" }, + { ndo_data = NDO.data.NDO_DATA_NEXTSERVICECHECK, tag = "next_check" }, + { ndo_data = NDO.data.NDO_DATA_NEXTSERVICENOTIFICATION, tag = "next_notification" }, + { ndo_data = NDO.data.NDO_DATA_NOMORENOTIFICATIONS, tag = "no_more_notifications" }, + { ndo_data = NDO.data.NDO_DATA_NOTES, tag = "notes" }, + { ndo_data = NDO.data.NDO_DATA_NOTESURL, tag = "notes_url" }, + { ndo_data = NDO.data.NDO_DATA_SERVICENOTIFICATIONINTERVAL, tag = "notification_interval" }, + { ndo_data = NDO.data.NDO_DATA_SERVICENOTIFICATIONPERIOD, tag = "notification_period" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notify" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICECRITICAL, tag = "notify_on_critical" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICEDOWNTIME, tag = "notify_on_downtime" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICEFLAPPING, tag = "notify_on_flapping" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICERECOVERY, tag = "notify_on_recovery" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICEUNKNOWN, tag = "notify_on_unknown" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFYSERVICEWARNING, tag = "notify_on_warning" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERSERVICE, tag = "obsess_over_service" }, + { ndo_data = NDO.data.NDO_DATA_PASSIVESERVICECHECKSENABLED, tag = "passive_checks" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_PROBLEMHASBEENACKNOWLEDGED, tag = "acknowledged" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_RETAINSERVICENONSTATUSINFORMATION, tag = "retain_nonstatus_information" }, + { ndo_data = NDO.data.NDO_DATA_RETAINSERVICESTATUSINFORMATION, tag = "retain_status_information" }, + { ndo_data = NDO.data.NDO_DATA_RETRYCHECKINTERVAL, tag = "retry_interval" }, + { ndo_data = NDO.data.NDO_DATA_SCHEDULEDDOWNTIMEDEPTH, tag = "scheduled_downtime_depth" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "description" }, + { ndo_data = NDO.data.NDO_DATA_SHOULDBESCHEDULED, tag = "should_be_scheduled" }, + { ndo_data = NDO.data.NDO_DATA_STALKSERVICEONCRITICAL, tag = "stalk_on_critical" }, + { ndo_data = NDO.data.NDO_DATA_STALKSERVICEONOK, tag = "stalk_on_ok" }, + { ndo_data = NDO.data.NDO_DATA_STALKSERVICEONUNKNOWN, tag = "stalk_on_unknown" }, + { ndo_data = NDO.data.NDO_DATA_STALKSERVICEONWARNING, tag = "stalk_on_warning" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_CHECKCOMMAND, tag = "check_command" }, + { 
ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" }, + { ndo_data = NDO.data.NDO_DATA_PERFDATA, tag = "perfdata" }, + } + }, + [65556] = { + id = 20, + ndo_api_id = NDO.api.NDO_API_SERVICECHECKDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_COMMANDLINE, tag = "command_line" }, + } + }, + [65557] = { + id = 21, + ndo_api_id = NDO.api.NDO_API_SERVICEDEPENDENCYDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_DEPENDENCYPERIOD, tag = "dependency_period" }, + { ndo_data = NDO.data.NDO_DATA_DEPENDENTHOSTNAME, tag = "dependent_host_id" }, + { ndo_data = NDO.data.NDO_DATA_DEPENDENTSERVICEDESCRIPTION, tag = "dependent_service_id" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEFAILUREPREDICTIONOPTIONS, tag = "execution_failure_options" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INHERITSPARENT, tag = "inherits_parent" }, + { ndo_data = NDO.data.NDO_DATA_SERVICENOTIFICATIONCOMMAND, tag = "notification_failure_options" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + } + }, + [65558] = { + id = 22, + ndo_api_id = NDO.api.NDO_API_SERVICEGROUPDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "name" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEID, tag = "servicegroup_id" }, + } + }, + [65559] = { + id = 23, + ndo_api_id = NDO.api.NDO_API_SERVICEGROUPMEMBERDEFINITION, + key = { + { ndo_data = NDO.data.NDO_DATA_SERVICEGROUPNAME, tag = "servicegroup_id" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "service_id" }, + } + }, + [65560] = { + id = 24, + ndo_api_id = NDO.api.NDO_API_SERVICESTATUSDATA, + key = { + { ndo_data = NDO.data.NDO_DATA_PASSIVESERVICECHECKSENABLED, tag = "passive_checks" }, + { ndo_data = NDO.data.NDO_DATA_PERCENTSTATECHANGE, tag = "percent_state_change" }, + { ndo_data = NDO.data.NDO_DATA_PERFDATA, tag = "perfdata" }, + { ndo_data = NDO.data.NDO_DATA_PROBLEMHASBEENACKNOWLEDGED, tag = "acknowledged" }, + { ndo_data = NDO.data.NDO_DATA_PROCESSPERFORMANCEDATA, tag = "process_perfdata" }, + { ndo_data = NDO.data.NDO_DATA_ACKNOWLEDGEMENTTYPE, tag = "acknowledgement_type" }, + { ndo_data = NDO.data.NDO_DATA_ACTIVESERVICECHECKSENABLED, tag = "active_checks" }, + { ndo_data = NDO.data.NDO_DATA_CHECKCOMMAND, tag = "check_command" }, + { ndo_data = NDO.data.NDO_DATA_RETRYCHECKINTERVAL, tag = "retry_interval" }, + { ndo_data = NDO.data.NDO_DATA_CHECKTYPE, tag = "check_type" }, + { ndo_data = NDO.data.NDO_DATA_SERVICECHECKPERIOD, tag = "check_period" }, + { ndo_data = NDO.data.NDO_DATA_SERVICEDESCRIPTION, tag = "service_description" }, + { ndo_data = NDO.data.NDO_DATA_SCHEDULEDDOWNTIMEDEPTH, tag = "scheduled_downtime_depth" }, + { ndo_data = NDO.data.NDO_DATA_SERVICE, tag = "service_id" }, + { ndo_data = NDO.data.NDO_DATA_SHOULDBESCHEDULED, tag = "should_be_scheduled" }, + { ndo_data = NDO.data.NDO_DATA_STATETYPE, tag = "state_type" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTCHECKATTEMPT, tag = "check_attempt" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTNOTIFICATIONNUMBER, tag = "notification_number" }, + { ndo_data = NDO.data.NDO_DATA_CURRENTSTATE, tag = "state" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLER, tag = "event_handler" }, + { ndo_data = NDO.data.NDO_DATA_EVENTHANDLERENABLED, tag = "event_handler_enabled" }, + { ndo_data = NDO.data.NDO_DATA_EXECUTIONTIME, tag = "execution_time" }, + { ndo_data = 
NDO.data.NDO_DATA_FAILUREPREDICTIONENABLED, tag = "failure_prediction" }, + { ndo_data = NDO.data.NDO_DATA_X3D, tag = "enabled" }, + { ndo_data = NDO.data.NDO_DATA_FLAPDETECTIONENABLED, tag = "flap_detection" }, + { ndo_data = NDO.data.NDO_DATA_HASBEENCHECKED, tag = "checked" }, + { ndo_data = NDO.data.NDO_DATA_HOST, tag = "host_id" }, + { ndo_data = NDO.data.NDO_DATA_ISFLAPPING, tag = "flapping" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATE, tag = "last_hard_state" }, + { ndo_data = NDO.data.NDO_DATA_LASTHARDSTATECHANGE, tag = "last_hard_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTSERVICECHECK, tag = "last_check" }, + { ndo_data = NDO.data.NDO_DATA_LASTSERVICENOTIFICATION, tag = "last_notification" }, + { ndo_data = NDO.data.NDO_DATA_LASTSTATECHANGE, tag = "last_state_change" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMECRITICAL, tag = "last_time_critical" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEOK, tag = "last_time_ok" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEUNKNOWN, tag = "last_time_unknown" }, + { ndo_data = NDO.data.NDO_DATA_LASTTIMEWARNING, tag = "last_time_warning" }, + { ndo_data = NDO.data.NDO_DATA_LATENCY, tag = "latency" }, + { ndo_data = NDO.data.NDO_DATA_INSTANCE, tag = "instance_id" }, + { ndo_data = NDO.data.NDO_DATA_HOSTID, tag = "hostname" }, + { ndo_data = NDO.data.NDO_DATA_LASTUPDATE, tag = "last_update" }, + { ndo_data = NDO.data.NDO_DATA_MAXCHECKATTEMPTS, tag = "max_check_attempts" }, + { ndo_data = NDO.data.NDO_DATA_MODIFIEDSERVICEATTRIBUTES, tag = "modified_attributes" }, + { ndo_data = NDO.data.NDO_DATA_NEXTSERVICECHECK, tag = "next_check" }, + { ndo_data = NDO.data.NDO_DATA_NEXTSERVICENOTIFICATION, tag = "next_notification" }, + { ndo_data = NDO.data.NDO_DATA_NOMORENOTIFICATIONS, tag = "no_more_notifications" }, + { ndo_data = NDO.data.NDO_DATA_NORMALCHECKINTERVAL, tag = "check_interval" }, + { ndo_data = NDO.data.NDO_DATA_NOTIFICATIONSENABLED, tag = "notify" }, + { ndo_data = NDO.data.NDO_DATA_OBSESSOVERSERVICE, tag = "obsess_over_service" }, + { ndo_data = NDO.data.NDO_DATA_OUTPUT, tag = "output" } + } + } +} + +local function join(tab) + table.sort(tab) + +end + +local custom_output = { + hostname = function (ndo_data, d) + return ndo_data .. "=" .. tostring(broker_cache:get_hostname(d.host_id)) .. "\n" + end, + process_perfdata = function (ndo_data, d) + return ndo_data .. "=1\n" + end, + service_description = function (ndo_data, d) + return ndo_data .. "=" .. tostring(broker_cache:get_service_description(d.host_id, d.service_id)) .. "\n" + end, + default = function (ndo_data, d) + return ndo_data .. "=0\n" + end, +} + +-- Obsolete things and some initializations +custom_output.last_notification = custom_output.default +custom_output.last_time_ok = custom_output.default +custom_output.last_time_warning = custom_output.default +custom_output.last_time_critical = custom_output.default +custom_output.last_time_unknown = custom_output.default +custom_output.next_notification = custom_output.default +custom_output.modified_attributes = custom_output.default +custom_output.failure_prediction = custom_output.default +custom_output.instance_id = custom_output.default + +local function get_ndo_msg(d) + local t = d.type + if ndo[t] then + local output = "\n" .. ndo[t].ndo_api_id .. ":\n" + local key = ndo[t].key + for i,v in ipairs(key) do + if d[v.tag] ~= nil then + local value = d[v.tag] + if type(value) == "boolean" then + if value then value = "1" else value = "0" end + end + value = tostring(value):gsub("\n", "\\n") + output = output .. 
v.ndo_data .. "=" .. tostring(value) .. "\n" + else + if custom_output[v.tag] then + output = output .. custom_output[v.tag](v.ndo_data, d) + else + output = output .. tostring(v.ndo_data) .. "(index=" .. i .. ") =UNKNOWN (" .. v.tag .. ")\n" + broker_log:warning(1, "The event does not contain an item " .. v.tag) + end + end + end + output = output .. NDO.api.NDO_API_ENDDATA .. "\n\n" + return output + else + return nil + end +end + +local data = { + max_row = 1, + rows = {} +} + +local function connect() + data.socket, err = socket.connect(data.ipaddr, data.port) + if not data.socket then + local msg = "Unable to establish connection on server " .. data.ipaddr .. ":" .. data.port .. ": " .. err + broker_log:error(1, msg) + end +end + +-------------------------------------------------------------------------------- +-- Initialization of the module +-- @param conf A table containing data entered by the user through the GUI +-------------------------------------------------------------------------------- +function init(conf) + -- broker_ndo initialization + broker_log:set_parameters(1, '/var/log/centreon-broker/ndo-output.log') + if conf['ipaddr'] and conf['ipaddr'] ~= "" then + data.ipaddr = conf['ipaddr'] + else + error("Unable to find the 'ipaddr' value of type 'string'") + end + + if conf['port'] and conf['port'] ~= "" then + data.port = conf['port'] + else + error("Unable to find the 'port' value of type 'number'") + end + + if conf['max-row'] then + data.max_row = conf['max-row'] + else + error("Unable to find the 'max-row' value of type 'number'") + end + connect() +end + +-------------------------------------------------------------------------------- +-- Called when the data limit count is reached. +-------------------------------------------------------------------------------- +local function flush() + if #data.rows > 0 then + if not data.socket then + connect() + end + if data.socket then + for k,v in ipairs(data.rows) do + local msg = get_ndo_msg(v) + if msg then + local l, err = data.socket:send(msg) + if not l then + broker_log:error(2, "Unable to send data to socket :" .. err) + data.socket = nil + end + else + broker_log:info(1, "Unable to write event of cat " .. v.category .. " elem " .. v.element) + end + end + data.rows = {} + end + end + return true +end + +-------------------------------------------------------------------------------- +-- Function attached to the write event. +-------------------------------------------------------------------------------- +function write(d) + if d.category ~= 1 or d.element ~= 24 then + return true + end + data.rows[#data.rows + 1] = d + + if #data.rows >= data.max_row then + return flush() + end + return true +end + diff --git a/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua b/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua new file mode 100644 index 00000000000..38b77169d53 --- /dev/null +++ b/stream-connectors/centreon-certified/omi/omi_connector-apiv1.lua @@ -0,0 +1,151 @@ +-- +-- Copyright 2018 Centreon +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +-- For more information : contact@centreon.com +-- +-- To work you need to provide to this script a Broker stream connector output configuration +-- with the following information: +-- +-- source_ci (string): name of the transmitter, usually the Centreon server name +-- ipaddr (string): the IP address of the operation connector server +-- url (string): url of the operation connector endpoint +-- logfile (string): the log file to use +-- loglevel (number): the log level (0, 1, 2, 3) where 3 is the maximum level +-- port (number): the operation connector server port +-- max_size (number): how many events to store before sending them to the server. +-- max_age (number): flush the events when the specified time (in seconds) is reached (even if max_size is not reached). + +local http = require("socket.http") +local ltn12 = require("ltn12") + +-- default values, overwritten if set in the GUI (Broker stream connector output configuration) +local my_data = { + source_ci = "Centreon", + ipaddr = "192.168.56.15", + url = "/bsmc/rest/events/opscx-sdk/v1/", + logfile = "/var/log/centreon-broker/omi_connector.log", + loglevel = 2, --set log level (0, 1, 2, 3) where 3 is the maximum level + port = 30005, + max_size = 5, + max_age = 60, + flush_time = os.time(), + data = {} +} + +-- initialization of parameters if set in the GUI +function init(conf) + if conf.logfile then + my_data.logfile = conf.logfile + end + if conf.loglevel then + my_data.loglevel = conf.loglevel + end + if conf.ipaddr then + my_data.ipaddr = conf.ipaddr + end + if conf.url then + my_data.url = conf.url + end + if conf.port then + my_data.port = conf.port + end + if conf.max_size then + my_data.max_size = conf.max_size + end + if conf.max_age then + my_data.max_age = conf.max_age + end + broker_log:set_parameters(my_data.loglevel, my_data.logfile) + broker_log:info(2, "init values :" .. + " logfile = " .. my_data.logfile .. + " loglevel = " .. my_data.loglevel .. + " ipaddr = " .. my_data.ipaddr .. + " url = " .. my_data.url .. + " port = " .. my_data.port .. + " max_size = " .. my_data.max_size .. + " max_age = " .. my_data.max_age .. "\n") +end + +-- called when max_size or max_age is reached +local function flush() + if #my_data.data == 0 then + broker_log:info(2, "No data to flush") + my_data.flush_time = os.time() + return true + end + local buf = table.concat(my_data.data, "\n") + local respbody = {} + local body, code, headers, status = http.request { + method = "POST", + url = "https://" .. my_data.ipaddr .. ":" .. my_data.port .. my_data.url, + source = ltn12.source.string(buf), + headers = + { + ["Content-Type"] = "text/xml", + ["content-length"] = string.len(buf) + }, + sink = ltn12.sink.table(respbody) + } + if code == 200 then + my_data.data = {} + broker_log:info(2, "API connection ok : " .. tostring(code) .. "\t" .. tostring(status)) + my_data.flush_time = os.time() + return true + else + broker_log:error(0, "Could not reach API : " .. tostring(code)) + return false + end +end + +function write(d) + -- Service status + if d.category == 1 and d.element == 24 then + broker_log:info(3, "write: " .. broker.json_encode(d)) + if d.host_id and d.service_id then + local hostname = broker_cache:get_hostname(d.host_id) + local service_desc = broker_cache:get_service_description(d.host_id,d.service_id) + if not hostname or not service_desc then + broker_log:error(2, "Unknown host id : " .. d.host_id ..
" Try to restart centengine") + return true + end + if d.state_type == 1 then --we keep only events in hard state + broker_log:info(3, "HARD STATE") + if d.last_hard_state_change then + if math.abs(d.last_check - d.last_hard_state_change) < 10 then --we keep only events with a state that changed from the previous check + if d.state == d.last_hard_state then + broker_log:info(3, "STATE CHANGE") + local reqbody = "\t" .. + "" .. service_desc .. "\t" .. + "" .. d.output .. "\t" .. + "" .. d.state .. "\t" .. + "" .. d.last_update .. "\t" .. + "" .. hostname .. "\t" .. + "" .. hostname .. "\t" .. + "" .. my_data.source_ci .. "\t" .. + "" .. d.service_id .. "\t" .. + "" + table.insert(my_data.data, reqbody) + end + end + end + end + end + end + if #my_data.data >= my_data.max_size or os.time() - my_data.flush_time >= my_data.max_age then + broker_log:info(2, "max size or flush time is reached, flushing data") + return flush() + end + return true +end diff --git a/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua new file mode 100644 index 00000000000..5f4cbba6d39 --- /dev/null +++ b/stream-connectors/centreon-certified/omi/omi_events-apiv2.lua @@ -0,0 +1,372 @@ +-- +-- Copyright 2022 Centreon +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +-- For more information : contact@centreon.com +-- +-- To work you need to provide to this script a Broker stream connector output configuration +-- with the following informations: +-- +-- source_ci (string): Name of the transmiter, usually Centreon server name +-- ipaddr (string): the ip address of the operation connector server +-- url (string): url of the operation connector endpoint +-- logfile (string): the log file to use +-- loglevel (number): th log level (0, 1, 2, 3) where 3 is the maximum level +-- port (number): the operation connector server port +-- max_size (number): how many events to store before sending them to the server. +-- max_age (number): flush the events when the specified time (in second) is reach (even if max_size is not reach). 
+ +-- Libraries +local curl = require("cURL") +local http = require("socket.http") +local ltn12 = require("ltn12") + +-- Centreon lua core libraries +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-- workaround https://github.com/centreon/centreon-broker/issues/201 +local previous_event = "" + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- +function EventQueue.new(params) + local self = {} + self.fail = false + + local mandatory_parameters = { + "ipaddr", + "url", + "port" + } + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/omi_event.log" + local log_level = params.log_level or 2 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values don't suit the basic needs + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "service_status" + self.sc_params.params.source_ci = params.source_ci or "Centreon" + self.sc_params.params.ipaddr = params.ipaddr or "192.168.56.15" + self.sc_params.params.url = params.url or "/bsmc/rest/events/opscx-sdk/v1/" + self.sc_params.params.port = params.port or 30005 + self.sc_params.params.max_output_length = params.max_output_length or 1024 + self.sc_params.params.max_buffer_size = params.max_buffer_size or 5 + self.sc_params.params.max_buffer_age = params.max_buffer_age or 60 + self.sc_params.params.flush_time = params.flush_time or os.time() + + -- apply user params and check the syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + self.sc_params:build_accepted_elements_info() + + -- only load the custom code file here; it is not executed yet + if not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " ..
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +--------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + for index, value in pairs(template) do + self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event) + end + else + -- the event can't be formatted if the stream connector does not handle this kind of event and no template file handles it either + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +-- Format the XML event with service information +function EventQueue:format_event_service() + local service_severity = self.sc_broker:get_severity(self.sc_event.event.host_id, self.sc_event.event.service_id) + + if service_severity == false then + service_severity = 0 + end + + self.sc_event.event.formated_event = { + title = self.sc_event.event.cache.service.description, + description = string.match(self.sc_event.event.output, "^(.*)\n"), + severity = self.sc_event.event.state, + time_created = self.sc_event.event.last_update, + node = self.sc_event.event.cache.host.name, + related_ci = self.sc_event.event.cache.host.name, + source_ci = self.sc_common:ifnil_or_empty(self.sc_params.params.source_ci, 'Centreon'), + source_event_id = self.sc_common:ifnil_or_empty(self.sc_event.event.service_id, 0) + } +end + +-------------------------------------------------------------------------------- +-- EventQueue:add method +-- @param e An event +-------------------------------------------------------------------------------- + +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " ..
tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} xml encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} xml encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = "<event_data>\t" + for index, xml_str in pairs(event) do + payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "</" .. tostring(index) .. ">\t" + end + payload = payload .. "</event_data>" + + else + payload = payload .. "\n<event_data>\t" + for index, xml_str in pairs(event) do + payload = payload .. "<" .. tostring(index) .. ">" .. tostring(self.sc_common:xml_escape(xml_str)) .. "</" .. tostring(index) .. ">\t" + end + payload = payload .. "</event_data>" + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + local url = self.sc_params.params.http_server_url + queue_metadata.headers = { + "Content-Type: text/xml", + "content-length: " .. string.len(payload) + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + + -- write payload in the logfile for test purposes + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following xml " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: BSM Http Server URL is: \"" .. tostring(url) .. "\"") + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER,queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' ..
self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 202 or http_response_code == 200 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + end + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Function init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- Function write() +function write(event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + ..
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + +-- the flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because the last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be sent + return false +end + diff --git a/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua new file mode 100644 index 00000000000..ee630ef29e2 --- /dev/null +++ b/stream-connectors/centreon-certified/opsgenie/opsgenie-apiv1.lua @@ -0,0 +1,1039 @@ +#!/usr/bin/lua + +-------------------------------------------------------------------------------- +-- Centreon Broker Opsgenie connector +-- documentation available at https://docs.centreon.com/current/en/integrations/stream-connectors/opsgenie.html +-------------------------------------------------------------------------------- + +-- libraries +local curl = require "cURL" + +-- Global variables + +-- Useful functions + +-------------------------------------------------------------------------------- +-- ifnil_or_empty: replace a nil or empty variable with a specified value +-- @param var, the variable that needs to be checked +-- @param alt, the value of the variable if it is nil or empty +-- @return alt|var, the alternate value or the variable value +-------------------------------------------------------------------------------- +local function ifnil_or_empty(var, alt) + if var == nil or var == '' then + return alt + else + return var + end +end + +-------------------------------------------------------------------------------- +-- boolean_to_number: convert boolean variable to number +-- @param {boolean} boolean, the boolean that will be converted +-- @return {number}, a number according to the boolean value +-------------------------------------------------------------------------------- +local function boolean_to_number (boolean) + return boolean and 1 or 0 +end + +-------------------------------------------------------------------------------- +-- check_boolean_number_option_syntax: make sure the number is either 1 or 0 +-- @param {number} number, the boolean number that must be validated +-- @param {number} default, the default value that is going to be returned if the number is not valid +-- @return {number} number, a boolean number +-------------------------------------------------------------------------------- +local function check_boolean_number_option_syntax (number, default) + if number ~= 1 and number ~= 0 then + number = default + end + + return number +end + +-------------------------------------------------------------------------------- +-- get_hostname: retrieve hostname from host_id +-- @param {number} host_id, +-- @return {string}
hostname, +-------------------------------------------------------------------------------- +local function get_hostname (host_id) + if host_id == nil then + broker_log:warning(1, "get_hostname: host id is nil") + local hostname = 0 + return hostname + end + + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. Restarting centengine should fix this.") + hostname = host_id + end + + return hostname +end + +-------------------------------------------------------------------------------- +-- get_service_description: retrieve the service name from its host_id and service_id +-- @param {number} host_id, +-- @param {number} service_id, +-- @return {string} service, the name of the service +-------------------------------------------------------------------------------- +local function get_service_description (host_id, service_id) + if host_id == nil or service_id == nil then + local service = 0 + broker_log:warning(1, "get_service_description: host id or service id has a nil value") + + return service + end + + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. " not found. Restarting centengine should fix this.") + service = service_id + end + + return service +end + +-------------------------------------------------------------------------------- +-- get_hostgroups: retrieve hostgroups from host_id +-- @param {number} host_id, +-- @return {array} hostgroups, +-------------------------------------------------------------------------------- +local function get_hostgroups (host_id) + if host_id == nil then + broker_log:warning(1, "get_hostgroups: host id is nil") + return false + end + + local hostgroups = broker_cache:get_hostgroups(host_id) + + if not hostgroups then + return false + end + + return hostgroups +end + +-------------------------------------------------------------------------------- +-- get_severity: retrieve severity from host or service +-- @param {number} host_id, +-- @param {number} [optional] service_id +-- @return {array} severity, +-------------------------------------------------------------------------------- +local function get_severity (host_id, service_id) + local service_id = service_id or nil + local severity = nil + + if host_id == nil then + broker_log:warning(1, "get_severity: host id is nil") + return false + end + + if service_id == nil then + severity = broker_cache:get_severity(host_id) + else + severity = broker_cache:get_severity(host_id, service_id) + end + + return severity +end + +-------------------------------------------------------------------------------- +-- get_ba_name: retrieve ba name from ba id +-- @param {number} ba_id, +-- @return {string} ba_name, the name of the ba +-- @return {string} ba_description, the description of the ba +-------------------------------------------------------------------------------- +local function get_ba_name (ba_id) + if ba_id == nil then + broker_log:warning(1, "get_ba_name: ba id is nil") + return false + end + + local ba_info = broker_cache:get_ba(ba_id) + if ba_info == nil then + broker_log:warning(1, "get_ba_name: couldn't get ba information from cache") + return false + end + + return ba_info.ba_name, ba_info.ba_description +end + +-------------------------------------------------------------------------------- +-- get_bvs: retrieve bv name from ba id +-- @param
{number} ba_id, +-- @return {array} bv_names, the bvs' names +-- @return {array} bv_descriptions, the bvs' descriptions +-------------------------------------------------------------------------------- +local function get_bvs (ba_id) + if ba_id == nil then + broker_log:warning(1, "get_bvs: ba id is nil") + return false + end + + local bv_id = broker_cache:get_bvs(ba_id) + + if bv_id == nil then + broker_log:warning(1, "get_bvs: couldn't get bvs for ba id: " .. tostring(ba_id)) + return false + end + + local bv_names = {} + local bv_descriptions = {} + local bv_infos = {} + + for i, v in ipairs(bv_id) do + bv_infos = broker_cache:get_bv(v) + if (bv_infos.bv_name ~= nil and bv_infos.bv_name ~= '') then + table.insert(bv_names,bv_infos.bv_name) + -- handle nil descriptions on BV + if bv_infos.bv_description ~= nil then + table.insert(bv_descriptions,bv_infos.bv_description) + else + broker_log:info(3, 'get_bvs: BV: ' .. bv_infos.bv_name .. ' has no description') + end + end + end + + return bv_names, bv_descriptions +end + +-------------------------------------------------------------------------------- +-- split: convert a string into a table +-- @param {string} string, the string that is going to be split into a table +-- @param {string} separator, the separator character that will be used to split the string +-- @return {table} table, +-------------------------------------------------------------------------------- +local function split (text, separator) + local hash = {} + + -- return an empty string if text is nil + if text == nil then + broker_log:error(1, 'split: could not split text because it is nil') + return '' + end + + -- set default separator + separator = ifnil_or_empty(separator, ',') + + for value in string.gmatch(text, "([^" .. separator .. "]+)") do + table.insert(hash, value) + end + + return hash +end + +-------------------------------------------------------------------------------- +-- find_in_mapping: check if item type is in the mapping and is accepted +-- @param {table} mapping, the mapping table +-- @param {string} reference, the accepted values for the item +-- @param {string} item, the item we want to find in the mapping table and in the reference +-- @return {boolean} +-------------------------------------------------------------------------------- +local function find_in_mapping (mapping, reference, item) + for mappingIndex, mappingValue in pairs(mapping) do + for referenceIndex, referenceValue in pairs(split(reference, ',')) do + if item == mappingValue and mappingIndex == referenceValue then + return true + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- find_hostgroup_in_list: check if hostgroups from hosts are in an accepted list from the stream connector configuration +-- @param {table} acceptedHostgroups, the table with the names of accepted hostgroups +-- @param {table} hostHostgroups, the hostgroups associated with a host +-- @return {boolean} +-- @return {string} [optional] acceptedHostgroupsName, the hostgroup name that matched +-------------------------------------------------------------------------------- +local function find_hostgroup_in_list (acceptedHostgroups, hostHostgroups) + for _, acceptedHostgroupsName in ipairs(acceptedHostgroups) do + for _, hostHostgroupsInfo in pairs(hostHostgroups) do + if acceptedHostgroupsName == hostHostgroupsInfo.group_name then + return true, acceptedHostgroupsName + end + end + end + + return false +end + 
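To make the hostgroup filtering concrete, a minimal sketch of how split, get_hostgroups and find_hostgroup_in_list compose; it assumes Broker's Lua runtime (broker_cache and broker_log available), and the host id and group names are hypothetical:

-- Sketch only: accepted_hostgroups arrives from the connector
-- configuration as a comma-separated string; host id 42 and the
-- group names are placeholders.
local accepted = split("linux-servers,databases", ",")
local groups = get_hostgroups(42)            -- false when the host is not cached
if groups then
  local matched, group_name = find_hostgroup_in_list(accepted, groups)
  if matched then
    broker_log:info(2, "host 42 kept, matching hostgroup: " .. group_name)
  end
end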
+-------------------------------------------------------------------------------- +-- check_event_status: check the status of an event (ok, critical...) +-- @param {number} eventStatus, the status of the event +-- @param {string} acceptedStatuses, the event statuses that are going to be accepted +-- @return {boolean} +-------------------------------------------------------------------------------- +local function check_event_status (eventStatus, acceptedStatuses) + for i, v in ipairs(split(acceptedStatuses, ',')) do + if tostring(eventStatus) == v then + return true + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- compare_numbers: compare two numbers, if the comparison is valid, then return true +-- @param {number} firstNumber +-- @param {number} secondNumber +-- @param {string} operator, the mathematical operator that is used for the comparison +-- @return {boolean} +-------------------------------------------------------------------------------- +local function compare_numbers (firstNumber, secondNumber, operator) + if type(firstNumber) ~= 'number' or type(secondNumber) ~= 'number' then + return false + end + + -- build the comparison as a chunk (e.g. "return 1 >= 2") and evaluate it + local comparison = (load or loadstring)("return " .. firstNumber .. " " .. operator .. " " .. secondNumber) + if comparison and comparison() then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue:new (conf) + local retval = { + host_status = "0,1,2", -- = ok, down, unreachable + service_status = "0,1,2,3", -- = ok, warning, critical, unknown + ba_status = "0,1,2", -- = ok, warning, critical + hard_only = 1, + acknowledged = 0, + element_type = "host_status,service_status,ba_status", -- could be: metric,host_status,service_status,ba_event,kpi_event (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#neb) + category_type = "neb,bam", -- could be: neb,storage,bam (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#event-categories) + accepted_hostgroups = '', + in_downtime = 0, + max_buffer_size = 1, + max_buffer_age = 5, + max_stored_events = 10, -- do not use values above 100 + skip_anon_events = 1, + skip_nil_id = 1, + element_mapping = {}, + category_mapping = {}, + status_mapping = {}, + proxy_address = '', + proxy_port = '', + proxy_username = '', + proxy_password = '', + validatedEvents = {}, + app_api_token = '', + integration_api_token = '', + api_url = 'https://api.opsgenie.com', + date_format = '%Y-%m-%d %H:%M:%S', + host_alert_message = '{last_update_date} {hostname} is {state}', + host_alert_description = '', + host_alert_alias = '{hostname}_{state}', + service_alert_message = '{last_update_date} {hostname} // {serviceDescription} is {state}', + service_alert_description = '', + service_alert_alias = '{hostname}_{serviceDescription}_{state}', + ba_incident_message = '{baName} is {state}, health level reached {level_nominal}', + ba_incident_description = '', + ba_incident_tags = 'centreon,applications', + enable_incident_tags = 1, + get_bv = 1, + enable_severity = 0, + priority_must_be_set = 0, + priority_matching = 
'P1=1,P2=2,P3=3,P4=4,P5=5', + opsgenie_priorities = 'P1,P2,P3,P4,P5', + priority_mapping = {} + } + + retval.category_mapping = { + neb = 1, + bbdo = 2, + storage = 3, + correlation = 4, + dumper = 5, + bam = 6, + extcmd = 7 + } + + retval.element_mapping = { + [1] = {}, + [3] = {}, + [6] = {} + } + + retval.element_mapping[1].acknowledgement = 1 + retval.element_mapping[1].comment = 2 + retval.element_mapping[1].custom_variable = 3 + retval.element_mapping[1].custom_variable_status = 4 + retval.element_mapping[1].downtime = 5 + retval.element_mapping[1].event_handler = 6 + retval.element_mapping[1].flapping_status = 7 + retval.element_mapping[1].host_check = 8 + retval.element_mapping[1].host_dependency = 9 + retval.element_mapping[1].host_group = 10 + retval.element_mapping[1].host_group_member = 11 + retval.element_mapping[1].host = 12 + retval.element_mapping[1].host_parent = 13 + retval.element_mapping[1].host_status = 14 + retval.element_mapping[1].instance = 15 + retval.element_mapping[1].instance_status = 16 + retval.element_mapping[1].log_entry = 17 + retval.element_mapping[1].module = 18 + retval.element_mapping[1].service_check = 19 + retval.element_mapping[1].service_dependency = 20 + retval.element_mapping[1].service_group = 21 + retval.element_mapping[1].service_group_member = 22 + retval.element_mapping[1].service = 23 + retval.element_mapping[1].service_status = 24 + retval.element_mapping[1].instance_configuration = 25 + + retval.element_mapping[3].metric = 1 + retval.element_mapping[3].rebuild = 2 + retval.element_mapping[3].remove_graph = 3 + retval.element_mapping[3].status = 4 + retval.element_mapping[3].index_mapping = 5 + retval.element_mapping[3].metric_mapping = 6 + + retval.element_mapping[6].ba_status = 1 + retval.element_mapping[6].kpi_status = 2 + retval.element_mapping[6].meta_service_status = 3 + retval.element_mapping[6].ba_event = 4 + retval.element_mapping[6].kpi_event = 5 + retval.element_mapping[6].ba_duration_event = 6 + retval.element_mapping[6].dimension_ba_event = 7 + retval.element_mapping[6].dimension_kpi_event = 8 + retval.element_mapping[6].dimension_ba_bv_relation_event = 9 + retval.element_mapping[6].dimension_bv_event = 10 + retval.element_mapping[6].dimension_truncate_table_signal = 11 + retval.element_mapping[6].bam_rebuild = 12 + retval.element_mapping[6].dimension_timeperiod = 13 + retval.element_mapping[6].dimension_ba_timeperiod_relation = 14 + retval.element_mapping[6].dimension_timeperiod_exception = 15 + retval.element_mapping[6].dimension_timeperiod_exclusion = 16 + retval.element_mapping[6].inherited_downtime = 17 + + retval.status_mapping = { + [1] = {}, + [3] = {}, + [6] = {} + } + + retval.status_mapping[1][14] = { + [0] = 'UP', + [1] = 'DOWN', + [2] = 'UNREACHABLE' + } + + retval.status_mapping[1][24] = { + [0] = 'OK', + [1] = 'WARNING', + [2] = 'CRITICAL', + [3] = 'UNKNOWN' + } + + retval.status_mapping[6][1] = { + [0] = 'OK', + [1] = 'WARNING', + [2] = 'CRITICAL' + } + + -- retval.status_mapping[14] = { + -- [0] = 'UP', + -- [1] = 'DOWN', + -- [2] = 'UNREACHABLE' + -- } + + -- retval.status_mapping[24] = { + -- [0] = 'OK', + -- [1] = 'WARNING', + -- [2] = 'CRITICAL', + -- [3] = 'UNKNOWN' + -- } + + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + if i == 'app_api_token' or i == 'integration_api_token' then + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => *********") + else + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. 
v)
+      end
+    else
+      broker_log:info(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v)
+    end
+  end
+
+  retval.hard_only = check_boolean_number_option_syntax(retval.hard_only, 1)
+  retval.acknowledged = check_boolean_number_option_syntax(retval.acknowledged, 0)
+  retval.in_downtime = check_boolean_number_option_syntax(retval.in_downtime, 0)
+  retval.skip_anon_events = check_boolean_number_option_syntax(retval.skip_anon_events, 1)
+  retval.skip_nil_id = check_boolean_number_option_syntax(retval.skip_nil_id, 1)
+  retval.host_alert_message = ifnil_or_empty(retval.host_alert_message, '{last_update_date} {hostname} is {state}')
+  retval.service_alert_message = ifnil_or_empty(retval.service_alert_message, '{last_update_date} {hostname} // {serviceDescription} is {state}')
+  retval.enable_severity = check_boolean_number_option_syntax(retval.enable_severity, 1)
+  retval.priority_must_be_set = check_boolean_number_option_syntax(retval.priority_must_be_set, 0)
+  retval.priority_matching = ifnil_or_empty(retval.priority_matching, 'P1=1,P2=2,P3=3,P4=4,P5=5')
+  retval.opsgenie_priorities = ifnil_or_empty(retval.opsgenie_priorities, 'P1,P2,P3,P4,P5')
+  retval.host_alert_alias = ifnil_or_empty(retval.host_alert_alias, '{hostname}_{state}')
+  retval.service_alert_alias = ifnil_or_empty(retval.service_alert_alias, '{hostname}_{serviceDescription}_{state}')
+  retval.ba_incident_message = ifnil_or_empty(retval.ba_incident_message, '{baName} is {state}, health level reached {level_nominal}')
+  retval.enable_incident_tags = check_boolean_number_option_syntax(retval.enable_incident_tags, 1)
+  retval.get_bv = check_boolean_number_option_syntax(retval.get_bv, 1)
+
+  local severity_to_priority = {}
+
+  if retval.enable_severity == 1 then
+    retval.priority_matching_list = split(retval.priority_matching, ',')
+
+    for i, v in ipairs(retval.priority_matching_list) do
+      severity_to_priority = split(v, '=')
+
+      if string.match(retval.opsgenie_priorities, severity_to_priority[1]) == nil then
+        broker_log:warning(1, "EventQueue.new: severity is enabled but the priority configuration is wrong. configured matching: " .. retval.priority_matching ..
+          ", invalid parsed priority: " .. severity_to_priority[1] .. ", known Opsgenie priorities: " .. retval.opsgenie_priorities ..
+          ". Consider adding your priority to the opsgenie_priorities list if the parsed priority is valid")
+        break
+      end
+
+      retval.priority_mapping[severity_to_priority[2]] = severity_to_priority[1]
+    end
+  end
+
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  -- Internal data initialization
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:call, run an api call
+-- @param {string} data, the data we want to send to opsgenie
+-- @param {string} url_path, the api endpoint that must be reached
+-- @param {string} token, the token used to authenticate against the endpoint
+-- @return {array} decoded output
+-- @throw exception if http call fails or response is empty
+--------------------------------------------------------------------------------
+function EventQueue:call (data, url_path, token)
+  local endpoint = self.api_url .. url_path
+  broker_log:info(3, "EventQueue:call: Prepare url " .. endpoint)
+
+  local res = ""
+  local request = curl.easy()
+    :setopt_url(endpoint)
+    :setopt_writefunction(function (response)
+      res = res .. 
tostring(response) + end) + + broker_log:info(3, "EventQueue:call: Request initialize") + + -- set proxy address configuration + if (self.proxy_address ~= '') then + if (self.proxy_port ~= '') then + request:setopt(curl.OPT_PROXY, self.proxy_address .. ':' .. self.proxy_port) + else + broker_log:error(1, "EventQueue:call: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.proxy_username ~= '') then + if (self.proxy_password ~= '') then + request:setopt(curl.OPT_PROXYUSERPWD, self.proxy_username .. ':' .. self.proxy_password) + else + broker_log:error(1, "EventQueue:call: proxy_password parameter is not set but proxy_username is used") + end + end + + broker_log:info(3, "Add JSON header") + request:setopt( + curl.OPT_HTTPHEADER, + { + "Accept: application/json", + "Content-Type: application/json", + "Authorization: GenieKey " .. token + } + ) + + broker_log:info(3, "EventQueue:call: Add post data") + request:setopt_postfields(data) + + broker_log:info(3, "EventQueue:call: request body " .. tostring(data)) + broker_log:info(3, "EventQueue:call: request header " .. tostring(token)) + broker_log:info(3, "EventQueue:call: Call url " .. tostring(endpoint)) + request:perform() + + respCode = request:getinfo(curl.INFO_RESPONSE_CODE) + broker_log:info(3, "EventQueue:call: HTTP Code : " .. respCode) + broker_log:info(3, "EventQueue:call: Response body : " .. tostring(res)) + + request:close() + + if respCode >= 300 then + broker_log:info(1, "EventQueue:call: HTTP Code : " .. respCode) + broker_log:info(1, "EventQueue:call: HTTP Error : " .. res) + return false + end + + if res == "" then + broker_log:info(1, "EventQueue:call: HTTP Error : " .. res) + return false + end + + return broker.json_decode(res) +end + +-------------------------------------------------------------------------------- +-- is_valid_category: check if the event category is valid +-- @param {number} category, the category id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_category (category) + return find_in_mapping(self.category_mapping, self.category_type, category) +end + +-------------------------------------------------------------------------------- +-- is_valid_element: check if the event element is valid +-- @param {number} category, the category id of the event +-- @param {number} element, the element id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_element (category, element) + return find_in_mapping(self.element_mapping[category], self.element_type, element) +end + +-------------------------------------------------------------------------------- +-- is_valid_neb_event: check if the neb event is valid +-- @return {table} validNebEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_neb_event () + if self.currentEvent.element == 14 or self.currentEvent.element == 24 then + -- prepare api info + self.currentEvent.endpoint = '/v2/alerts' + self.currentEvent.token = self.integration_api_token + + self.currentEvent.hostname = get_hostname(self.currentEvent.host_id) + -- can't find hostname in cache + if self.currentEvent.hostname == self.currentEvent.host_id and self.skip_anon_events == 1 then + return false + end + + -- can't find host_id in the 
event
+    if self.currentEvent.hostname == 0 and self.skip_nil_id == 1 then
+      return false
+    end
+
+    if (string.find(self.currentEvent.hostname, '^_Module_BAM_*')) then
+      return false
+    end
+
+    -- check hard state: keep the event if state_type (SOFT=0, HARD=1) >= hard_only
+    if not compare_numbers(self.currentEvent.state_type, self.hard_only, '>=') then
+      return false
+    end
+
+    -- check ack
+    if not compare_numbers(self.acknowledged, boolean_to_number(self.currentEvent.acknowledged), '>=') then
+      return false
+    end
+
+    -- check downtime
+    if not compare_numbers(self.in_downtime, self.currentEvent.scheduled_downtime_depth, '>=') then
+      return false
+    end
+
+    if not self:is_valid_hostgroup() then
+      return false
+    end
+
+    if self.enable_severity == 1 then
+      if not self:set_priority() then
+        return false
+      end
+    end
+
+    self.currentEvent.output = ifnil_or_empty(string.match(self.currentEvent.output, "^(.*)\n"), 'no output')
+  end
+
+  if self.currentEvent.element == 14 then
+
+    if not check_event_status(self.currentEvent.state, self.host_status) then
+      return false
+    end
+
+    self.sendData.message = self:buildMessage(self.host_alert_message, nil)
+    self.sendData.description = self:buildMessage(self.host_alert_description, self.currentEvent.output)
+    self.sendData.alias = self:buildMessage(self.host_alert_alias, nil)
+
+  elseif self.currentEvent.element == 24 then
+
+    self.currentEvent.serviceDescription = get_service_description(self.currentEvent.host_id, self.currentEvent.service_id)
+
+    -- can't find service description in cache
+    if self.currentEvent.serviceDescription == self.currentEvent.service_id and self.skip_anon_events == 1 then
+      return false
+    end
+
+    if not check_event_status(self.currentEvent.state, self.service_status) then
+      return false
+    end
+
+    -- can't find service_id in the event
+    if self.currentEvent.serviceDescription == 0 and self.skip_nil_id == 1 then
+      return false
+    end
+
+    self.sendData.message = self:buildMessage(self.service_alert_message, nil)
+    self.sendData.description = self:buildMessage(self.service_alert_description, self.currentEvent.output)
+    self.sendData.alias = self:buildMessage(self.service_alert_alias, nil)
+  end
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_storage_event: check if the storage event is valid
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_storage_event ()
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_bam_event: check if the bam event is valid
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_bam_event ()
+  if self.currentEvent.element == 1 then
+    broker_log:info(3, 'EventQueue:is_valid_bam_event: starting BA treatment')
+    -- prepare api info
+    self.currentEvent.endpoint = '/v1/incidents/create'
+    self.currentEvent.token = self.app_api_token
+
+    -- check if ba event status is valid
+    if not check_event_status(self.currentEvent.state, self.ba_status) then
+      return false
+    end
+
+    self.currentEvent.baName, self.currentEvent.baDescription = get_ba_name(self.currentEvent.ba_id)
+
+    if self.currentEvent.baName ~= nil then
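+      -- when tag handling is enabled, the names of the business views (BV)
+      -- containing the BA are collected and sent as Opsgenie incident tags,
+      -- together with the user-provided ba_incident_tags list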
+      if self.enable_incident_tags == 1 then
+        self.currentEvent.bv_names, self.currentEvent.bv_descriptions = get_bvs(self.currentEvent.ba_id)
+        self.sendData.tags = self.currentEvent.bv_names
+
+        if self.ba_incident_tags ~= '' then
+          local custom_tags = split(self.ba_incident_tags, ',')
+          for i, v in ipairs(custom_tags) do
+            broker_log:info(3, 'EventQueue:is_valid_bam_event: adding ' .. tostring(v) .. ' to the list of tags')
+            table.insert(self.sendData.tags, v)
+          end
+        end
+      end
+
+      self.sendData.message = self:buildMessage(self.ba_incident_message, nil)
+      return true
+    end
+  end
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_event: check if the event is valid
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_event ()
+  local validEvent = false
+  self.sendData = {}
+  if self.currentEvent.category == 1 then
+    validEvent = self:is_valid_neb_event()
+  elseif self.currentEvent.category == 3 then
+    validEvent = self:is_valid_storage_event()
+  elseif self.currentEvent.category == 6 then
+    validEvent = self:is_valid_bam_event()
+  end
+
+  return validEvent
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_hostgroup: check if the event is associated with an accepted hostgroup
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_hostgroup ()
+  self.currentEvent.hostgroups = get_hostgroups(self.currentEvent.host_id)
+
+  -- return true if option is not set
+  if self.accepted_hostgroups == '' then
+    return true
+  end
+
+  -- drop event if we can't find any hostgroup on the host
+  if not self.currentEvent.hostgroups then
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: dropping event because no hostgroup has been found for host_id: ' .. self.currentEvent.host_id)
+    return false
+  end
+
+  -- check if one of the host's hostgroups is in the list of accepted hostgroups
+  local retval, matchedHostgroup = find_hostgroup_in_list(split(self.accepted_hostgroups, ','), self.currentEvent.hostgroups)
+  if matchedHostgroup == nil then
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: no hostgroup matched provided list: ' .. self.accepted_hostgroups .. ' for host_id: ' .. self.currentEvent.host_id)
+  else
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: host_id: ' .. self.currentEvent.host_id .. ' is in the following accepted hostgroup: ' .. matchedHostgroup)
+  end
+
+  return retval
+end
+
+
+local queue
+
+--------------------------------------------------------------------------------
+-- init, initiate stream connector with parameters from the configuration file
+-- @param {table} parameters, the table with all the configuration parameters
+--------------------------------------------------------------------------------
+function init (parameters)
+  local logfile = parameters.logfile or "/var/log/centreon-broker/connector-opsgenie.log"
+  local log_level = parameters.log_level or 1
+
+  if not parameters.app_api_token or not parameters.integration_api_token then
+    broker_log:error(1, 'init: required parameters are app_api_token and integration_api_token. Their type must be string')
+  end
+
+  broker_log:set_parameters(log_level, logfile)
+  broker_log:info(1, "Parameters")
+  for i,v in pairs(parameters) do
+    if i == 'app_api_token' or i == 'integration_api_token' then
+      broker_log:info(1, "Init " .. i .. " : *********")
+    else
+      broker_log:info(1, "Init " .. i .. " : " .. v)
+    end
+  end
+
+  queue = EventQueue:new(parameters)
+end
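+
+-- example (hypothetical values): with the default host_alert_message template
+-- '{last_update_date} {hostname} is {state}', buildMessage (below) produces
+-- something like '2021-03-01 12:00:00 srv-01 is DOWN': {..._date} macros are
+-- converted with date_format, {state} through status_mapping, and any other
+-- macro is read directly from the current event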
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the queue
+-- @param {table} eventData, the data related to the event
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:add ()
+  self.events[#self.events + 1] = self.sendData
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush, flush stored events
+-- Called when the max number of events or the max age are reached
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:flush ()
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")
+
+  local retval = self:send_data()
+
+  self.events = {}
+
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to external tool
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:send_data ()
+  local data = ''
+  local counter = 0
+
+  for _, raw_event in ipairs(self.events) do
+    if counter == 0 then
+      data = broker.json_encode(raw_event)
+      counter = counter + 1
+    else
+      data = data .. ',' .. broker.json_encode(raw_event)
+    end
+  end
+
+  broker_log:info(2, 'EventQueue:send_data: creating json: ' .. data)
+
+  if self:call(data, self.currentEvent.endpoint, self.currentEvent.token) then
+    return true
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:buildMessage, creates a message from a template
+-- @param {string} template, the message template that needs to be converted
+-- @param {string} default_template, a default message that will be parsed if template is empty
+-- @return {string} template, the template with converted values
+--------------------------------------------------------------------------------
+function EventQueue:buildMessage (template, default_template)
+  if template == '' then
+    template = default_template
+  end
+
+  for variable in string.gmatch(template, "{(.-)}") do
+    -- converts from timestamp to human readable date
+    if string.match(variable, '.-_date') then
+      template = template:gsub("{" .. variable .. "}", os.date(self.date_format, self.currentEvent[variable:sub(1, -6)]))
+    -- replaces the numeric state value with a human readable state (warning, critical...)
+    elseif variable == 'state' then
+      template = template:gsub("{" .. variable .. "}", self.status_mapping[self.currentEvent.category][self.currentEvent.element][self.currentEvent.state])
+    else
+      if self.currentEvent[variable] ~= nil then
+        template = template:gsub("{" .. variable .. "}", self.currentEvent[variable])
+      else
+        broker_log:warning(1, "EventQueue:buildMessage: {" .. variable .. 
"} is not a valid template variable") + end + end + end + + return template +end + +-------------------------------------------------------------------------------- +-- EventQueue:set_priority, set opsgenie priority using centreon severity +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:set_priority () + local severity = nil + + -- get host severity + if self.currentEvent.service_id == nil then + broker_log:info(3, "EventQueue:set_priority: getting severity for host: " .. self.currentEvent.host_id) + severity = get_severity(self.currentEvent.host_id) + -- get service severity + else + broker_log:info(3, "EventQueue:set_priority: getting severity for service: " .. self.currentEvent.service_id) + severity = get_severity(self.currentEvent.host_id, self.currentEvent.service_id) + end + + -- find the opsgenie priority depending on the found severity + local matching_priority = self.priority_mapping[tostring(severity)] + + -- drop event if no severity is found and opsgenie priority must be set + if matching_priority == nil and self.priority_must_be_set == 1 then + broker_log:info(3, "EventQueue:set_priority: couldn't find a matching priority for severity: " .. tostring(severity) .. " and priority is mandatory. Dropping event") + return false + -- ignore priority if it is not found, opsgenie will affect a default one (P3) + elseif matching_priority == nil then + broker_log:info(3, 'EventQueue:set_priority: could not find matching priority for severity: ' .. tostring(severity) .. '. Skipping priority...') + return true + else + self.sendData.priority = matching_priority + end + + return true +end + +-------------------------------------------------------------------------------- +-- write, +-- @param {array} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + + -- drop event if wrong category + if not queue:is_valid_category(event.category) then + return true + end + + -- drop event if wrong element + if not queue:is_valid_element(event.category, event.element) then + return false + end + + queue.currentEvent = event + + -- START FIX FOR BROKER SENDING DUPLICATED EVENTS + -- do not compute event if it is duplicated + if queue:is_event_duplicated() then + return true + end + -- END OF FIX + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.max_buffer_size) then + broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") + queue:flush() + end + + -- adding event to the queue + if queue:is_valid_event() then + + -- START FIX FOR BROKER SENDING DUPLICATED EVENTS + -- create id from event data + if queue.currentEvent.element == 14 and queue.currentEvent.category == 1 then + eventId = tostring(queue.currentEvent.host_id) .. '_' .. tostring(queue.currentEvent.last_check) + elseif queue.currentEvent.element == 24 and queue.currentEvent.category == 1 then + eventId = tostring(queue.currentEvent.host_id) .. '_' .. 
tostring(queue.currentEvent.service_id) .. '_' .. tostring(queue.currentEvent.last_check)
+    end
+
+    -- remove oldest event from sent events list
+    if #queue.validatedEvents >= queue.max_stored_events then
+      table.remove(queue.validatedEvents, 1)
+    end
+
+    -- add event in the sent events list and add list to queue
+    table.insert(queue.validatedEvents, eventId)
+    -- END OF FIX
+
+    queue:add()
+  else
+    return true
+  end
+
+  -- Then we check whether it is time to send the events to the receiver and flush
+  if (#queue.events >= queue.max_buffer_size) then
+    broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data")
+    return queue:flush()
+  end
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:is_event_duplicated, create an id from the neb event and check if id is in an already sent events list
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_event_duplicated()
+  local eventId = ''
+  if self.currentEvent.element == 14 then
+    eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.last_check)
+  else
+    eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.service_id) .. '_' .. tostring(self.currentEvent.last_check)
+  end
+
+  for i, v in ipairs(self.validatedEvents) do
+    if eventId == v then
+      return true
+    end
+  end
+
+  return false
+end
+
diff --git a/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua
new file mode 100644
index 00000000000..5f149e9a5b9
--- /dev/null
+++ b/stream-connectors/centreon-certified/opsgenie/opsgenie-events-apiv2.lua
@@ -0,0 +1,487 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Opsgenie Connector Events
+--------------------------------------------------------------------------------
+
+
+-- Libraries
+local curl = require "cURL"
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
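+
+-- Example broker output configuration for this connector (the token values
+-- below are placeholders, not real credentials):
+--   app_api_token         = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+--   integration_api_token = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+--   api_url               = "https://api.opsgenie.com"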
"/var/log/centreon-broker/opsgenie-events.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + --params.max_buffer_size = 1 + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.app_api_token = params.app_api_token + self.sc_params.params.integration_api_token = params.integration_api_token + self.sc_params.params.api_url = params.api_url or "https://api.opsgenie.com" + self.sc_params.params.alerts_api_endpoint = params.alerts_api_endpoint or "/v2/alerts" + self.sc_params.params.incident_api_endpoint = params.incident_api_endpoint or "/v1/incidents/create" + self.sc_params.params.ba_incident_tags = params.ba_incident_tags or "centreon,application" + self.sc_params.params.enable_incident_tags = params.enable_incident_tags or 1 + self.sc_params.params.get_bv = params.get_bv or 1 + self.sc_params.params.enable_severity = params.enable_severity or 0 + self.sc_params.params.default_priority = params.default_priority + self.sc_params.params.priority_mapping = params.priority_mapping or "P1=1,P2=2,P3=3,P4=4,P5=5" + self.sc_params.params.opsgenie_priorities = params.opsgenie_priorities or "P1,P2,P3,P4,P5" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.timestamp_conversion_format = params.timestamp_conversion_format or "%Y-%m-%d %H:%M:%S" + + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- need a queue for each type of event because ba status aren't sent on the same endpoint + self.sc_params.params.send_mixed_events = 0 + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
+
+  -- need a queue for each type of event because ba status aren't sent on the same endpoint
+  self.sc_params.params.send_mixed_events = 0
+
+  self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger)
+  self.format_template = self.sc_params:load_event_format_file(true)
+
+  -- only load the custom code file, not executed yet
+  if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then
+    self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file))
+  end
+
+  self.sc_params:build_accepted_elements_info()
+  self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger)
+
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.state_to_alert_type_mapping = {
+    [categories.neb.id] = {
+      [elements.host_status.id] = {
+        [0] = "info",
+        [1] = "error",
+        [2] = "warning"
+      },
+      [elements.service_status.id] = {
+        [0] = "info",
+        [1] = "warning",
+        [2] = "error",
+        [3] = "warning"
+      }
+    }
+  }
+
+  self.format_event = {
+    [categories.neb.id] = {
+      [elements.host_status.id] = function () return self:format_event_host() end,
+      [elements.service_status.id] = function () return self:format_event_service() end
+    },
+    [categories.bam.id] = {
+      [elements.ba_status.id] = function () return self:format_event_ba() end
+    }
+  }
+
+  self.send_data_method = {
+    [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end
+  }
+
+  self.build_payload_method = {
+    [1] = function (payload, event) return self:build_payload(payload, event) end
+  }
+
+  -- handle metadatas for queues
+  self.sc_flush:add_queue_metadata(
+    categories.neb.id,
+    elements.host_status.id,
+    {
+      api_endpoint = self.sc_params.params.alerts_api_endpoint,
+      token = self.sc_params.params.app_api_token
+    }
+  )
+  self.sc_flush:add_queue_metadata(
+    categories.neb.id,
+    elements.service_status.id,
+    {
+      api_endpoint = self.sc_params.params.alerts_api_endpoint,
+      token = self.sc_params.params.app_api_token
+    }
+  )
+
+  -- handle opsgenie priority mapping
+  local severity_to_priority = {}
+  self.priority_mapping = {}
+
+  if self.sc_params.params.enable_severity == 1 then
+    self.priority_matching_list = self.sc_common:split(self.sc_params.params.priority_matching, ',')
+
+    for _, priority_group in ipairs(self.priority_matching_list) do
+      severity_to_priority = self.sc_common:split(priority_group, '=')
+
+      if string.match(self.sc_params.params.opsgenie_priorities, severity_to_priority[1]) == nil then
+        self.sc_logger:error("[EventQueue.new]: severity is enabled but the priority configuration is wrong. configured matching: "
+          .. self.sc_params.params.priority_matching .. ", invalid parsed priority: " .. severity_to_priority[1]
+          .. ", known Opsgenie priorities: " .. self.sc_params.params.opsgenie_priorities
+          .. ". Consider adding your priority to the opsgenie_priorities list if the parsed priority is valid")
+        break
+      end
+
+      self.priority_mapping[severity_to_priority[2]] = severity_to_priority[1]
+    end
+  end
+
+  -- return EventQueue object
+  setmetatable(self, { __index = EventQueue })
+  return self
+end
+
+function EventQueue:get_priority()
+  local severity = nil
+  local event = self.sc_event.event
+  local params = self.sc_params.params
+
+  -- get appropriate severity depending on event type (service|host)
+  if event.service_id == nil then
+    self.sc_logger:debug("[EventQueue:get_priority]: getting severity for host: " .. event.host_id)
+    severity = event.cache.severity.host
+  else
+    self.sc_logger:debug("[EventQueue:get_priority]: getting severity for service: " .. 
event.service_id)
+    severity = event.cache.severity.service
+  end
+
+  -- find the opsgenie priority depending on the found severity
+  local matching_priority = self.priority_mapping[tostring(severity)]
+
+  if not matching_priority then
+    return params.default_priority
+  end
+
+  return matching_priority
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event method
+----------------------------------------------------------------------------------
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  local template = self.sc_params.params.format_template[category][element]
+
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+  self.sc_event.event.formated_event = {}
+
+  if self.format_template and template ~= nil and template ~= "" then
+    self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true)
+  else
+    -- the event can't be formatted if it is neither handled by this stream connector nor by a template file
+    if not self.format_event[category][element] then
+      self.sc_logger:error("[EventQueue:format_event]: You are trying to format an event with category: "
+        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+        .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+        .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
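+
+-- the string.sub truncations in the formatting functions below match the field
+-- limits of the Opsgenie alert API (see the links above each function): message
+-- is capped at 130 characters, description at 15000 and alias at 512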
" is " .. state .. ", health level reached " .. event.level_nominal, 1, 130) + } + + if self.sc_params.params.enable_incident_tags == 1 then + local tags = {} + + for _, bv_info in ipairs(event.cache.bvs) do + -- can't have more than 20 tags + if #tags < 50 then + self.sc_logger:info("[EventQueue:format_event_ba]: add bv name: " .. tostring(bv_info.bv_name) .. " to list of tags") + table.insert(tags, string.sub(bv_info.bv_name, 1, 50)) + end + end + + local custom_tags = self.sc_common:split(self.sc_params.params.ba_incident_tags, ",") + for _, tag_name in ipairs(custom_tags) do + -- can't have more than 20 tags + if #tags < 20 then + self.sc_logger:info("[EventQueue:format_event_ba]: add custom tag: " .. tostring(tag_name) .. " to list of tags") + table.insert(tags, string.sub(tag_name, 1, 50)) + end + end + + self.sc_event.formated_event.tags = tags + end + +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. "," .. broker.json_encode(event) + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + local url = self.sc_params.params.api_url .. queue_metadata.api_endpoint + queue_metadata.headers = { + "content-type: application/json", + "accept: application/json", + "Authorization: GenieKey " .. queue_metadata.token + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Opsgenie address is: " .. 
tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + + -- according to opsgenie documentation "Create alert requests are processed asynchronously, therefore valid requests are responded with HTTP status 202 - Accepted" + -- https://docs.opsgenie.com/docs/alert-api#create-alert + if http_response_code == 202 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. 
tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua new file mode 100644 index 00000000000..d2777539fe7 --- /dev/null +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-apiv1.lua @@ -0,0 +1,450 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker PagerDuty Connector +-- Tested with the public API on the developer platform: +-- https://events.pagerduty.com/v2/enqueue +-- +-- References: +-- https://developer.pagerduty.com/api-reference/reference/events-v2/openapiv3.json/paths/~1enqueue/post +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Prerequisites: +-- +-- You need a PagerDuty instance +-- You need your instance's routing_key. According to the page linked above: "The GUID of one of your Events API V2 integrations. This is the "Integration Key" listed on the Events API V2 integration's detail page." +-- +-- The lua-curl and luatz libraries are required by this script: +-- yum install lua-curl epel-release +-- yum install luarocks +-- luarocks install luatz +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Parameters: +-- [MANDATORY] pdy_routing_key: see above, this will be your authentication token +-- [RECOMMENDED] pdy_centreon_url: in order to get links/url that work in your events +-- [RECOMMENDED] log_level: level of verbose. Default is 2 but in production 1 is the recommended value. 
+-- [OPTIONAL] http_server_url: default "https://events.pagerduty.com/v2/enqueue"
+-- [OPTIONAL] http_proxy_string: default empty
+--
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require "cURL"
+local new_from_timestamp = require "luatz.timetable".new_from_timestamp
+
+-- Global variables
+local previous_event = ""
+-- Nagios states to PagerDuty severity conversion table
+local from_state_to_severity = { "info", "warning", "critical", "error" }
+
+-- Useful functions
+local function ifnil(var, alt)
+  if var == nil then
+    return alt
+  else
+    return var
+  end
+end
+
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == '' then
+    return alt
+  else
+    return var
+  end
+end
+
+local function get_hostname(host_id)
+  local hostname = broker_cache:get_hostname(host_id)
+  if not hostname then
+    broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. Restarting centengine should fix this.")
+    hostname = host_id
+  end
+  return hostname
+end
+
+local function get_service_description(host_id, service_id)
+  local service = broker_cache:get_service_description(host_id, service_id)
+  if not service then
+    broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. " not found. Restarting centengine should fix this.")
+    service = service_id
+  end
+  return service
+end
+
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+-- Constructor
+-- @param conf The table given by the init() function and returned from the GUI
+-- @return the new EventQueue
+--------------------------------------------------------------------------------
+
+function EventQueue.new(conf)
+  local retval = {
+    http_server_url = "https://events.pagerduty.com/v2/enqueue",
+    http_proxy_string = "",
+    http_timeout = 5,
+    pdy_source = "",
+    pdy_routing_key = "Please fill pdy_routing_key in StreamConnector parameter",
+    pdy_centreon_url = "http://set.pdy_centreon_url.parameter",
+    filter_type = "metric,status",
+    max_buffer_size = 1,
+    max_buffer_age = 5,
+    skip_anon_events = 1
+  }
+  for i,v in pairs(conf) do
+    if retval[i] then
+      retval[i] = v
+      broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v)
+    end
+  end
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  -- Internal data initialization
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add method
+-- @param e An event
+--------------------------------------------------------------------------------
+
+function EventQueue:add(e)
+
+  local type = "host"
+  local hostname = get_hostname(e.host_id)
+  if hostname == e.host_id then
+    if self.skip_anon_events ~= 1 then
+      broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'")
+      return false
+    else
+      broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. 
The event will be sent with the id only") + end + end + + local service_description = "" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + end + end + end + + local pdy_dedup_key + if e.service_id then --to remain consistent in the alert handling even in the event of the loss of the broker cache, we should use the ids to link the events + pdy_dedup_key = e.host_id .. "_" .. e.service_id + else + pdy_dedup_key = e.host_id .. "_H" + end + + -- converting epoch timestamp to UTC time in RFC3339 + local pdy_timestamp = new_from_timestamp(e.last_update):rfc_3339() + broker_log:info(3, "EventQueue:add: Timestamp converted from " .. e.last_update .. " to \"" .. pdy_timestamp .. "\"") + + -- converting e.state into PagerDuty severity + -- from_state_to_severity maps between 'critical', 'warning', 'error' or 'info' and e.state. WARNING: if info then "event_action" is not "trigger" but "resolve" + local pdy_severity = ifnil_or_empty(from_state_to_severity[e.state + 1], 'error') + broker_log:info(3, "EventQueue:add: Severity converted from " .. e.state .. " to \"" .. pdy_severity .. "\"") + + -- handling empty output (empty "summary" cannot be sent to PagerDuty) + local pdy_summary = hostname .. "/" .. service_description .. ": " .. ifnil_or_empty(string.match(e.output, "^(.*)\n"), 'no output') + + -- basic management of "class" attribute + local pdy_class + if e.service_id then + pdy_class = "service" + else + pdy_class = "host" + end + + -- managing "event_action" (trigger/resolve) + local pdy_event_action + if pdy_severity == "info" then + pdy_event_action = "resolve" + else + pdy_event_action = "trigger" + end + broker_log:info(3, "EventQueue:add: Since severity is \"" .. pdy_severity .. "\", event_action is \"" .. pdy_event_action .. "\"") + + -- Managing perfdata + local pdy_custom_details = {} +-- if e.perfdata then +-- broker_log:info(3, "EventQueue:add: Perfdata list: " .. broker.json_encode(e.perfdata) .. " ") +-- -- Case when the perfdata name is delimited with simple quotes: spaces allowed +-- for metric_name, metric_value in e.perfdata:gmatch("%s?'(.+)'=(%d+[%a]?);?[%W;]*%s?") do +-- broker_log:info(3, "EventQueue:add: Perfdata " .. metric_name .. " = " .. metric_value) +-- pdy_custom_details[metric_name] = metric_value +-- end +-- -- Case when the perfdata name is NOT delimited with simple quotes: no spaces allowed +-- for metric_name, metric_value in e.perfdata:gmatch("%s?([^'][%S]+[^'])=(%d+[%a]?);?[%W;]*%s?") do +-- broker_log:info(3, "EventQueue:add: Perfdata " .. metric_name .. " = " .. metric_value) +-- pdy_custom_details[metric_name] = metric_value +-- end +-- end + + -- Hostgroups + local host_hg_array = broker_cache:get_hostgroups(e.host_id) + local pdy_hostgroups = "" + -- case when no filter has been set for hostgroups + for i = 1, #host_hg_array do + if pdy_hostgroups ~= "" then + pdy_hostgroups = pdy_hostgroups .. ", " .. 
ifnil_or_empty(host_hg_array[i].group_name, "empty host group")
+    else
+      pdy_hostgroups = ifnil_or_empty(host_hg_array[i].group_name, "empty host group")
+    end
+  end
+
+  -- Servicegroups (declared at function scope so the value stays visible when building custom_details)
+  local pdy_servicegroups = ""
+  if e.service_id then
+    local service_hg_array = broker_cache:get_servicegroups(e.host_id, e.service_id)
+    -- case when no filter has been set for servicegroups
+    for i = 1, #service_hg_array do
+      if pdy_servicegroups ~= "" then
+        pdy_servicegroups = pdy_servicegroups .. ", " .. ifnil_or_empty(service_hg_array[i].group_name, "empty service group")
+      else
+        pdy_servicegroups = ifnil_or_empty(service_hg_array[i].group_name, "empty service group")
+      end
+    end
+  end
+
+  local host_severity = broker_cache:get_severity(e.host_id)
+  if host_severity ~= nil then
+    pdy_custom_details["Hostseverity"] = host_severity
+  end
+
+  if e.service_id then
+    local service_severity = broker_cache:get_severity(e.host_id, e.service_id)
+    if service_severity ~= nil then
+      pdy_custom_details["Serviceseverity"] = service_severity
+    end
+  end
+
+  if pdy_hostgroups ~= "" then
+    pdy_custom_details["Hostgroups"] = pdy_hostgroups
+  end
+  if pdy_servicegroups ~= "" then
+    pdy_custom_details["Servicegroups"] = pdy_servicegroups
+  end
+
+  local pdy_source_field = hostname
+  if self.pdy_source and self.pdy_source ~= "" then
+    pdy_source_field = self.pdy_source
+  end
+  -- Appending the current event to the queue
+  self.events[#self.events + 1] = {
+    payload = {
+      summary = pdy_summary,
+      timestamp = pdy_timestamp,
+      severity = pdy_severity,
+      source = pdy_source_field,
+      component = service_description,
+      group = pdy_hostgroups,
+      class = pdy_class,
+      custom_details = pdy_custom_details
+    },
+    routing_key = self.pdy_routing_key,
+    dedup_key = pdy_dedup_key,
+    event_action = pdy_event_action,
+    client = "Centreon Stream Connector",
+    client_url = self.pdy_centreon_url,
+    links = {
+      {
+        href = self.pdy_centreon_url .. "/centreon/main.php?p=20202&o=hd&host_name=" .. hostname,
+        text = "Link to host summary."
+      }
+    }
+    --images = {
+    --{
+    --src = "https://chart.googleapis.com/chart?chs=600x400&chd=t:6,2,9,5,2,5,7,4,8,2,1&cht=lc&chds=a&chxt=y&chm=D,0033FF,0,0,5,1",
+    --href = "https://google.com",
+    --alt = "An example link with an image"
+    --}
+    --}
+  }
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush method
+-- Called when the max number of events or the max age are reached
+--------------------------------------------------------------------------------
+
+function EventQueue:flush()
+
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")
+  local http_post_data = ""
+  for _, raw_event in ipairs(self.events) do
+    http_post_data = http_post_data .. broker.json_encode(raw_event)
+  end
+  for s in http_post_data:gmatch("[^\r\n]+") do
+    broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "")
+  end
+
+  broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"")
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+  :setopt_url(self.http_server_url)
+  :setopt_writefunction(
+    function (response)
+      http_response_body = http_response_body .. tostring(response)
+    end
+  )
+  :setopt(curl.OPT_TIMEOUT, self.http_timeout)
+  :setopt(
+    curl.OPT_HTTPHEADER,
+    {
+      "accept: application/vnd.pagerduty+json;version=2",
+      "content-type: application/json",
+      --"authorization: Token token=" .. self.pdy_routing_key,
+      "x-routing-key: " .. self.pdy_routing_key
+    }
+  )
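+
+  -- the integration routing key is what authenticates the request: it is sent
+  -- both in each event payload (routing_key field) and in the x-routing-key
+  -- header above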
+
+  -- setting the CURLOPT_PROXY
+  if self.http_proxy_string and self.http_proxy_string ~= "" then
+    broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'")
+    http_request:setopt(curl.OPT_PROXY, self.http_proxy_string)
+  end
+
+  -- adding the HTTP POST data
+  broker_log:info(3, "EventQueue:flush: POST data: '" .. http_post_data .. "'")
+  http_request:setopt_postfields(http_post_data)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  if http_response_code == 202 then
+    broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code)
+    -- now that the data has been sent, we empty the events array
+    self.events = {}
+    retval = true
+  else
+    broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code .. " message is:\n\"" .. http_response_body .. "\n\"\n")
+  end
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  local log_level = 2
+  local log_path = "/var/log/centreon-broker/stream-connector-pagerduty.log"
+  for i,v in pairs(conf) do
+    if i == "log_level" then
+      log_level = v
+    end
+    if i == "log_path" then
+      log_path = v
+    end
+  end
+  broker_log:set_parameters(log_level, log_path)
+  broker_log:info(0, "init: Starting PagerDuty StreamConnector (log level: " .. log_level .. ")")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = EventQueue.new(conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- Function write()
+function write(e)
+  broker_log:info(3, "write: Beginning function")
+
+  -- First, are there some old events waiting in the flush queue ?
+  if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then
+    broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  -- Then we check that the event queue is not already full
+  if (#queue.events >= queue.max_buffer_size) then
+    broker_log:warning(1, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.")
+    return queue:flush()
+  end
+
+  -- Here come the filters
+  -- Host/service status only
+  if not (e.category == 1 and (e.element == 24 or e.element == 14)) then
+    broker_log:info(3, "write: Neither host nor service status event. Dropping.")
+    return true
+  end
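+
+  -- reminder: in BBDO, category 1 is NEB; element 14 is a host status event
+  -- and element 24 a service status event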
Dropping.") + return true + end + + -- Ignore states different from previous hard state only + if e.last_hard_state_change and e.last_check and e.last_hard_state_change < e.last_check then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Last hard state change prior to last check => no state change. Dropping.") + return true + end + + -- Ignore objects in downtime + if e.scheduled_downtime_depth ~= 0 then --we keep only events in hard state and not in downtime + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Scheduled downtime. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + if current_event == previous_event then + broker_log:info(3, "write: Duplicate event ignored.") + return true + end + + -- Ignore pending states + if e.state and e.state == 4 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Pending state ignored. Dropping.") + return true + end + -- The current event now becomes the previous + previous_event = current_event + -- Once all the filters have been passed successfully, we can add the current event to the queue + queue:add(e) + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + broker_log:info(3, "write: Ending function") + + return true +end diff --git a/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua new file mode 100644 index 00000000000..883a72c28b5 --- /dev/null +++ b/stream-connectors/centreon-certified/pagerduty/pagerduty-events-apiv2.lua @@ -0,0 +1,483 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Pagerduty Connector Events +-------------------------------------------------------------------------------- + + +-- Libraries +local curl = require "cURL" +local new_from_timestamp = require "luatz.timetable".new_from_timestamp +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) 
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    "pdy_routing_key"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/pagerduty-events.log"
+  local log_level = params.log_level or 1
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  -- force buffer size to 1 to avoid breaking the communication with pagerduty (can't send more than one event at once)
+  params.max_buffer_size = 1
+
+  -- overriding default parameters for this stream connector if the default values don't suit the basic needs
+  self.sc_params.params.pdy_centreon_url = params.pdy_centreon_url or "http://set.pdy_centreon_url.parameter"
+  self.sc_params.params.http_server_url = params.http_server_url or "https://events.pagerduty.com/v2/enqueue"
+  self.sc_params.params.client = params.client or "Centreon Stream Connector"
+  self.sc_params.params.accepted_categories = params.accepted_categories or "neb"
+  self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status"
+  self.sc_params.params.pdy_source = params.pdy_source or nil
+
+  -- apply users params and check syntax of standard ones
+  self.sc_params:param_override(params)
+  self.sc_params:check_params()
+
+  self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger)
+  self.format_template = self.sc_params:load_event_format_file(true)
+
+  -- only load the custom code file, not executed yet
+  if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then
+    self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + self.state_to_severity_mapping = { + [0] = { + severity = "info", + action = "resolve" + }, + [1] = { + severity = "warning", + action = "trigger" + }, + [2] = { + severity = "critical", + action = "trigger" + }, + [3] = { + severity = "error", + action = "trigger" + } + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +function EventQueue:format_event_host() + local event = self.sc_event.event + local pdy_custom_details = {} + + -- handle hostgroup + local hostgroups = self.sc_broker:get_hostgroups(event.host_id) + local pdy_hostgroups = "" + + -- retrieve hostgroups and store them in pdy_custom_details["Hostgroups"] + if not hostgroups then + pdy_hostgroups = "empty host group" + else + for index, hg_data in ipairs(hostgroups) do + if pdy_hostgroups ~= "" then + pdy_hostgroups = pdy_hostgroups .. ", " .. 
hg_data.group_name + else + pdy_hostgroups = hg_data.group_name + end + end + + pdy_custom_details["Hostgroups"] = pdy_hostgroups + end + + -- handle severity + local host_severity = self.sc_broker:get_severity(event.host_id) + + if host_severity then + pdy_custom_details['Hostseverity'] = host_severity + end + + pdy_custom_details["Output"] = self.sc_common:ifnil_or_empty(event.output, "no output") + + self.sc_event.event.formated_event = { + payload = { + summary = tostring(event.cache.host.name) .. ": " .. self.sc_params.params.status_mapping[event.category][event.element][event.state], + timestamp = new_from_timestamp(event.last_update):rfc_3339(), + severity = self.state_to_severity_mapping[event.state].severity, + source = self.sc_params.params.pdy_source or tostring(event.cache.host.name), + component = tostring(event.cache.host.name), + group = pdy_hostgroups, + class = "host", + custom_details = pdy_custom_details, + }, + routing_key = self.sc_params.params.pdy_routing_key, + event_action = self.state_to_severity_mapping[event.state].action, + dedup_key = event.host_id .. "_H", + client = self.sc_params.params.client, + client_url = self.sc_params.params.client_url, + links = { + { + -- should think about using the new resources page but keep it as is for compatibility reasons + href = self.sc_params.params.pdy_centreon_url .. "/centreon/main.php?p=20202&o=hd&host_name=" .. tostring(event.cache.host.name), + text = "Link to Centreon host summary" + } + } + } +end + +function EventQueue:format_event_service() + local event = self.sc_event.event + local pdy_custom_details = {} + + -- handle hostgroup + local hostgroups = self.sc_broker:get_hostgroups(event.host_id) + local pdy_hostgroups = "" + + -- retrieve hostgroups and store them in pdy_custom_details["Hostgroups"] + if not hostgroups then + pdy_hostgroups = "empty host group" + else + for index, hg_data in ipairs(hostgroups) do + if pdy_hostgroups ~= "" then + pdy_hostgroups = pdy_hostgroups .. ", " .. hg_data.group_name + else + pdy_hostgroups = hg_data.group_name + end + end + + pdy_custom_details["Hostgroups"] = pdy_hostgroups + end + + -- handle servicegroups + local servicegroups = self.sc_broker:get_servicegroups(event.host_id, event.service_id) + local pdy_servicegroups = "" + + -- retrieve servicegroups and store them in pdy_custom_details["Servicegroups"] + if not servicegroups then + pdy_servicegroups = "empty service group" + else + for index, sg_data in ipairs(servicegroups) do + if pdy_servicegroups ~= "" then + pdy_servicegroups = pdy_servicegroups .. ", " .. sg_data.group_name + else + pdy_servicegroups = sg_data.group_name + end + end + + pdy_custom_details["Servicegroups"] = pdy_servicegroups + end + + -- handle host severity + local host_severity = self.sc_broker:get_severity(event.host_id) + + if host_severity then + pdy_custom_details["Hostseverity"] = host_severity + end + + -- handle service severity + local service_severity = self.sc_broker:get_severity(event.host_id, event.service_id) + + if service_severity then + pdy_custom_details["Serviceseverity"] = service_severity + end + + pdy_custom_details["Output"] = self.sc_common:ifnil_or_empty(event.output, "no output") + + self.sc_event.event.formated_event = { + payload = { + summary = tostring(event.cache.host.name) .. "/" .. tostring(event.cache.service.description) .. ": " .. 
self.sc_params.params.status_mapping[event.category][event.element][event.state], + timestamp = new_from_timestamp(event.last_update):rfc_3339(), + severity = self.state_to_severity_mapping[event.state].severity, + source = self.sc_params.params.pdy_source or tostring(event.cache.host.name), + component = tostring(event.cache.service.description), + group = pdy_hostgroups, + class = "service", + custom_details = pdy_custom_details, + }, + routing_key = self.sc_params.params.pdy_routing_key, + event_action = self.state_to_severity_mapping[event.state].action, + dedup_key = event.host_id .. "_" .. event.service_id, + client = self.sc_params.params.client, + client_url = self.sc_params.params.client_url, + links = { + { + -- should think about using the new resources page but keep it as is for compatibility reasons + href = self.sc_params.params.pdy_centreon_url .. "/centreon/main.php?p=20202&o=hd&host_name=" .. tostring(event.cache.host.name), + text = "Link to Centreon host summary" + } + } + } +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. broker.json_encode(event) + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + + local url = self.sc_params.params.http_server_url + queue_metadata.headers = { + "content-type: application/json", + "content-length:" .. string.len(payload), + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Pagerduty address is: " .. 
tostring(url))
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(
+      function (response)
+        http_response_body = http_response_body .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection)
+    :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(payload)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  -- pagerduty uses 202 https://developer.pagerduty.com/api-reference/reference/events-v2/openapiv3.json/paths/~1enqueue/post
+  if http_response_code == 202 then
+    self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code))
+    retval = true
+  else
+    self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body))
+  end
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_event:is_valid_element() then
+      -- format event if it is validated
+      if queue.sc_event:is_valid_event() then
+        queue:format_accepted_event()
+      end
+    --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
+
diff --git a/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua new file mode 100644 index 00000000000..c0a66e4f965 --- /dev/null +++ b/stream-connectors/centreon-certified/prometheus/prometheus-gateway-apiv1.lua @@ -0,0 +1,949 @@
+#!/usr/bin/lua
+
+-- libraries
+local curl = require "cURL"
+local base64 = require("base64")
+
+-- Global variables
+
+-- Useful functions
+
+--------------------------------------------------------------------------------
+-- ifnil_or_empty: change a nil or empty variable for a specified value
+-- @param var, the variable that needs to be checked
+-- @param alt, the value of the variable if it is nil or empty
+-- @return alt|var, the alternate value or the variable value
+--------------------------------------------------------------------------------
+local function ifnil_or_empty (var, alt)
+  if var == nil or var == '' then
+    return alt
+  else
+    return var
+  end
+end
+
+
+--------------------------------------------------------------------------------
+-- ifnumber_not_nan: check if a number is a number (and not a NaN)
+-- @param {number} number, the number to check
+-- @return {boolean}
+--------------------------------------------------------------------------------
+local function ifnumber_not_nan (number)
+  if (number ~= number) then
+    return false
+  elseif (type(number) ~= 'number') then
+    return false
+  else
+    return true
+  end
+end
+
+--------------------------------------------------------------------------------
+-- convert_to_openmetric: replace unwanted characters in order to comply with the open metrics format
+-- @param {string} string, the string to convert
+-- @return {string} string, a string that matches [a-zA-Z0-9_:]+
+--------------------------------------------------------------------------------
+local function convert_to_openmetric (string)
+  if string == nil or string == '' or type(string) ~= 'string' then
+    return false
+  end
+
+  return string.gsub(string, '[^a-zA-Z0-9_:]', '_')
+end
+
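+--------------------------------------------------------------------------------
+-- Usage sketch for convert_to_openmetric above and unit_mapping below
+-- (illustrative values):
+--
+--   convert_to_openmetric("used space /var")  --> "used_space__var"
+--   unit_mapping("%")                         --> "ratios", true
+--   unit_mapping("ms")                        --> "ms", true (unknown units pass through)
+--------------------------------------------------------------------------------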
+--------------------------------------------------------------------------------
+-- unit_mapping: convert perfdata units to openmetrics standard
+-- @param {string} unit, the unit value
+-- @return {string} unit, the openmetrics unit name
+-- @return {boolean}, always true (units missing from the mapping are passed through unchanged)
+--------------------------------------------------------------------------------
+local function unit_mapping (unit)
+  local unitMapping = {
+    s = 'seconds',
+    m = 'meters',
+    B = 'bytes',
+    g = 'grams',
+    V = 'volts',
+    A = 'amperes',
+    K = 'kelvins',
+    ratio = 'ratios',
+    degres = 'celsius'
+  }
+
+  if unit == nil or unit == '' or type(unit) ~= 'string' then
+    unit = ''
+  end
+
+  if unit == '%' then
+    unit = unitMapping['ratio']
+  elseif unit == '°' then
+    unit = unitMapping['degres']
+  else
+    if (unitMapping[unit] ~= nil) then
+      unit = unitMapping[unit]
+    end
+  end
+
+  return unit, true
+end
+
+--------------------------------------------------------------------------------
+-- boolean_to_number: convert boolean variable to number
+-- @param {boolean} boolean, the boolean that will be converted
+-- @return {number}, a number according to the boolean value
+--------------------------------------------------------------------------------
+local function boolean_to_number (boolean)
+  return boolean and 1 or 0
+end
+
+--------------------------------------------------------------------------------
+-- check_boolean_number_option_syntax: make sure the number is either 1 or 0
+-- @param {number} number, the boolean number that must be validated
+-- @param {number} default, the default value that is going to be returned if the number is not validated
+-- @return {number} number, a boolean number
+--------------------------------------------------------------------------------
+local function check_boolean_number_option_syntax (number, default)
+  if number ~= 1 and number ~= 0 then
+    number = default
+  end
+
+  return number
+end
+
+--------------------------------------------------------------------------------
+-- get_hostname: retrieve hostname from host_id
+-- @param {number} host_id,
+-- @return {string} hostname,
+--------------------------------------------------------------------------------
+local function get_hostname (host_id)
+  if host_id == nil then
+    broker_log:warning(1, "get_hostname: host id is nil")
+    -- return 0 directly instead of leaking a global "hostname" variable
+    return 0
+  end
+
+  local hostname = broker_cache:get_hostname(host_id)
+  if not hostname then
+    broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. Restarting centengine should fix this.")
+    hostname = host_id
+  end
+
+  return hostname
+end
+
+--------------------------------------------------------------------------------
+-- get_hostgroups: retrieve hostgroups from host_id
+-- @param {number} host_id,
+-- @return {array} hostgroups,
+--------------------------------------------------------------------------------
+local function get_hostgroups (host_id)
+  if host_id == nil then
+    broker_log:warning(1, "get_hostgroup: host id is nil")
+    return false
+  end
+
+  local hostgroups = broker_cache:get_hostgroups(host_id)
+  if not hostgroups then
+    broker_log:warning(1, "get_hostgroups: no hostgroup for host id " .. host_id .. 
" found.") + return false + end + + return hostgroups +end + +-------------------------------------------------------------------------------- +-- get_service_description: retrieve the service name from its host_id and service_id +-- @param {number} host_id, +-- @param {number} service_id, +-- @return {string} service, the name of the service +-------------------------------------------------------------------------------- +local function get_service_description (host_id, service_id) + if host_id == nil or service_id == nil then + service = 0 + broker_log:warning(1, "get_service_description: host id or service id has a nil value") + + return service + end + + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. " not found. Restarting centengine should fix this.") + service = service_id + end + + return service +end + +-------------------------------------------------------------------------------- +-- split: convert a string into a table +-- @param {string} string, the string that is going to be splitted into a table +-- @param {string} separatpr, the separator character that will be used to split the string +-- @return {table} table, +-------------------------------------------------------------------------------- +local function split (text, separator) + local hash = {} + -- https://stackoverflow.com/questions/1426954/split-string-in-lua + for value in string.gmatch(text, "([^" .. separator .. "]+)") do + table.insert(hash, value) + end + + return hash +end + +-------------------------------------------------------------------------------- +-- find_in_mapping: check if item type is in the mapping and is accepted +-- @param {table} mapping, the mapping table +-- @param {string} reference, the accepted values for the item +-- @param {string} item, the item we want to find in the mapping table and in the reference +-- @return {boolean} +-------------------------------------------------------------------------------- +local function find_in_mapping (mapping, reference, item) + for mappingIndex, mappingValue in pairs(mapping) do + for referenceIndex, referenceValue in pairs(split(reference, ',')) do + if item == mappingValue and mappingIndex == referenceValue then + return true + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- find_hostgroup_in_list: check if hostgroups from hosts are in an accepted list from the stream connector configuration +-- @param {table} acceptedHostgroups, the table with the name of accepted hostgroups +-- @param {table} hostHostgroups, the hostgroups associated to an host +-- @return {boolean} +-- @return {string} [optional] acceptedHostgroupsName, the hostgroup name that matched +-------------------------------------------------------------------------------- +local function find_hostgroup_in_list (acceptedHostgroups, hostHostgroups) + for _, acceptedHostgroupsName in ipairs(acceptedHostgroups) do + for _, hostHostgroupsInfo in pairs(hostHostgroups) do + if acceptedHostgroupsName == hostHostgroupsInfo.group_name then + return true, acceptedHostgroupsName + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- check_neb_event_status: check the status of a neb event (ok, critical...) 
+--------------------------------------------------------------------------------
+-- check_neb_event_status: check the status of a neb event (ok, critical...)
+-- @param {number} eventStatus, the status of the event
+-- @param {string} acceptedStatuses, the event statuses that are going to be accepted
+-- @return {boolean}
+--------------------------------------------------------------------------------
+local function check_neb_event_status (eventStatus, acceptedStatuses)
+  for i, v in ipairs(split(acceptedStatuses, ',')) do
+    if tostring(eventStatus) == v then
+      return true
+    end
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- compare_numbers: compare two numbers, if comparison is valid, then return true
+-- @param {number} firstNumber
+-- @param {number} secondNumber
+-- @param {string} operator, the mathematical operator that is used for the comparison
+-- @return {boolean}
+--------------------------------------------------------------------------------
+local function compare_numbers (firstNumber, secondNumber, operator)
+  if type(firstNumber) ~= 'number' or type(secondNumber) ~= 'number' then
+    return false
+  end
+
+  -- the concatenation "firstNumber .. operator .. secondNumber" only builds a
+  -- string (which is always truthy), so the comparison has to be dispatched
+  -- explicitly on the operator
+  if operator == '>=' then
+    return firstNumber >= secondNumber
+  elseif operator == '<=' then
+    return firstNumber <= secondNumber
+  elseif operator == '>' then
+    return firstNumber > secondNumber
+  elseif operator == '<' then
+    return firstNumber < secondNumber
+  elseif operator == '==' then
+    return firstNumber == secondNumber
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+-- Constructor
+-- @param conf The table given by the init() function and returned from the GUI
+-- @return the new EventQueue
+--------------------------------------------------------------------------------
+
+function EventQueue:new (conf)
+  local retval = {
+    host_status = "0,1,2", -- = ok, down, unreachable
+    service_status = "0,1,2,3", -- = ok, warning, critical, unknown
+    hard_only = 0,
+    acknowledged = 1,
+    element_type = "service_status", -- could be: metric,host_status,service_status,ba_event,kpi_event (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#neb)
+    category_type = "storage", -- could be: neb,storage,bam (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#event-categories)
+    accepted_hostgroups = '',
+    in_downtime = 1,
+    max_buffer_size = 1,
+    max_buffer_age = 5,
+    skip_anon_events = 1,
+    skip_nil_id = 1,
+    enable_threshold_metrics = 0,
+    enable_status_metrics = 0,
+    disable_bam_host = 1,
+    add_hostgroups = 1,
+    enable_extended_metric_name = 0,
+    prometheus_gateway_address = 'http://localhost',
+    prometheus_gateway_port = '9091',
+    prometheus_gateway_job = 'monitoring',
+    http_timeout = 60,
+    proxy_address = '',
+    proxy_port = '',
+    proxy_username = '',
+    proxy_password = '',
+    current_event = nil,
+    element_mapping = {},
+    category_mapping = {}
+  }
+
+  retval.category_mapping = {
+    neb = 1,
+    bbdo = 2,
+    storage = 3,
+    correlation = 4,
+    dumper = 5,
+    bam = 6,
+    extcmd = 7
+  }
+
+  retval.element_mapping = {
+    [1] = {},
+    [3] = {},
+    [6] = {}
+  }
+
+  retval.element_mapping[1].acknowledgement = 1
+  retval.element_mapping[1].comment = 2
+  retval.element_mapping[1].custom_variable = 3
+  retval.element_mapping[1].custom_variable_status = 4
+  retval.element_mapping[1].downtime = 5
+  retval.element_mapping[1].event_handler = 6
+  retval.element_mapping[1].flapping_status = 7
+  retval.element_mapping[1].host_check = 8
+  retval.element_mapping[1].host_dependency = 9
+  retval.element_mapping[1].host_group = 10
+  retval.element_mapping[1].host_group_member = 11
+  retval.element_mapping[1].host = 12
+ retval.element_mapping[1].host_parent = 13 + retval.element_mapping[1].host_status = 14 + retval.element_mapping[1].instance = 15 + retval.element_mapping[1].instance_status = 16 + retval.element_mapping[1].log_entry = 17 + retval.element_mapping[1].module = 18 + retval.element_mapping[1].service_check = 19 + retval.element_mapping[1].service_dependency = 20 + retval.element_mapping[1].service_group = 21 + retval.element_mapping[1].service_group_member = 22 + retval.element_mapping[1].service = 23 + retval.element_mapping[1].service_status = 24 + retval.element_mapping[1].instance_configuration = 25 + + retval.element_mapping[3].metric = 1 + retval.element_mapping[3].rebuild = 2 + retval.element_mapping[3].remove_graph = 3 + retval.element_mapping[3].status = 4 + retval.element_mapping[3].index_mapping = 5 + retval.element_mapping[3].metric_mapping = 6 + + retval.element_mapping[6].ba_status = 1 + retval.element_mapping[6].kpi_status = 2 + retval.element_mapping[6].meta_service_status = 3 + retval.element_mapping[6].ba_event = 4 + retval.element_mapping[6].kpi_event = 5 + retval.element_mapping[6].ba_duration_event = 6 + retval.element_mapping[6].dimension_ba_event = 7 + retval.element_mapping[6].dimension_kpi_event = 8 + retval.element_mapping[6].dimension_ba_bv_relation_event = 9 + retval.element_mapping[6].dimension_bv_event = 10 + retval.element_mapping[6].dimension_truncate_table_signal = 11 + retval.element_mapping[6].bam_rebuild = 12 + retval.element_mapping[6].dimension_timeperiod = 13 + retval.element_mapping[6].dimension_ba_timeperiod_relation = 14 + retval.element_mapping[6].dimension_timeperiod_exception = 15 + retval.element_mapping[6].dimension_timeperiod_exclusion = 16 + retval.element_mapping[6].inherited_downtime = 17 + + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v) + else + broker_log:info(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v) + end + end + + retval.hard_only = check_boolean_number_option_syntax(retval.hard_only, 1) + retval.acknowledged = check_boolean_number_option_syntax(retval.acknowledged, 0) + retval.in_downtime = check_boolean_number_option_syntax(retval.in_downtime, 0) + retval.skip_anon_events = check_boolean_number_option_syntax(retval.skip_anon_events, 1) + retval.skip_nil_id = check_boolean_number_option_syntax(retval.skip_nil_id, 1) + retval.enable_threshold_metrics = check_boolean_number_option_syntax(retval.enable_threshold_metrics, 1) + retval.enable_status_metrics = check_boolean_number_option_syntax(retval.enable_status_metrics, 1) + retval.disable_bam_host = check_boolean_number_option_syntax(retval.disable_bam_host, 1) + retval.enable_extended_metric_name = check_boolean_number_option_syntax(retval.enable_extended_metric_name, 0) + retval.add_hostgroups = check_boolean_number_option_syntax(retval.add_hostgroups, 1) + + retval.__internal_ts_last_flush = os.time() + retval.events = {} + setmetatable(retval, EventQueue) + -- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. 
retval.__internal_ts_last_flush) + + return retval +end + +-------------------------------------------------------------------------------- +-- is_valid_category: check if the event category is valid +-- @param {number} category, the category id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_category (category) + return find_in_mapping(self.category_mapping, self.category_type, category) +end + + +-------------------------------------------------------------------------------- +-- is_valid_element: check if the event element is valid +-- @param {number} category, the category id of the event +-- @param {number} element, the element id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_element (category, element) + return find_in_mapping(self.element_mapping[category], self.element_type, element) +end + +-------------------------------------------------------------------------------- +-- is_valid_neb_event: check if the neb event is valid +-- @param {table} event, the event data +-- @return {table} validNebEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_neb_event () + if self.current_event.element == 14 or self.current_event.element == 24 then + self.current_event.hostname = get_hostname(self.current_event.host_id) + + -- can't find hostname in cache + if self.current_event.hostname == self.current_event.host_id and self.skip_anon_events == 1 then + return false + end + + -- can't find host_id in the event + if self.current_event.hostname == 0 and self.skip_nil_id == 1 then + return false + end + + -- host is a BA + if (string.find(tostring(self.current_event.hostname), '_Module_BAM_') and self.disable_bam_host == 1) then + return false + end + + self.current_event.hostname = tostring(self.current_event.hostname) + + -- output isn't required, we only need perfdatas + -- self.current_event.output = ifnil_or_empty(string.match(self.current_event.output, "^(.*)\n"), 'no output') + end + + if self.current_event.element == 14 then + if not check_neb_event_status(self.current_event.state, self.host_status) then + return false + end + elseif self.current_event.element == 24 then + self.current_event.service_description = get_service_description(self.current_event.host_id, self.current_event.service_id) + + -- can't find service description in cache + if self.current_event.service_description == self.current_event.service_id and self.skip_anon_events == 1 then + return false + end + + if not check_neb_event_status(self.current_event.state, self.service_status) then + return false + end + + -- can't find service_id in the event + if self.current_event.service_description == 0 and self.skip_nil_id == 1 then + return false + end + end + + -- check hard state + if not compare_numbers(self.current_event.state_type, self.hard_only, '>=') then + return false + end + + -- check ack + if not compare_numbers(self.acknowledged, boolean_to_number(self.current_event.acknowledged), '>=') then + return false + end + + -- check downtime + if not compare_numbers(self.in_downtime, self.current_event.scheduled_downtime_depth, '>=') then + return false + end + + self.current_event.service_description = tostring(self.current_event.service_description) + + if not self:is_valid_hostgroup() then + return false + 
end
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_storage_event: check if the storage event is valid
+-- @param {table} event, the event data
+-- @return {table} validStorageEvent, a table of boolean indexes validating the event
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_storage_event ()
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_bam_event: check if the bam event is valid
+-- @param {table} event, the event data
+-- @return {table} validBamEvent, a table of boolean indexes validating the event
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_bam_event ()
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_event: check if the event is valid
+-- @param {table} event, the event data
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_event ()
+  local validEvent = false
+
+  if self.current_event.category == 1 then
+    validEvent = self:is_valid_neb_event()
+  elseif self.current_event.category == 3 then
+    validEvent = self:is_valid_storage_event()
+  elseif self.current_event.category == 6 then
+    validEvent = self:is_valid_bam_event()
+  end
+
+  return validEvent
+end
+
+--------------------------------------------------------------------------------
+-- is_valid_hostgroup: check if the event is associated to an accepted hostgroup
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:is_valid_hostgroup ()
+  self.current_event.hostgroups = get_hostgroups(self.current_event.host_id)
+
+  -- return true if option is not set
+  if self.accepted_hostgroups == '' then
+    return true
+  end
+
+  -- drop event if we can't find any hostgroup on the host
+  if not self.current_event.hostgroups then
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: dropping event because no hostgroup has been found for host_id: ' .. self.current_event.host_id)
+    return false
+  end
+
+  -- check if hostgroup is in the list of the accepted ones
+  local retval, matchedHostgroup = find_hostgroup_in_list(split(self.accepted_hostgroups, ','), self.current_event.hostgroups)
+
+  if matchedHostgroup == nil then
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: no hostgroup matched the provided list: ' .. self.accepted_hostgroups .. ' for host_id: ' .. self.current_event.host_id)
+  else
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: host_id: ' .. self.current_event.host_id .. ' is in the following accepted hostgroup: ' .. matchedHostgroup)
+  end
+
+  return retval
+end
+
+
+--------------------------------------------------------------------------------
+-- display_hostgroups: create the hostgroup label for the metric
+-- @return {string} hostgroupLabel: the full label for the metric
+--------------------------------------------------------------------------------
+function EventQueue:display_hostgroups ()
+  if not self.current_event.hostgroups then
+    return false
+  end
+
+  local hostgroupLabel = 'hostgroup="'
+  local counter = 0
+
+  for i, v in pairs(self.current_event.hostgroups) do
+    if counter == 0 then
+      hostgroupLabel = hostgroupLabel .. v.group_name
+      counter = 1
+    else
+      hostgroupLabel = hostgroupLabel .. ',' .. 
v.group_name
+    end
+  end
+
+  hostgroupLabel = hostgroupLabel .. '"'
+
+  return hostgroupLabel
+end
+
+
+--------------------------------------------------------------------------------
+-- format_data: prepare the event data so it can be sent
+-- @return {table|string|number} data, the formatted data
+--------------------------------------------------------------------------------
+function EventQueue:format_data ()
+  local perf, perf_error = broker.parse_perfdata(self.current_event.perfdata, true)
+  local type = nil
+  local data = ''
+  local name = nil
+  local unit = nil
+
+  -- handle hostgroups
+  if self.add_hostgroups == 1 then
+    self.current_event.hostgroupsLabel = self:display_hostgroups()
+  else
+    self.current_event.hostgroupsLabel = false
+  end
+
+  for label, metric in pairs(perf) do
+    type = self:get_metric_type(metric)
+    unit = unit_mapping(metric.uom)
+    name = self:create_metric_name(label, unit)
+
+    data = data .. '# TYPE ' .. name .. ' ' .. type .. '\n'
+    data = data .. self:add_unit_info(label, unit, name)
+
+    if not self.current_event.hostgroupsLabel then
+      data = data .. name .. '{label="' .. label .. '", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '"} ' .. metric.value .. '\n'
+    else
+      data = data .. name .. '{label="' .. label .. '", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '", ' .. self.current_event.hostgroupsLabel .. '} ' .. metric.value .. '\n'
+    end
+
+    if (self.enable_threshold_metrics == 1) then
+      data = data .. self:threshold_metrics(metric, label, unit, type)
+    end
+  end
+
+  if (self.enable_status_metrics == 1) then
+    -- "label" is only defined inside the loop above, so the status metric name must not rely on it
+    name = convert_to_openmetric(self.current_event.hostname .. '_' .. self.current_event.service_description .. ':monitoring_status')
+    data = data .. '# TYPE ' .. name .. ' counter\n'
+    data = data .. '# HELP ' .. name .. ' 0 is OK, 1 is WARNING, 2 is CRITICAL, 3 is UNKNOWN\n'
+    if not self.current_event.hostgroupsLabel then
+      data = data .. name .. '{label="monitoring_status", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '"} ' .. self.current_event.state .. '\n'
+    else
+      data = data .. name .. '{label="monitoring_status", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '", ' .. self.current_event.hostgroupsLabel .. '} ' .. self.current_event.state .. '\n'
+    end
+  end
+
+  return data
+end
+
+--------------------------------------------------------------------------------
+-- create_metric_name: concatenates data to create the metric name
+-- @param {string} label, the name of the perfdata
+-- @param {string} unit, the unit name
+-- @return {string} name, the prometheus metric name (open metric format)
+--------------------------------------------------------------------------------
+function EventQueue:create_metric_name (label, unit)
+  local name = ''
+
+  if (unit ~= '') then
+    if (self.enable_extended_metric_name == 0) then
+      name = label .. '_' .. unit
+    else
+      name = self.current_event.hostname .. '_' .. self.current_event.service_description .. ':' .. label .. '_' .. unit
+    end
+  else
+    if (self.enable_extended_metric_name == 0) then
+      name = label
+    else
+      name = self.current_event.hostname .. '_' .. self.current_event.service_description .. ':' .. 
label
+    end
+  end
+
+  return convert_to_openmetric(name)
+end
+
+--------------------------------------------------------------------------------
+-- get_metric_type: find out the metric type to match openmetrics standard
+-- @param {table} perfdata, the perfdata information
+-- @return {string} metricType, the type of the metric
+--------------------------------------------------------------------------------
+function EventQueue:get_metric_type (perfdata)
+  local metricType = nil
+  if (ifnumber_not_nan(perfdata.max)) then
+    metricType = 'gauge'
+  else
+    metricType = 'counter'
+  end
+
+  return metricType
+end
+
+--------------------------------------------------------------------------------
+-- add_unit_info: add unit metadata to match openmetrics standard
+-- @param {string} label, the name of the metric
+-- @param {string} unit, the unit name
+-- @param {string} name, the name of the metric
+-- @return {string} data, the unit metadata information
+--------------------------------------------------------------------------------
+function EventQueue:add_unit_info (label, unit, name)
+  local data = ''
+
+  if (unit ~= '' and unit ~= nil) then
+    -- the "# UNIT" line carries the unit value, as done in threshold_metrics_format
+    data = '# UNIT ' .. name .. ' ' .. unit .. '\n'
+  end
+
+  return data
+end
+
+--------------------------------------------------------------------------------
+-- add_type_info: build the full metric name with a threshold suffix
+-- @param {string} label, the name of the metric
+-- @param {string} unit, the unit name
+-- @param {string} suffix, a suffix that is part of the metric name
+-- @return {string} name, the full metric name (open metric format)
+--------------------------------------------------------------------------------
+function EventQueue:add_type_info (label, unit, suffix)
+  return self:create_metric_name(label, unit) .. '_' .. suffix
+end
+
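+--------------------------------------------------------------------------------
+-- Illustrative result (hypothetical host "srv1", service "Cpu", perfdata
+-- "used=80%;90;95;0;100", with add_hostgroups = 0, enable_threshold_metrics = 1
+-- and enable_extended_metric_name = 0): format_data and the threshold helpers
+-- below produce OpenMetrics text along these lines:
+--
+--   # TYPE used_ratios gauge
+--   # UNIT used_ratios ratios
+--   used_ratios{label="used", host="srv1", service="Cpu"} 80
+--   # TYPE used_ratios_warning_high gauge
+--   # UNIT used_ratios_warning_high ratios
+--   # HELP used_ratios_warning_high values above this will trigger a warning alert
+--   used_ratios_warning_high{label="used", host="srv1", service="Cpu"} 90
+--------------------------------------------------------------------------------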
+--------------------------------------------------------------------------------
+-- threshold_metrics: create openmetrics metrics based on alert thresholds from centreon
+-- @param {table} perfdata, perfdata information
+-- @param {string} label, the name of the metric
+-- @param {string} unit, the unit name
+-- @param {string} type, the type of unit (counter, gauge...)
+-- @return {string} data, metrics based on alert thresholds
+--------------------------------------------------------------------------------
+function EventQueue:threshold_metrics (perfdata, label, unit, type)
+  local data = ''
+  local metricName = nil
+  local message = nil
+
+  if (ifnumber_not_nan(perfdata.warning_low)) then
+    metricName = self:add_type_info(label, unit, 'warning_low')
+    message = 'values below this will trigger a warning alert\n'
+    data = data .. self:threshold_metrics_format(metricName, label, unit, type, message, perfdata.warning_low)
+  end
+
+  if (ifnumber_not_nan(perfdata.warning_high)) then
+    metricName = self:add_type_info(label, unit, 'warning_high')
+    message = 'values above this will trigger a warning alert\n'
+    data = data .. self:threshold_metrics_format(metricName, label, unit, type, message, perfdata.warning_high)
+  end
+
+  if (ifnumber_not_nan(perfdata.critical_low)) then
+    metricName = self:add_type_info(label, unit, 'critical_low')
+    message = 'values below this will trigger a critical alert\n'
+    data = data .. self:threshold_metrics_format(metricName, label, unit, type, message, perfdata.critical_low)
+  end
+
+  if (ifnumber_not_nan(perfdata.critical_high)) then
+    metricName = self:add_type_info(label, unit, 'critical_high')
+    message = 'values above this will trigger a critical alert\n'
+    data = data .. self:threshold_metrics_format(metricName, label, unit, type, message, perfdata.critical_high)
+  end
+
+  return data
+end
+
+--------------------------------------------------------------------------------
+-- threshold_metrics_format: create data format for threshold metrics
+-- @param {string} metricName, the formatted metric name
+-- @param {string} label, the name of the metric
+-- @param {string} unit, the unit name
+-- @param {string} type, the type of unit (counter, gauge...)
+-- @param {string} message, the help message for the metric
+-- @param {number} perfdata, the threshold value
+-- @return {string} data, metrics based on alert thresholds
+--------------------------------------------------------------------------------
+function EventQueue:threshold_metrics_format (metricName, label, unit, type, message, perfdata)
+  local data = ''
+
+  data = data .. '# TYPE ' .. metricName .. ' ' .. type .. '\n'
+  data = data .. '# UNIT ' .. metricName .. ' ' .. unit .. '\n'
+  data = data .. '# HELP ' .. metricName .. ' ' .. message
+
+  if not self.current_event.hostgroupsLabel then
+    data = data .. metricName .. '{label="' .. label .. '", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '"} ' .. perfdata .. '\n'
+  else
+    -- hostgroupsLabel already contains its closing quote, so no extra '"' must be appended here
+    data = data .. metricName .. '{label="' .. label .. '", host="' .. self.current_event.hostname .. '", service="' .. self.current_event.service_description .. '", ' .. self.current_event.hostgroupsLabel .. '} ' .. perfdata .. '\n'
+  end
+
+  return data
+end
+
+local queue
+
+--------------------------------------------------------------------------------
+-- init, initiate stream connector with parameters from the configuration file
+-- @param {table} parameters, the table with all the configuration parameters
+--------------------------------------------------------------------------------
+function init (parameters)
+  local logfile = parameters.logfile or "/var/log/centreon-broker/connector-prometheus-gateway.log"
+  broker_log:set_parameters(1, logfile)
+  broker_log:info(1, "Parameters")
+  for i,v in pairs(parameters) do
+    broker_log:info(1, "Init " .. i .. " : " .. v)
+  end
+
+  queue = EventQueue:new(parameters)
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the queue
+-- @param {table} event, the event that will be added to the queue
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:add (data)
+  self.events[#self.events + 1] = data
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush, flush stored events
+-- Called when the max number of events or the max age are reached
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:flush ()
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")
+
+  if not self:send_data() then
+    broker_log:error(1, "EventQueue:flush: couldn't send data, flushing data anyway")
+  end
+
+  self.events = {}
+
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+
+  return true
+end
+
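+--------------------------------------------------------------------------------
+-- The pushgateway URL built in EventQueue:send_data below follows this layout
+-- (a sketch with the default parameters and a hypothetical host "srv1" running
+-- a service "Cpu"; base64.encode("Cpu") == "Q3B1"):
+--
+--   http://localhost:9091/metrics/job/monitoring/instance/srv1/service@base64/Q3B1
+--------------------------------------------------------------------------------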
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to external tool
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:send_data ()
+  local httpPostData = ''
+
+  for _, raw_event in ipairs(self.events) do
+    httpPostData = httpPostData .. raw_event
+  end
+
+  local httpResponseBody = ""
+  local httpRequest = curl.easy()
+    :setopt_url(self.prometheus_gateway_address .. ':' .. self.prometheus_gateway_port .. '/metrics/job/' .. self.prometheus_gateway_job .. '/instance/' .. self.current_event.hostname .. '/service@base64/' .. base64.encode(self.current_event.service_description))
+    :setopt_writefunction(
+      function (response)
+        httpResponseBody = httpResponseBody .. tostring(response)
+      end
+    )
+    :setopt(curl.OPT_TIMEOUT, self.http_timeout)
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "content-type: application/openmetrics-text"
+      }
+    )
+
+  -- set proxy address configuration
+  if (self.proxy_address ~= '') then
+    if (self.proxy_port ~= '') then
+      httpRequest:setopt(curl.OPT_PROXY, self.proxy_address .. ':' .. self.proxy_port)
+    else
+      broker_log:error(1, "EventQueue:send_data: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.proxy_username ~= '') then
+    if (self.proxy_password ~= '') then
+      httpRequest:setopt(curl.OPT_PROXYUSERPWD, self.proxy_username .. ':' .. self.proxy_password)
+    else
+      broker_log:error(1, "EventQueue:send_data: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- adding the HTTP POST data
+  broker_log:info(3, "EventQueue:send_data: POST data: '" .. httpPostData .. "'")
+  httpRequest:setopt_postfields(httpPostData)
+
+  -- performing the HTTP request
+  httpRequest:perform()
+
+  -- collecting results
+  local httpResponseCode = httpRequest:getinfo(curl.INFO_RESPONSE_CODE)
+
+  httpRequest:close()
+
+  -- Handling the return code
+  local retval = false
+  if httpResponseCode == 200 then
+    broker_log:info(2, "EventQueue:send_data: HTTP POST request successful: return code is " .. httpResponseCode)
+    -- now that the data has been sent, we empty the events array
+    self.events = {}
+    retval = true
+  else
+    broker_log:error(1, "EventQueue:send_data: the request body was: " .. httpPostData)
+    broker_log:error(1, "EventQueue:send_data: HTTP POST request FAILED, return code is " .. httpResponseCode .. " message is:\n\"" .. httpResponseBody .. "\n\"\n")
+  end
+
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {array} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  queue.current_event = event
+
+  -- First, are there any old events waiting in the flush queue?
+  if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then
+    broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  -- Then we check that the event queue is not already full
+  if (#queue.events >= queue.max_buffer_size) then
+    broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.")
+    queue:flush()
+  end
+
+  if not queue:is_valid_category(event.category) then
+    broker_log:info(3, 'write: event category is ' .. event.category .. ' and is not valid')
+    return true
+  end
+
+  if not queue:is_valid_element(event.category, event.element) then
+    broker_log:info(3, 'write: event element is ' .. event.element .. 
' and is not valid') + return true + end + + -- adding event to the queue + if queue:is_valid_event() then + queue:add(queue:format_data()) + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + + return true +end diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua b/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua new file mode 100644 index 00000000000..ceec49ab048 --- /dev/null +++ b/stream-connectors/centreon-certified/servicenow/servicenow-apiv1.lua @@ -0,0 +1,898 @@ +#!/usr/bin/lua + +-------------------------------------------------------------------------------- +-- Centreon Broker Service Now connector +-- documentation: https://docs.centreon.com/current/en/integrations/stream-connectors/servicenow.html +-------------------------------------------------------------------------------- + + +-- libraries +local curl = require "cURL" + +-- Global variables + +-- Useful functions + +-------------------------------------------------------------------------------- +-- ifnil_or_empty: change a nil or empty variable for a specified value +-- @param var, the variable that needs to be checked +-- @param alt, the value of the variable if it is nil or empty +-- @return alt|var, the alternate value or the variable value +-------------------------------------------------------------------------------- +local function ifnil_or_empty(var, alt) + if var == nil or var == '' then + return alt + else + return var + end +end + +-------------------------------------------------------------------------------- +-- boolean_to_number: convert boolean variable to number +-- @param {boolean} boolean, the boolean that will be converted +-- @return {number}, a number according to the boolean value +-------------------------------------------------------------------------------- +local function boolean_to_number (boolean) + return boolean and 1 or 0 +end + +-------------------------------------------------------------------------------- +-- check_boolean_number_option_syntax: make sure the number is either 1 or 0 +-- @param {number} number, the boolean number that must be validated +-- @param {number} default, the default value that is going to be return if the default number is not validated +-- @return {number} number, a boolean number +-------------------------------------------------------------------------------- +local function check_boolean_number_option_syntax (number, default) + if number ~= 1 and number ~= 0 then + number = default + end + + return number +end + +-------------------------------------------------------------------------------- +-- get_hostname: retrieve hostname from host_id +-- @param {number} host_id, +-- @return {string} hostname, +-------------------------------------------------------------------------------- +local function get_hostname (host_id) + if host_id == nil then + broker_log:warning(1, "get_hostname: host id is nil") + hostname = 0 + return hostname + end + + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + broker_log:warning(1, "get_hostname: hostname for id " .. host_id .. " not found. 
Restarting centengine should fix this.") + hostname = host_id + end + + return hostname +end + +-------------------------------------------------------------------------------- +-- get_service_description: retrieve the service name from its host_id and service_id +-- @param {number} host_id, +-- @param {number} service_id, +-- @return {string} service, the name of the service +-------------------------------------------------------------------------------- +local function get_service_description (host_id, service_id) + if host_id == nil or service_id == nil then + service = 0 + broker_log:warning(1, "get_service_description: host id or service id has a nil value") + + return service + end + + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + broker_log:warning(1, "get_service_description: service_description for id " .. host_id .. "." .. service_id .. " not found. Restarting centengine should fix this.") + service = service_id + end + + return service +end + +-------------------------------------------------------------------------------- +-- get_hostgroups: retrieve hostgroups from host_id +-- @param {number} host_id, +-- @return {array} hostgroups, +-------------------------------------------------------------------------------- +local function get_hostgroups (host_id) + if host_id == nil then + broker_log:warning(1, "get_hostgroup: host id is nil") + return false + end + + local hostgroups = broker_cache:get_hostgroups(host_id) + + if not hostgroups then + return false + end + + return hostgroups +end + +-------------------------------------------------------------------------------- +-- split: convert a string into a table +-- @param {string} string, the string that is going to be splitted into a table +-- @param {string} separatpr, the separator character that will be used to split the string +-- @return {table} table, +-------------------------------------------------------------------------------- +local function split (text, separator) + local hash = {} + -- https://stackoverflow.com/questions/1426954/split-string-in-lua + for value in string.gmatch(text, "([^" .. separator .. 
"]+)") do + table.insert(hash, value) + end + + return hash +end + +-------------------------------------------------------------------------------- +-- find_in_mapping: check if item type is in the mapping and is accepted +-- @param {table} mapping, the mapping table +-- @param {string} reference, the accepted values for the item +-- @param {string} item, the item we want to find in the mapping table and in the reference +-- @return {boolean} +-------------------------------------------------------------------------------- +local function find_in_mapping (mapping, reference, item) + for mappingIndex, mappingValue in pairs(mapping) do + for referenceIndex, referenceValue in pairs(split(reference, ',')) do + if item == mappingValue and mappingIndex == referenceValue then + return true + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- find_hostgroup_in_list: check if hostgroups from hosts are in an accepted list from the stream connector configuration +-- @param {table} acceptedHostgroups, the table with the name of accepted hostgroups +-- @param {table} hostHostgroups, the hostgroups associated to an host +-- @return {boolean} +-- @return {string} [optional] acceptedHostgroupsName, the hostgroup name that matched +-------------------------------------------------------------------------------- +local function find_hostgroup_in_list (acceptedHostgroups, hostHostgroups) + for _, acceptedHostgroupsName in ipairs(acceptedHostgroups) do + for _, hostHostgroupsInfo in pairs(hostHostgroups) do + if acceptedHostgroupsName == hostHostgroupsInfo.group_name then + return true, acceptedHostgroupsName + end + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- check_neb_event_status: check the status of a neb event (ok, critical...) +-- @param {number} eventStatus, the status of the event +-- @param {string} acceptedStatus, the event statuses that are going to be accepted +-- @return {boolean} +-------------------------------------------------------------------------------- +local function check_neb_event_status (eventStatus, acceptedStatuses) + for i, v in ipairs(split(acceptedStatuses, ',')) do + if tostring(eventStatus) == v then + return true + end + end + + return false +end + +-------------------------------------------------------------------------------- +-- compare_numbers: compare two numbers, if comparison is valid, then return true +-- @param {number} firstNumber +-- @param {number} secondNumber +-- @param {string} operator, the mathematical operator that is used for the comparison +-- @return {boolean} +-------------------------------------------------------------------------------- +local function compare_numbers (firstNumber, secondNumber, operator) + if type(firstNumber) ~= 'number' or type(secondNumber) ~= 'number' then + return false + end + + if firstNumber .. operator .. 
secondNumber then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue:new (conf) + local retval = { + host_status = "0,1,2", -- = ok, down, unreachable + service_status = "0,1,2,3", -- = ok, warning, critical, unknown + hard_only = 1, + acknowledged = 0, + element_type = "host_status,service_status", -- could be: metric,host_status,service_status,ba_event,kpi_event" (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#neb) + category_type = "neb", -- could be: neb,storage,bam (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#event-categories) + accepted_hostgroups = '', + in_downtime = 0, + max_buffer_size = 10, + max_buffer_age = 5, + max_stored_events = 10, -- do not use values above 100 + skip_anon_events = 1, + skip_nil_id = 1, + element_mapping = {}, + category_mapping = {}, + instance = '', + username = '', + password = '', + client_id = '', + client_secret = '', + proxy_address = '', + proxy_port = '', + proxy_username = '', + proxy_password = '', + validatedEvents = {}, + tokens = {} + } + + retval.category_mapping = { + neb = 1, + bbdo = 2, + storage = 3, + correlation = 4, + dumper = 5, + bam = 6, + extcmd = 7 + } + + retval.element_mapping = { + [1] = {}, + [3] = {}, + [6] = {} + } + + retval.element_mapping[1].acknowledgement = 1 + retval.element_mapping[1].comment = 2 + retval.element_mapping[1].custom_variable = 3 + retval.element_mapping[1].custom_variable_status = 4 + retval.element_mapping[1].downtime = 5 + retval.element_mapping[1].event_handler = 6 + retval.element_mapping[1].flapping_status = 7 + retval.element_mapping[1].host_check = 8 + retval.element_mapping[1].host_dependency = 9 + retval.element_mapping[1].host_group = 10 + retval.element_mapping[1].host_group_member = 11 + retval.element_mapping[1].host = 12 + retval.element_mapping[1].host_parent = 13 + retval.element_mapping[1].host_status = 14 + retval.element_mapping[1].instance = 15 + retval.element_mapping[1].instance_status = 16 + retval.element_mapping[1].log_entry = 17 + retval.element_mapping[1].module = 18 + retval.element_mapping[1].service_check = 19 + retval.element_mapping[1].service_dependency = 20 + retval.element_mapping[1].service_group = 21 + retval.element_mapping[1].service_group_member = 22 + retval.element_mapping[1].service = 23 + retval.element_mapping[1].service_status = 24 + retval.element_mapping[1].instance_configuration = 25 + + retval.element_mapping[3].metric = 1 + retval.element_mapping[3].rebuild = 2 + retval.element_mapping[3].remove_graph = 3 + retval.element_mapping[3].status = 4 + retval.element_mapping[3].index_mapping = 5 + retval.element_mapping[3].metric_mapping = 6 + + retval.element_mapping[6].ba_status = 1 + retval.element_mapping[6].kpi_status = 2 + retval.element_mapping[6].meta_service_status = 3 + retval.element_mapping[6].ba_event = 4 + retval.element_mapping[6].kpi_event = 5 + retval.element_mapping[6].ba_duration_event = 6 + retval.element_mapping[6].dimension_ba_event = 7 + 
retval.element_mapping[6].dimension_kpi_event = 8 + retval.element_mapping[6].dimension_ba_bv_relation_event = 9 + retval.element_mapping[6].dimension_bv_event = 10 + retval.element_mapping[6].dimension_truncate_table_signal = 11 + retval.element_mapping[6].bam_rebuild = 12 + retval.element_mapping[6].dimension_timeperiod = 13 + retval.element_mapping[6].dimension_ba_timeperiod_relation = 14 + retval.element_mapping[6].dimension_timeperiod_exception = 15 + retval.element_mapping[6].dimension_timeperiod_exclusion = 16 + retval.element_mapping[6].inherited_downtime = 17 + + retval.tokens.authToken = nil + retval.tokens.refreshToken = nil + + + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + if i == 'client_secret' or i == 'password' then + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => *********") + else + broker_log:info(1, "EventQueue.new: getting parameter " .. i .. " => " .. v) + end + else + broker_log:info(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v) + end + end + + retval.hard_only = check_boolean_number_option_syntax(retval.hard_only, 1) + retval.acknowledged = check_boolean_number_option_syntax(retval.acknowledged, 0) + retval.in_downtime = check_boolean_number_option_syntax(retval.in_downtime, 0) + retval.skip_anon_events = check_boolean_number_option_syntax(retval.skip_anon_events, 1) + retval.skip_nil_id = check_boolean_number_option_syntax(retval.skip_nil_id, 1) + + retval.__internal_ts_last_flush = os.time() + retval.events = {} + setmetatable(retval, EventQueue) + -- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + + return retval +end + +-------------------------------------------------------------------------------- +-- getAuthToken: obtain a auth token +-- @return {string} self.tokens.authToken.token, the auth token +-------------------------------------------------------------------------------- +function EventQueue:getAuthToken () + if not self:refreshTokenIsValid() then + self:authToken() + end + + if not self:accessTokenIsValid() then + self:refreshToken(self.tokens.refreshToken.token) + end + + return self.tokens.authToken.token +end + +-------------------------------------------------------------------------------- +-- authToken: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:authToken () + local data = "grant_type=password&client_id=" .. self.client_id .. "&client_secret=" .. self.client_secret .. "&username=" .. self.username .. "&password=" .. self.password + + local res = self:call( + "oauth_token.do", + "POST", + data + ) + + if not res.access_token then + broker_log:error(1, "EventQueue:authToken: Authentication failed, couldn't get tokens") + return false + end + + self.tokens.authToken = { + token = res.access_token, + expTime = os.time(os.date("!*t")) + 1700 + } + + self.tokens.refreshToken = { + token = res.refresh_token, + expTime = os.time(os.date("!*t")) + 360000 + } +end + +-------------------------------------------------------------------------------- +-- refreshToken: refresh auth token +-------------------------------------------------------------------------------- +function EventQueue:refreshToken (token) + local data = "grant_type=refresh_token&client_id=" .. self.client_id .. "&client_secret=" .. self.client_secret .. "&username=" .. self.username .. "&password=" .. self.password .. "&refresh_token=" .. 
token + + local res = self:call( + "oauth_token.do", + "POST", + data + ) + + if not res.access_token then + broker_log:error(1, 'EventQueue:refreshToken Bad access token') + return false + end + + self.tokens.authToken = { + token = res.access_token, + expTime = os.time(os.date("!*t")) + 1700 + } +end + +-------------------------------------------------------------------------------- +-- refreshTokenIsValid: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:refreshTokenIsValid () + if not self.tokens.refreshToken then + return false + end + + if os.time(os.date("!*t")) > self.tokens.refreshToken.expTime then + self.tokens.refreshToken = nil + return false + end + + return true +end + +-------------------------------------------------------------------------------- +-- accessTokenIsValid: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:accessTokenIsValid () + if not self.tokens.authToken then + return false + end + + if os.time(os.date("!*t")) > self.tokens.authToken.expTime then + self.tokens.authToken = nil + return false + end + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:call run api call +-- @param {string} url, the service now instance url +-- @param {string} method, the HTTP method that is used +-- @param {string} data, the data we want to send to service now +-- @param {string} authToken, the api auth token +-- @return {array} decoded output +-- @throw exception if http call fails or response is empty +-------------------------------------------------------------------------------- +function EventQueue:call (url, method, data, authToken) + method = method or "GET" + data = data or nil + authToken = authToken or nil + + local endpoint = "https://" .. tostring(self.instance) .. ".service-now.com/" .. tostring(url) + broker_log:info(3, "EventQueue:call: Prepare url " .. endpoint) + + local res = "" + local request = curl.easy() + :setopt_url(endpoint) + :setopt_writefunction(function (response) + res = res .. tostring(response) + end) + + broker_log:info(3, "EventQueue:call: Request initialize") + + -- set proxy address configuration + if (self.proxy_address ~= '') then + if (self.proxy_port ~= '') then + request:setopt(curl.OPT_PROXY, self.proxy_address .. ':' .. self.proxy_port) + else + broker_log:error(1, "EventQueue:call: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.proxy_username ~= '') then + if (self.proxy_password ~= '') then + request:setopt(curl.OPT_PROXYUSERPWD, self.proxy_username .. ':' .. self.proxy_password) + else + broker_log:error(1, "EventQueue:call: proxy_password parameter is not set but proxy_username is used") + end + end + + if not authToken then + if method ~= "GET" then + broker_log:info(3, "EventQueue:call: Add form header") + request:setopt(curl.OPT_HTTPHEADER, { "Content-Type: application/x-www-form-urlencoded" }) + end + else + broker_log:info(3, "Add JSON header") + request:setopt( + curl.OPT_HTTPHEADER, + { + "Accept: application/json", + "Content-Type: application/json", + "Authorization: Bearer " .. authToken + } + ) + end + + if method ~= "GET" then + broker_log:info(3, "EventQueue:call: Add post data") + request:setopt_postfields(data) + end + + broker_log:info(3, "EventQueue:call: request body " .. 
tostring(data)) + broker_log:info(3, "EventQueue:call: request header " .. tostring(authToken)) + broker_log:info(3, "EventQueue:call: Call url " .. endpoint) + request:perform() + + respCode = request:getinfo(curl.INFO_RESPONSE_CODE) + broker_log:info(3, "EventQueue:call: HTTP Code : " .. respCode) + broker_log:info(3, "EventQueue:call: Response body : " .. tostring(res)) + + request:close() + + if respCode >= 300 then + broker_log:info(1, "EventQueue:call: HTTP Code : " .. respCode) + broker_log:info(1, "EventQueue:call: HTTP Error : " .. res) + return false + end + + if res == "" then + broker_log:info(1, "EventQueue:call: HTTP Error : " .. res) + return false + end + + return broker.json_decode(res) +end + +-------------------------------------------------------------------------------- +-- is_valid_category: check if the event category is valid +-- @param {number} category, the category id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_category (category) + return find_in_mapping(self.category_mapping, self.category_type, category) +end + +-------------------------------------------------------------------------------- +-- is_valid_element: check if the event element is valid +-- @param {number} category, the category id of the event +-- @param {number} element, the element id of the event +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_element (category, element) + return find_in_mapping(self.element_mapping[category], self.element_type, element) +end + +-------------------------------------------------------------------------------- +-- is_valid_neb_event: check if the neb event is valid +-- @return {table} validNebEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_neb_event () + if self.currentEvent.element == 14 or self.currentEvent.element == 24 then + self.currentEvent.hostname = get_hostname(self.currentEvent.host_id) + + -- can't find hostname in cache + if self.currentEvent.hostname == self.currentEvent.host_id and self.skip_anon_events == 1 then + return false + end + + -- can't find host_id in the event + if self.currentEvent.hostname == 0 and self.skip_nil_id == 1 then + return false + end + + if (string.find(self.currentEvent.hostname, '^_Module_BAM_*')) then + return false + end + + self.currentEvent.output = ifnil_or_empty(string.match(self.currentEvent.output, "^(.*)\n"), 'no output') + self.sendData.source = 'centreon' + self.sendData.event_class = 'centreon' + self.sendData.severity = 5 + self.sendData.node = self.currentEvent.hostname + self.sendData.time_of_event = os.date("!%Y-%m-%d %H:%M:%S", self.currentEvent.last_check) + self.sendData.description = self.currentEvent.output + end + + if self.currentEvent.element == 14 then + if not check_neb_event_status(self.currentEvent.state, self.host_status) then + return false + end + + self.sendData.resource = self.currentEvent.hostname + if self.currentEvent.state == 0 then + self.sendData.severity = 0 + elseif self.currentEvent.state == 1 then + self.sendData.severity = 1 + end + + elseif self.currentEvent.element == 24 then + self.currentEvent.serviceDescription = get_service_description(self.currentEvent.host_id, self.currentEvent.service_id) + + -- can't find service description in cache + if 
self.currentEvent.serviceDescription == self.currentEvent.service_id and self.skip_anon_events == 1 then + return false + end + + if not check_neb_event_status(self.currentEvent.state, self.service_status) then + return false + end + + -- can't find service_id in the event + if self.currentEvent.serviceDescription == 0 and self.skip_nil_id == 1 then + return false + end + + self.currentEvent.svc_severity = broker_cache:get_severity(self.currentEvent.host_id,self.currentEvent.service_id) + + end + + -- check hard state + if not compare_numbers(self.currentEvent.state_type, self.hard_only, '>=') then + return false + end + + -- check ack + if not compare_numbers(self.acknowledged, boolean_to_number(self.currentEvent.acknowledged), '>=') then + return false + end + + -- check downtime + if not compare_numbers(self.in_downtime, self.currentEvent.scheduled_downtime_depth, '>=') then + return false + end + + local my_retval = self:is_valid_hostgroup() + + if not self:is_valid_hostgroup() then + return false + end + + self.sendData.resource = self.currentEvent.serviceDescription + if self.currentEvent.state == 0 then + self.sendData.severity = 0 + elseif self.currentEvent.state == 1 then + self.sendData.severity = 3 + elseif self.currentEvent.state == 2 then + self.sendData.severity = 1 + elseif self.currentEvent.state == 3 then + self.sendData.severity = 4 + end + + return true +end + +-------------------------------------------------------------------------------- +-- is_valid_storage_event: check if the storage event is valid +-- @return {table} validStorageEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_storage_event () + return true +end + +-------------------------------------------------------------------------------- +-- is_valid_bam_event: check if the bam event is valid +-- @return {table} validBamEvent, a table of boolean indexes validating the event +-------------------------------------------------------------------------------- +function EventQueue:is_valid_bam_event () + return true +end + +-------------------------------------------------------------------------------- +-- is_valid_event: check if the event is valid +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_event () + local validEvent = false + self.sendData = {} + if self.currentEvent.category == 1 then + validEvent = self:is_valid_neb_event() + elseif self.currentEvent.category == 3 then + validEvent = self:is_valid_storage_event() + elseif self.currentEvent.category == 6 then + validEvent = self:is_valid_bam_event() + end + + return validEvent +end + +-------------------------------------------------------------------------------- +-- : check if the event is associated to an accepted hostgroup +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_valid_hostgroup () + self.currentEvent.hostgroups = get_hostgroups(self.currentEvent.host_id) + + -- return true if option is not set + if self.accepted_hostgroups == '' then + return true + end + + -- drop event if we can't find any hostgroup on the host + if not self.currentEvent.hostgroups then + broker_log:info(2, 'EventQueue:is_valid_hostgroup: dropping event because no hostgroup has been found for host_id: ' .. 
self.currentEvent.host_id)
+    return false
+  end
+
+  -- check if one of the host's hostgroups is in the list of accepted hostgroups
+  local retval, matchedHostgroup = find_hostgroup_in_list(split(self.accepted_hostgroups, ','), self.currentEvent.hostgroups)
+  if matchedHostgroup == nil then
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: no hostgroup matched provided list: ' .. self.accepted_hostgroups .. ' for host_id: ' .. self.currentEvent.host_id .. '')
+  else
+    broker_log:info(2, 'EventQueue:is_valid_hostgroup: host_id: ' .. self.currentEvent.host_id .. ' is in the following accepted hostgroup: ' .. matchedHostgroup)
+  end
+
+  return retval
+end
+
+
+local queue
+
+--------------------------------------------------------------------------------
+-- init, initiate stream connector with parameters from the configuration file
+-- @param {table} parameters, the table with all the configuration parameters
+--------------------------------------------------------------------------------
+function init (parameters)
+  local logfile = parameters.logfile or "/var/log/centreon-broker/connector-servicenow.log"
+
+  if not parameters.instance or not parameters.username or not parameters.password
+    or not parameters.client_id or not parameters.client_secret then
+    broker_log:error(1, 'Required parameters are: instance, username, password, client_id and client_secret. Their type must be string')
+  end
+
+  broker_log:set_parameters(1, logfile)
+  broker_log:info(1, "Parameters")
+  for i,v in pairs(parameters) do
+    if i == 'client_secret' or i == 'password' then
+      broker_log:info(1, "Init " .. i .. " : *********")
+    else
+      broker_log:info(1, "Init " .. i .. " : " .. v)
+    end
+  end
+
+  queue = EventQueue:new(parameters)
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add the formatted event stored in self.sendData to the queue
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:add ()
+  self.events[#self.events + 1] = self.sendData
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush, flush stored events
+-- Called when the max number of events or the max age are reached
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:flush ()
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")
+
+  local retval = self:send_data()
+
+  self.events = {}
+
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to external tool
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:send_data ()
+  local data = ''
+  local authToken = self:getAuthToken()
+  local counter = 0
+
+  for _, raw_event in ipairs(self.events) do
+    if counter == 0 then
+      data = broker.json_encode(raw_event)
+      counter = counter + 1
+    else
+      data = data .. ',' .. broker.json_encode(raw_event)
+    end
+  end
+
+  data = '{"records":[' .. data .. ']}'
+  broker_log:info(2, 'EventQueue:send_data: creating json: ' ..
data) + + if self:call( + "api/global/em/jsonv2", + "POST", + data, + authToken + ) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- write, +-- @param {array} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + + -- drop event if wrong category + if not queue:is_valid_category(event.category) then + return true + end + + -- drop event if wrong element + if not queue:is_valid_element(event.category, event.element) then + return false + end + + queue.currentEvent = event + + -- START FIX FOR BROKER SENDING DUPLICATED EVENTS + -- do not compute event if it is duplicated + if queue:is_event_duplicated() then + return true + end + -- END OF FIX + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.max_buffer_size) then + broker_log:warning(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events.") + queue:flush() + end + + -- adding event to the queue + if queue:is_valid_event() then + + -- START FIX FOR BROKER SENDING DUPLICATED EVENTS + -- create id from event data + if queue.currentEvent.element == 14 then + eventId = tostring(queue.currentEvent.host_id) .. '_' .. tostring(queue.currentEvent.last_check) + else + eventId = tostring(queue.currentEvent.host_id) .. '_' .. tostring(queue.currentEvent.service_id) .. '_' .. tostring(queue.currentEvent.last_check) + end + + -- remove oldest event from sent events list + if #queue.validatedEvents >= queue.max_stored_events then + table.remove(queue.validatedEvents, 1) + end + + -- add event in the sent events list and add list to queue + table.insert(queue.validatedEvents, eventId) + -- END OF FIX + + queue:add() + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:is_event_duplicated, create an id from the neb event and check if id is in an already sent events list +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:is_event_duplicated() + local eventId = '' + if self.currentEvent.element == 14 then + eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.last_check) + else + eventId = tostring(self.currentEvent.host_id) .. '_' .. tostring(self.currentEvent.service_id) .. '_' .. 
tostring(self.currentEvent.last_check) + end + + for i, v in ipairs(self.validatedEvents) do + if eventId == v then + return true + end + end + + return false +end diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua new file mode 100644 index 00000000000..17566923121 --- /dev/null +++ b/stream-connectors/centreon-certified/servicenow/servicenow-em-events-apiv2.lua @@ -0,0 +1,508 @@ +#!/usr/bin/lua + +-------------------------------------------------------------------------------- +-- Centreon Broker Service Now connector +-- documentation: https://docs.centreon.com/current/en/integrations/stream-connectors/servicenow.html +-------------------------------------------------------------------------------- + + +-- libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue.new (params) + local self = {} + local mandatory_parameters = { + [1] = "instance", + [2] = "client_id", + [3] = "client_secret", + [4] = "username", + [5] = "password" + } + + self.tokens = {} + self.tokens.authToken = nil + self.tokens.refreshToken = nil + + + self.events = {} + self.fail = false + + local logfile = params.logfile or "/var/log/centreon-broker/servicenow-em-stream-connector.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + self.sc_params.params.instance = params.instance + self.sc_params.params.client_id = params.client_id + self.sc_params.params.client_secret = params.client_secret + self.sc_params.params.username = params.username + self.sc_params.params.password = params.password + self.sc_params.params.http_server_url = params.http_server_url or "service-now.com" + + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + 
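+  -- Illustrative note, not part of the connector logic: the format_file parameter loaded
+  -- just below lets a user replace format_event_host/format_event_service with a JSON
+  -- template whose {macro} placeholders are resolved by sc_macros. A minimal, hypothetical
+  -- template could look like:
+  --
+  --   {
+  --     "neb_service_status": {
+  --       "source": "centreon",
+  --       "node": "{cache.host.name}",
+  --       "resource": "{cache.service.description}",
+  --       "description": "{output}"
+  --     }
+  --   }
+  --
+  -- (the key naming and macro syntax shown here are assumptions based on the
+  -- centreon-stream-connectors-lib conventions; check the library documentation)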
self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + setmetatable(self, { __index = EventQueue }) + + return self +end + +-------------------------------------------------------------------------------- +-- getAuthToken: obtain a auth token +-- @return {string} self.tokens.authToken.token, the auth token +-------------------------------------------------------------------------------- +function EventQueue:getAuthToken () + if not self:refreshTokenIsValid() then + self:authToken() + end + + if not self:accessTokenIsValid() then + self:refreshToken(self.tokens.refreshToken.token) + end + + return self.tokens.authToken.token +end + +-------------------------------------------------------------------------------- +-- authToken: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:authToken () + local data = "grant_type=password&client_id=" .. self.sc_params.params.client_id .. "&client_secret=" .. self.sc_params.params.client_secret .. "&username=" .. self.sc_params.params.username .. "&password=" .. self.sc_params.params.password + + local res = self:call( + "oauth_token.do", + "POST", + data + ) + + if not res.access_token then + broker_log:error(1, "EventQueue:authToken: Authentication failed, couldn't get tokens") + return false + end + + self.tokens.authToken = { + token = res.access_token, + expTime = os.time(os.date("!*t")) + 1700 + } + + self.tokens.refreshToken = { + token = res.refresh_token, + expTime = os.time(os.date("!*t")) + 360000 + } +end + +-------------------------------------------------------------------------------- +-- refreshToken: refresh auth token +-------------------------------------------------------------------------------- +function EventQueue:refreshToken (token) + local data = "grant_type=refresh_token&client_id=" .. self.sc_params.params.client_id .. "&client_secret=" .. self.sc_params.params.client_secret .. "&username=" .. self.sc_params.params.username .. "&password=" .. self.sc_params.params.password .. "&refresh_token=" .. 
token + + local res = self:call( + "oauth_token.do", + "POST", + data + ) + + if not res.access_token then + broker_log:error(1, 'EventQueue:refreshToken Bad access token') + return false + end + + self.tokens.authToken = { + token = res.access_token, + expTime = os.time(os.date("!*t")) + 1700 + } +end + +-------------------------------------------------------------------------------- +-- refreshTokenIsValid: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:refreshTokenIsValid () + if not self.tokens.refreshToken then + return false + end + + if os.time(os.date("!*t")) > self.tokens.refreshToken.expTime then + self.tokens.refreshToken = nil + return false + end + + return true +end + +-------------------------------------------------------------------------------- +-- accessTokenIsValid: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:accessTokenIsValid () + if not self.tokens.authToken then + return false + end + + if os.time(os.date("!*t")) > self.tokens.authToken.expTime then + self.tokens.authToken = nil + return false + end + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:call run api call +-- @param {string} url, the service now instance url +-- @param {string} method, the HTTP method that is used +-- @param {string} data, the data we want to send to service now +-- @param {string} authToken, the api auth token +-- @return {array} decoded output +-- @throw exception if http call fails or response is empty +-------------------------------------------------------------------------------- +function EventQueue:call(url, method, data, authToken) + data = data or nil + authToken = authToken or nil + local queue_metadata = { + method = method or "GET" + } + + -- handle headers + if not authToken and queue_metadata.method ~= "GET" then + self.sc_logger:debug("EventQueue:call: Add form header") + queue_metadata.headers = {"Content-Type: application/x-www-form-urlencoded"} + else + broker_log:info(3, "Add JSON header") + queue_metadata.headers = { + "Accept: application/json", + "Content-Type: application/json", + "Authorization: Bearer " .. authToken + } + end + + local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. "." .. self.sc_params.params.http_server_url .. "/" .. tostring(url) + self.sc_logger:debug("EventQueue:call: Prepare url " .. endpoint) + + self.sc_logger:log_curl_command(endpoint, queue_metadata, self.sc_params.params, data) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(data) .. " to endpoint: " .. tostring(endpoint)) + return true + end + + local res = "" + local request = curl.easy() + :setopt_url(endpoint) + :setopt_writefunction(function (response) + res = res .. tostring(response) + end) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) + + self.sc_logger:debug("EventQueue:call: Request initialize") + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. 
self.sc_params.params.proxy_port) + else + self.sc_logger:error("EventQueue:call: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("EventQueue:call: proxy_password parameter is not set but proxy_username is used") + end + end + + if queue_metadata.method ~= "GET" then + self.sc_logger:debug("EventQueue:call: Add post data") + request:setopt_postfields(data) + end + + self.sc_logger:debug("EventQueue:call: request body " .. tostring(data)) + self.sc_logger:debug("EventQueue:call: request header " .. tostring(authToken)) + self.sc_logger:warning("EventQueue:call: Call url " .. endpoint) + request:perform() + + respCode = request:getinfo(curl.INFO_RESPONSE_CODE) + self.sc_logger:debug("EventQueue:call: HTTP Code : " .. respCode) + self.sc_logger:debug("EventQueue:call: Response body : " .. tostring(res)) + + request:close() + + if respCode >= 300 then + self.sc_logger:error("EventQueue:call: HTTP Code : " .. respCode) + self.sc_logger:error("EventQueue:call: HTTP Error : " .. res) + return false + end + + if res == "" then + self.sc_logger:warning("EventQueue:call: HTTP Error : " .. res) + return false + end + + return broker.json_decode(res) +end + +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+function EventQueue:format_event_host()
+  self.sc_event.event.formated_event = {
+    source = "centreon",
+    event_class = "centreon",
+    node = tostring(self.sc_event.event.cache.host.name),
+    time_of_event = os.date("!%Y-%m-%d %H:%M:%S", self.sc_event.event.last_check),
+    description = self.sc_event.event.output,
+    resource = tostring(self.sc_event.event.cache.host.name),
+    severity = self.sc_event.event.state
+  }
+end
+
+function EventQueue:format_event_service()
+  self.sc_event.event.formated_event = {
+    source = "centreon",
+    event_class = "centreon",
+    node = tostring(self.sc_event.event.cache.host.name),
+    time_of_event = os.date("!%Y-%m-%d %H:%M:%S", self.sc_event.event.last_check),
+    description = self.sc_event.event.output,
+    resource = tostring(self.sc_event.event.cache.service.description),
+    severity = 5
+  }
+
+  if self.sc_event.event.state == 0 then
+    self.sc_event.event.formated_event.severity = 0
+  elseif self.sc_event.event.state == 1 then
+    self.sc_event.event.formated_event.severity = 3
+  elseif self.sc_event.event.state == 2 then
+    self.sc_event.event.formated_event.severity = 1
+  elseif self.sc_event.event.state == 3 then
+    self.sc_event.event.formated_event.severity = 4
+  end
+end
+
+local queue
+
+-- init() function, called by broker with the parameters from the configuration file
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add a formatted event to the queue of its category and element
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {string} json encoded string
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {string} json encoded string
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = broker.json_encode(event)
+  else
+    payload = payload .. ',' ..
broker.json_encode(event) + end + + return payload +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external tool +-- @return {boolean} +-------------------------------------------------------------------------------- +function EventQueue:send_data(payload, queue_metadata) + local authToken + local counter = 0 + + -- generate a fake token for test purpose or use a real one if not testing + if self.sc_params.params.send_data_test == 1 then + authToken = "fake_token" + else + authToken = self:getAuthToken() + end + + local http_post_data = '{"records":[' .. payload .. ']}' + self.sc_logger:info('EventQueue:send_data: creating json: ' .. http_post_data) + + if self:call( + "api/global/em/jsonv2", + "POST", + http_post_data, + authToken + ) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end + + diff --git a/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua new file mode 100644 index 00000000000..d3ea96e5164 --- /dev/null +++ b/stream-connectors/centreon-certified/servicenow/servicenow-incident-events-apiv2.lua @@ -0,0 +1,509 @@ +#!/usr/bin/lua + +-------------------------------------------------------------------------------- +-- Centreon Broker Service Now connector +-- documentation: https://docs.centreon.com/current/en/integrations/stream-connectors/servicenow.html +-------------------------------------------------------------------------------- + + +-- libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_macros = require("centreon-stream-connectors-lib.sc_macros") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +-- Constructor +-- @param conf The table given by the init() function and returned from the GUI +-- @return the new EventQueue +-------------------------------------------------------------------------------- + +function EventQueue.new (params) + local self = {} + local mandatory_parameters = { + [1] = "instance", + [2] = "client_id", + [3] = "client_secret", + [4] = "username", + [5] = "password" + } + + self.tokens = {} + self.tokens.authToken = nil + self.tokens.refreshToken = nil + + + self.events = {} + self.fail = false + + local logfile = params.logfile or "/var/log/centreon-broker/servicenow-incident-stream-connector.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + 
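+  -- Illustrative sketch (simplified, not the actual library code): the assignments below
+  -- only seed connector-specific defaults. self.sc_params:param_override(params), called a
+  -- few lines further down, then applies the user configuration on top, conceptually:
+  --
+  --   for name, value in pairs(user_params) do
+  --     self.sc_params.params[name] = value
+  --   end
+  --
+  -- so every value set here can be replaced by a parameter configured on the broker output.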
self.sc_params.params.instance = params.instance + self.sc_params.params.client_id = params.client_id + self.sc_params.params.client_secret = params.client_secret + self.sc_params.params.username = params.username + self.sc_params.params.password = params.password + self.sc_params.params.http_server_url = params.http_server_url or "service-now.com" + self.sc_params.params.incident_table = params.incident_table or "incident" + self.sc_params.params.source = params.source or "centreon" + + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + -- this is an automatic ticketing stream connector, by default we only open ticket on warning/critical/unknown/down/unreachable states + self.sc_params.params.host_status = params.host_status or "1,2" + self.sc_params.params.service_status = params.service_status or "1,2,3" + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- force max_buffer_size to 1, we can't send bulk events + params.max_buffer_size = 1 + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + self.sc_params.params.http_server_url = self.sc_common:if_wrong_type(self.sc_params.params.http_server_url, "string", "service-now.com") + self.sc_params.params.incident_table = self.sc_common:if_wrong_type(self.sc_params.params.incident_table, "string", "incident") + self.sc_params.params.source = self.sc_common:if_wrong_type(self.sc_params.params.source, "string", "centreon") + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- getAuthToken: obtain a auth token +-- @return {string} self.tokens.authToken.token, the auth token +-------------------------------------------------------------------------------- +function EventQueue:getAuthToken () + if not self:refreshTokenIsValid() then + self:authToken() + end + + if not self:accessTokenIsValid() then + self:refreshToken(self.tokens.refreshToken.token) + end + + return self.tokens.authToken.token +end + +-------------------------------------------------------------------------------- +-- authToken: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:authToken () + local data = "grant_type=password&client_id=" .. self.sc_params.params.client_id .. "&client_secret=" .. self.sc_params.params.client_secret .. "&username=" .. self.sc_params.params.username .. "&password=" .. self.sc_params.params.password + + local res = self:call( + "oauth_token.do", + "POST", + data + ) + + if not res.access_token then + broker_log:error(1, "EventQueue:authToken: Authentication failed, couldn't get tokens") + return false + end + + self.tokens.authToken = { + token = res.access_token, + expTime = os.time(os.date("!*t")) + 1700 + } + + self.tokens.refreshToken = { + token = res.refresh_token, + expTime = os.time(os.date("!*t")) + 360000 + } +end + +-------------------------------------------------------------------------------- +-- refreshToken: refresh auth token +-------------------------------------------------------------------------------- +function EventQueue:refreshToken (token) + local data = "grant_type=refresh_token&client_id=" .. self.sc_params.params.client_id .. "&client_secret=" .. self.sc_params.params.client_secret .. "&username=" .. self.sc_params.params.username .. "&password=" .. self.sc_params.params.password .. "&refresh_token=" .. 
token + + local res = self:call( + "oauth_token.do", + "POST", + data + ) + + if not res.access_token then + broker_log:error(1, 'EventQueue:refreshToken Bad access token') + return false + end + + self.tokens.authToken = { + token = res.access_token, + expTime = os.time(os.date("!*t")) + 1700 + } +end + +-------------------------------------------------------------------------------- +-- refreshTokenIsValid: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:refreshTokenIsValid () + if not self.tokens.refreshToken then + return false + end + + if os.time(os.date("!*t")) > self.tokens.refreshToken.expTime then + self.tokens.refreshToken = nil + return false + end + + return true +end + +-------------------------------------------------------------------------------- +-- accessTokenIsValid: obtain auth token +-------------------------------------------------------------------------------- +function EventQueue:accessTokenIsValid () + if not self.tokens.authToken then + return false + end + + if os.time(os.date("!*t")) > self.tokens.authToken.expTime then + self.tokens.authToken = nil + return false + end + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:call run api call +-- @param {string} url, the service now instance url +-- @param {string} method, the HTTP method that is used +-- @param {string} data, the data we want to send to service now +-- @param {string} authToken, the api auth token +-- @return {array} decoded output +-- @throw exception if http call fails or response is empty +-------------------------------------------------------------------------------- +function EventQueue:call(url, method, data, authToken) + data = data or nil + authToken = authToken or nil + + local queue_metadata = { + method = method or "GET" + } + + -- handle headers + if not authToken and queue_metadata.method ~= "GET" then + self.sc_logger:debug("EventQueue:call: Add form header") + queue_metadata.headers = {"Content-Type: application/x-www-form-urlencoded"} + else + broker_log:info(3, "Add JSON header") + queue_metadata.headers = { + "Accept: application/json", + "Content-Type: application/json", + "Authorization: Bearer " .. authToken + } + end + + local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. "." .. self.sc_params.params.http_server_url .. "/" .. tostring(url) + self.sc_logger:debug("EventQueue:call: Prepare url " .. endpoint) + + self.sc_logger:log_curl_command(endpoint, queue_metadata, self.sc_params.params, data) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(data) .. " to endpoint: " .. tostring(endpoint)) + return true + end + + local res = "" + local request = curl.easy() + :setopt_url(endpoint) + :setopt_writefunction(function (response) + res = res .. tostring(response) + end) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) + + self.sc_logger:debug("EventQueue:call: Request initialize") + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. 
+--------------------------------------------------------------------------------
+-- EventQueue:call, run an API call
+-- @param {string} url, the ServiceNow instance url
+-- @param {string} method, the HTTP method that is used
+-- @param {string} data, the data we want to send to ServiceNow
+-- @param {string} authToken, the api auth token
+-- @return {table} decoded output
+-- @throw exception if http call fails or response is empty
+--------------------------------------------------------------------------------
+function EventQueue:call(url, method, data, authToken)
+  data = data or nil
+  authToken = authToken or nil
+
+  local queue_metadata = {
+    method = method or "GET"
+  }
+
+  -- handle headers
+  if not authToken and queue_metadata.method ~= "GET" then
+    self.sc_logger:debug("EventQueue:call: Add form header")
+    queue_metadata.headers = {"Content-Type: application/x-www-form-urlencoded"}
+  else
+    broker_log:info(3, "Add JSON header")
+    queue_metadata.headers = {
+      "Accept: application/json",
+      "Content-Type: application/json",
+      "Authorization: Bearer " .. authToken
+    }
+  end
+
+  local endpoint = "https://" .. tostring(self.sc_params.params.instance) .. "." .. self.sc_params.params.http_server_url .. "/" .. tostring(url)
+  self.sc_logger:debug("EventQueue:call: Prepare url " .. endpoint)
+
+  self.sc_logger:log_curl_command(endpoint, queue_metadata, self.sc_params.params, data)
+
+  -- write payload in the logfile for test purposes
+  if self.sc_params.params.send_data_test == 1 then
+    self.sc_logger:notice("[send_data]: " .. tostring(data) .. " to endpoint: " .. tostring(endpoint))
+    return true
+  end
+
+  local res = ""
+  local request = curl.easy()
+    :setopt_url(endpoint)
+    :setopt_writefunction(function (response)
+      res = res .. tostring(response)
+    end)
+    :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout)
+    :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers)
+
+  self.sc_logger:debug("EventQueue:call: Request initialize")
+
+  -- set proxy address configuration
+  if (self.sc_params.params.proxy_address ~= '') then
+    if (self.sc_params.params.proxy_port ~= '') then
+      request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port)
+    else
+      self.sc_logger:error("EventQueue:call: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.sc_params.params.proxy_username ~= '') then
+    if (self.sc_params.params.proxy_password ~= '') then
+      request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password)
+    else
+      self.sc_logger:error("EventQueue:call: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  if queue_metadata.method ~= "GET" then
+    self.sc_logger:debug("EventQueue:call: Add post data")
+    request:setopt_postfields(data)
+  end
+
+  self.sc_logger:debug("EventQueue:call: request body " .. tostring(data))
+  self.sc_logger:debug("EventQueue:call: request header " .. tostring(authToken))
+  self.sc_logger:warning("EventQueue:call: Call url " .. endpoint)
+  request:perform()
+
+  local respCode = request:getinfo(curl.INFO_RESPONSE_CODE)
+  self.sc_logger:debug("EventQueue:call: HTTP Code : " .. respCode)
+  self.sc_logger:debug("EventQueue:call: Response body : " .. tostring(res))
+
+  request:close()
+
+  if respCode >= 300 then
+    self.sc_logger:error("EventQueue:call: HTTP Code : " .. respCode)
+    self.sc_logger:error("EventQueue:call: HTTP Error : " .. res)
+    return false
+  end
+
+  if res == "" then
+    self.sc_logger:warning("EventQueue:call: HTTP Error : " .. res)
+    return false
+  end
+
+  return broker.json_decode(res)
+end
+
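+-- Illustration only: a typical invocation of EventQueue:call once a token is
+-- available. "incident" is a sample table name here, not a connector parameter:
+--
+--   local authToken = self:getAuthToken()
+--   local res = self:call("api/now/table/incident", "POST", broker.json_encode(event), authToken)
+--   if res then
+--     -- res is the decoded JSON answer as a Lua table
+--   end
+--
+-- call() returns false on an HTTP code >= 300 or on an empty body, returns the
+-- decoded JSON table otherwise, and short-circuits with true when
+-- send_data_test == 1.
+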
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  local template = self.sc_params.params.format_template[category][element]
+
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+  self.sc_event.event.formated_event = {}
+
+  if self.format_template and template ~= nil and template ~= "" then
+    self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true)
+  else
+    -- can't format the event if the stream connector does not handle this kind of event and no template file handles it either
+    if not self.format_event[category][element] then
+      self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: "
+        .. tostring(self.sc_params.params.reverse_element_mapping[category][element])
+        .. ". If it is not a misconfiguration, you should create a format file to handle this kind of element")
+    else
+      self.format_event[category][element]()
+    end
+  end
+
+  self:add()
+  self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished")
+end
+
+function EventQueue:format_event_host()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    source = self.sc_params.params.source,
+    short_description = self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. tostring(event.cache.host.name) .. " " .. tostring(event.short_output),
+    cmdb_ci = tostring(event.cache.host.name),
+    comments = "HOST: " .. tostring(event.cache.host.name) .. "\n"
+      .. "OUTPUT: " .. tostring(event.output) .. "\n"
+  }
+end
+
+function EventQueue:format_event_service()
+  local event = self.sc_event.event
+
+  self.sc_event.event.formated_event = {
+    source = self.sc_params.params.source,
+    short_description = self.sc_params.params.status_mapping[event.category][event.element][event.state] .. " " .. tostring(event.cache.host.name) .. " " .. tostring(event.cache.service.description) .. " " .. tostring(event.short_output),
+    cmdb_ci = tostring(event.cache.host.name),
+    comments = "HOST: " .. tostring(event.cache.host.name) .. "\n"
+      .. "SERVICE: " .. tostring(event.cache.service.description) .. "\n"
+      .. "OUTPUT: " .. tostring(event.output) .. "\n"
+  }
+end
+
+local queue
+
+--------------------------------------------------------------------------------
+-- init, initiate stream connector with parameters from the configuration file
+-- @param {table} parameters, the table with all the configuration parameters
+--------------------------------------------------------------------------------
+function init(conf)
+  queue = EventQueue.new(conf)
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add()
+  -- store event in self.events lists
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+
+  self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category])
+    .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element]))
+
+  self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events))
+  self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event
+
+  self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events)
+    .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size))
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:build_payload, concatenate data so it is ready to be sent
+-- @param payload {string} json encoded string
+-- @param event {table} the event that is going to be added to the payload
+-- @return payload {string} json encoded string
+--------------------------------------------------------------------------------
+function EventQueue:build_payload(payload, event)
+  if not payload then
+    payload = broker.json_encode(event)
+  else
+    payload = payload .. ',' .. broker.json_encode(event)
+  end
+
+  return payload
+end
+
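+-- Note on batching: build_payload joins events with "," but never wraps the
+-- result in "[...]", so a payload holding more than one event is not a single
+-- valid JSON document. A sketch of an array-wrapping variant (an illustration
+-- under that assumption, not the shipped behaviour of this connector):
+--
+--   function EventQueue:build_payload(payload, event)
+--     if not payload then
+--       return "[" .. broker.json_encode(event)  -- would need a closing "]" at send time
+--     end
+--     return payload .. "," .. broker.json_encode(event)
+--   end
+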
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to the external tool
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function EventQueue:send_data(payload, queue_metadata)
+  local authToken
+  local counter = 0
+
+  -- generate a fake token for test purposes or use a real one if not testing
+  if self.sc_params.params.send_data_test == 1 then
+    authToken = "fake_token"
+  else
+    authToken = self:getAuthToken()
+  end
+
+  local http_post_data = payload
+  self.sc_logger:info('EventQueue:send_data: creating json: ' .. http_post_data)
+
+  if self:call(
+    "api/now/table/" .. self.sc_params.params.incident_table,
+    "POST",
+    http_post_data,
+    authToken
+  ) then
+    return true
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- write,
+-- @param {table} event, the event from broker
+-- @return {boolean}
+--------------------------------------------------------------------------------
+function write (event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return false
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  if queue.sc_event:is_valid_category() then
+    if queue.sc_event:is_valid_element() then
+      -- format event if it is validated
+      if queue.sc_event:is_valid_event() then
+        queue:format_accepted_event()
+      end
+    --- log why the event has been dropped
+    else
+      queue.sc_logger:debug("dropping event because element is not valid. Event element is: "
+        .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element]))
+    end
+  else
+    queue.sc_logger:debug("dropping event because category is not valid. Event category is: "
+      .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because the last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
+
diff --git a/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua
new file mode 100644
index 00000000000..80e48e7e3d8
--- /dev/null
+++ b/stream-connectors/centreon-certified/signl4/signl4-events-apiv2.lua
@@ -0,0 +1,345 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Signl4 Connector Events
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require("cURL")
+local mime = require("mime")
+
+-- Centreon lua core libraries
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+
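+-- Orientation only: the Signl4 webhook consumes plain JSON bodies. The events
+-- built later in this file (see format_event_host/format_event_service) follow
+-- this shape, shown here with sample values that are not connector defaults:
+--
+--   {
+--     "EventType": "HOST",
+--     "Host": "central-server",
+--     "Message": "OK: everything is fine",
+--     "Status": "UP",
+--     "Title": "HOST ALERT: central-server is UP",
+--     "X-S4-SourceSystem": "Centreon",
+--     "X-S4-ExternalID": "HOSTALERT_42",
+--     "X-S4-Status": "resolved"
+--   }
+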
+-------------------------------------------------------------------------------- +-- event_queue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "team_secret" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/signl4-events-apiv2.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- force buffer size to 1 to avoid breaking the communication with signl4 (can't send more than one event at once) + params.max_buffer_size = 1 + + -- overriding default parameters for this stream connector if the default values doesn't suit the basic needs + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.server_address = params.server_address or "https://connect.signl4.com" + self.sc_params.params.x_s4_source_system = params.x_s4_source_system or "Centreon" + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger) + self.format_template = self.sc_params:load_event_format_file(true) + + -- only load the custom code file, not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + }, + [categories.bam.id] = {} + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + self.state_to_signlstatus_mapping = { + [0] = "resolved", + [1] = "new", + [2] = "new", + [3] = "new" + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self + end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +--------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + local template = self.sc_params.params.format_template[category][element] + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + self.sc_event.event.formated_event = {} + + if self.format_template and template ~= nil and template ~= "" then + self.sc_event.event.formated_event = self.sc_macros:replace_sc_macro(template, self.sc_event.event, true) + else + -- can't format event if stream connector is not handling this kind of event and that it is not handled with a template file + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +function EventQueue:format_event_host() + self.sc_event.event.formated_event = { + EventType = "HOST", + Date = self.sc_macros:transform_date(self.sc_event.event.last_check), + Host = self.sc_event.event.cache.host.name, + Message = self.sc_event.event.output, + Status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + Title = "HOST ALERT:" .. self.sc_event.event.cache.host.name .. " is " .. self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["X-S4-SourceSystem"] = self.sc_params.params.x_s4_source_system, + ["X-S4-ExternalID"] = "HOSTALERT_" .. 
self.sc_event.event.host_id, + ["X-S4-Status"] = self.state_to_signlstatus_mapping[self.sc_event.event.state] + } +end + +function EventQueue:format_event_service() + self.sc_event.event.formated_event = { + EventType = "SERVICE", + Date = self.sc_macros:transform_date(self.sc_event.event.last_check), + Host = self.sc_event.event.cache.host.name, + Service = self.sc_event.event.cache.service.description, + Message = self.sc_event.event.output, + Status = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + Title = "SERVICE ALERT:" .. self.sc_event.event.cache.host.name .. "/" .. self.sc_event.event.cache.service.description .. " is " .. self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + ["X-S4-SourceSystem"] = self.sc_params.params.x_s4_source_system, + ["X-S4-ExternalID"] = "SERVICEALERT_" .. self.sc_event.event.host_id .. "_" .. self.sc_event.event.service_id, + ["X-S4-Status"] = self.state_to_signlstatus_mapping[self.sc_event.event.state] + } +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- + +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = self.sc_event.event.formated_event + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) .. '\n' + else + payload = payload .. broker.json_encode(event) .. '\n' + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local url = self.sc_params.params.server_address .. "/webhook/" .. self.sc_params.params.team_secret + queue_metadata.headers = {"content-type: application/json"} + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. 
tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Signl4 Server URL is: " .. tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER,queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload) + -- performing the HTTP request + http_request:perform() + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + http_request:close() + + -- Handling the return code + local retval = false + + if http_response_code == 200 or http_response_code == 201 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category]))
+  end
+
+  return flush()
+end
+
+
+-- flush method is called by broker every now and then (more often when broker has nothing else to do)
+function flush()
+  local queues_size = queue.sc_flush:get_queues_size()
+
+  -- nothing to flush
+  if queues_size == 0 then
+    return true
+  end
+
+  -- flush all queues because the last global flush is too old
+  if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- flush queues because too many events are stored in them
+  if queues_size > queue.sc_params.params.max_buffer_size then
+    if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then
+      return false
+    end
+
+    return true
+  end
+
+  -- there are events in the queue but they were not ready to be sent
+  return false
+end
diff --git a/stream-connectors/centreon-certified/splunk/splunk-conf1.png b/stream-connectors/centreon-certified/splunk/splunk-conf1.png
new file mode 100644
index 00000000000..00b532cdfaf
Binary files /dev/null and b/stream-connectors/centreon-certified/splunk/splunk-conf1.png differ
diff --git a/stream-connectors/centreon-certified/splunk/splunk-conf2.png b/stream-connectors/centreon-certified/splunk/splunk-conf2.png
new file mode 100644
index 00000000000..32daa1b3a48
Binary files /dev/null and b/stream-connectors/centreon-certified/splunk/splunk-conf2.png differ
diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua
new file mode 100644
index 00000000000..1fb7596a534
--- /dev/null
+++ b/stream-connectors/centreon-certified/splunk/splunk-events-apiv2.lua
@@ -0,0 +1,342 @@
+#!/usr/bin/lua
+--------------------------------------------------------------------------------
+-- Centreon Broker Splunk Connector Events
+--------------------------------------------------------------------------------
+
+
+-- Libraries
+local curl = require "cURL"
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+
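+-- Orientation only: Splunk's HTTP Event Collector expects an envelope around
+-- each event; EventQueue:add later in this file fills it from the splunk_*
+-- parameters. A minimal sketch with sample values (not connector defaults):
+--
+--   local hec_envelope = {
+--     sourcetype = "_json",        -- splunk_sourcetype
+--     source = "http:centreon",    -- splunk_source
+--     index = "main",              -- splunk_index ("main" is a sample value)
+--     host = "Central",            -- splunk_host
+--     time = os.time(),            -- event timestamp (last_check in practice)
+--     event = { }                  -- the formatted Centreon event
+--   }
+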
+--------------------------------------------------------------------------------
+-- event_queue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(params)
+  local self = {}
+
+  local mandatory_parameters = {
+    "http_server_url",
+    "splunk_token"
+  }
+
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/splunk-events.log"
+  local log_level = params.log_level or 1
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- checking mandatory parameters and setting a fail flag
+  if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then
+    self.fail = true
+  end
+
+  -- overriding default parameters for this stream connector if the default values don't suit the basic needs
+  self.sc_params.params.splunk_index = params.splunk_index or ""
+  self.sc_params.params.splunk_source = params.splunk_source or ""
+  self.sc_params.params.splunk_sourcetype = params.splunk_sourcetype or "_json"
+  self.sc_params.params.splunk_host = params.splunk_host or "Central"
+  self.sc_params.params.accepted_categories = params.accepted_categories or "neb"
+  self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status"
+
+  -- apply users params and check syntax of standard ones
+  self.sc_params:param_override(params)
+  self.sc_params:check_params()
+
+  self.sc_macros = sc_macros.new(self.sc_params.params, self.sc_logger)
+  self.format_template = self.sc_params:load_event_format_file()
+
+  -- only load the custom code file, not executed yet
+  if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then
+    self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. tostring(self.sc_params.params.custom_code_file))
+  end
+
+  self.sc_params:build_accepted_elements_info()
+  self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger)
+
+  local categories = self.sc_params.params.bbdo.categories
+  local elements = self.sc_params.params.bbdo.elements
+
+  self.format_event = {
+    [categories.neb.id] = {
+      [elements.host_status.id] = function () return self:format_event_host() end,
+      [elements.service_status.id] = function () return self:format_event_service() end
+    },
+    [categories.bam.id] = {}
+  }
+
+  self.send_data_method = {
+    [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end
+  }
+
+  self.build_payload_method = {
+    [1] = function (payload, event) return self:build_payload(payload, event) end
+  }
+
+  -- return EventQueue object
+  setmetatable(self, { __index = EventQueue })
+  return self
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:format_event method
+----------------------------------------------------------------------------------
+function EventQueue:format_accepted_event()
+  local category = self.sc_event.event.category
+  local element = self.sc_event.event.element
+  local template = self.sc_params.params.format_template[category][element]
+  self.sc_logger:debug("[EventQueue:format_event]: starting format event")
+  self.sc_event.event.formated_event = {}
+
+  if self.format_template and template ~= nil and template ~= "" then
+    for index, value in pairs(template) do
+      self.sc_event.event.formated_event[index] = self.sc_macros:replace_sc_macro(value, self.sc_event.event)
+    end
+  else
+    -- can't format the event if the stream connector does not handle this kind of event and no template file handles it either
+    if not self.format_event[category][element] then
+      self.sc_logger:error("[format_event]: You are trying to format an event with category: "
+        .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. 
" and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". If it is a not a misconfiguration, you should create a format file to handle this kind of element") + else + self.format_event[category][element]() + end + end + + self:add() + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + +function EventQueue:format_event_host() + self.sc_event.event.formated_event = { + event_type = "host", + state = self.sc_event.event.state, + state_type = self.sc_event.event.state_type, + hostname = self.sc_event.event.cache.host.name, + output = self.sc_event.event.output, + } +end + +function EventQueue:format_event_service() + self.sc_event.event.formated_event = { + event_type = "service", + state = self.sc_event.event.state, + state_type = self.sc_event.event.state_type, + hostname = self.sc_event.event.cache.host.name, + service_description = self.sc_event.event.cache.service.description, + output = self.sc_event.event.output, + } +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + -- store event in self.events lists + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = { + sourcetype = self.sc_params.params.splunk_sourcetype, + source = self.sc_params.params.splunk_source, + index = self.sc_params.params.splunk_index, + host = self.sc_params.params.splunk_host, + time = self.sc_event.event.last_check, + event = self.sc_event.event.formated_event + } + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. broker.json_encode(event) + end + + return payload +end + + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + local url = self.sc_params.params.http_server_url + queue_metadata.headers = { + "content-type: application/json", + "content-length:" .. string.len(payload), + "authorization: Splunk " .. 
self.sc_params.params.splunk_token + } + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 200 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. 
tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + if queue.sc_event:is_valid_category() then + if queue.sc_event:is_valid_element() then + -- format event if it is validated + if queue.sc_event:is_valid_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua new file mode 100755 index 00000000000..e3a14ceb4b0 --- /dev/null +++ b/stream-connectors/centreon-certified/splunk/splunk-events-http-apiv1.lua @@ -0,0 +1,321 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Splunk Connector Events +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Prerequisites +-- You need a Splunk instance +-- You need to create a new HTTP events collector with an events index and get a token +-- +-- The lua-curl and luatz libraries are required by this script: +-- yum install lua-curl epel-release +-- yum install luarocks +-- luarocks install luatz 
+--------------------------------------------------------------------------------
+
+--------------------------------------------------------------------------------
+-- Parameters:
+-- [MANDATORY] http_server_url: your splunk API url
+-- [MANDATORY] splunk_token: see above, this will be your authentication token
+-- [MANDATORY] splunk_index: index where you want to store the events
+-- [MANDATORY] splunk_source: source of the HTTP events collector, must be http:something
+-- [OPTIONAL] splunk_sourcetype: sourcetype of the HTTP events collector, default _json
+-- [OPTIONAL] splunk_host: host field for the HTTP events collector, default Centreon
+-- [OPTIONAL] http_proxy_string: default empty
+--
+--------------------------------------------------------------------------------
+
+-- Libraries
+local curl = require "cURL"
+local new_from_timestamp = require "luatz.timetable".new_from_timestamp
+-- Global variables
+local previous_event = ""
+
+-- Useful functions
+local function ifnil(var, alt)
+  if var == nil then
+    return alt
+  else
+    return var
+  end
+end
+
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == '' then
+    return alt
+  else
+    return var
+  end
+end
+
+local function get_hostname(host_id)
+  local hostname = broker_cache:get_hostname(host_id)
+  if not hostname then
+    hostname = host_id
+  end
+  return hostname
+end
+
+local function get_service_description(host_id, service_id)
+  local service = broker_cache:get_service_description(host_id, service_id)
+  if not service then
+    service = service_id
+  end
+  return service
+end
+
+--------------------------------------------------------------------------------
+-- event_queue class
+--------------------------------------------------------------------------------
+
+local EventQueue = {}
+EventQueue.__index = EventQueue
+
+--------------------------------------------------------------------------------
+---- Constructor
+---- @param conf The table given by the init() function and returned from the GUI
+---- @return the new EventQueue
+----------------------------------------------------------------------------------
+
+function EventQueue.new(conf)
+  local retval = {
+    http_server_url = "",
+    http_proxy_string = "",
+    http_timeout = 5,
+    splunk_sourcetype = "_json",
+    splunk_source = "",
+    splunk_token = "",
+    splunk_index = "",
+    splunk_host = "Centreon",
+    filter_type = "metric,status",
+    max_buffer_size = 1,
+    max_buffer_age = 5,
+    skip_anon_events = 1
+  }
+  for i,v in pairs(conf) do
+    if retval[i] then
+      retval[i] = v
+      broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v)
+    end
+  end
+  -- Internal data initialization
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:add method
+---- @param e An event
+----------------------------------------------------------------------------------
+
+function EventQueue:add(e)
+
+  local type = "host"
+  local hostname = get_hostname(e.host_id)
+  if hostname == e.host_id then
+    if self.skip_anon_events ~= 1 then
+      broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. 
e.host_id .."'") + return false + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only") + end + end + + local service_description = "" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + end + end + end + + local event_data = { + event_type = type, + state = e.state, + hostname = hostname, + service_description = service_description, + output = string.gsub(e.output, "\n", "") + } + + self.events[#self.events + 1] = { + sourcetype = self.splunk_sourcetype, + source = self.splunk_source, + index = self.splunk_index, + host = self.splunk_host, + time = e.ctime, + event = event_data + } + + return true + +end + +-------------------------------------------------------------------------------- +---- EventQueue:flush method +---- Called when the max number of events or the max age are reached +---------------------------------------------------------------------------------- + +function EventQueue:flush() + + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + local http_post_data = "" + for _, raw_event in ipairs(self.events) do + http_post_data = http_post_data .. broker.json_encode(raw_event) + end + for s in http_post_data:gmatch("[^\r\n]+") do + broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + end + + broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"") + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.http_server_url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.http_timeout) + :setopt( + curl.OPT_HTTPHEADER, + { + "content-type: application/json", + "content-length:" .. string.len(http_post_data), + "authorization: Splunk " .. self.splunk_token, + } + ) + + -- setting the CURLOPT_PROXY + if self.http_proxy_string and self.http_proxy_string ~= "" then + broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'") + http_request:setopt(curl.OPT_PROXY, self.http_proxy_string) + end + + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 200 then + broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code) + -- now that the data has been sent, we empty the events array + self.events = {} + retval = true + else + broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code) + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. 
"\n\"\n") + end + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + local log_level = 2 + local log_path = "/var/log/centreon-broker/stream-connector-splunk-events.log" + for i,v in pairs(conf) do + if i == "log_level" then + log_level = v + end + if i == "log_path" then + log_path = v + end + end + broker_log:set_parameters(log_level, log_path) + broker_log:info(0, "init: Starting Splunk StreamConnector (log level: " .. log_level .. ")") + broker_log:info(2, "init: Beginning init() function") + queue = EventQueue.new(conf) + broker_log:info(2, "init: Ending init() function, Event queue created") +end + +-- Fonction write() +function write(e) + broker_log:info(3, "write: Beginning function") + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Here come the filters + -- Host/service status only + if not (e.category == 1 and (e.element == 24 or e.element == 14)) then + broker_log:info(3, "write: Neither host nor service status event. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + broker_log:info(3, "write: Raw event: " .. current_event) + + if e.state_type ~= 1 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Not HARD state type. Dropping.") + return true + end + + -- Ignore states different from previous hard state only + if e.last_hard_state_change and e.last_hard_state_change < e.last_check then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Last hard state change prior to last check => no state change. Dropping.") + return true + end + + -- Ignore objects in downtime + if e.scheduled_downtime_depth ~= 0 then --we keep only events in hard state and not in downtime + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Scheduled downtime. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + if current_event == previous_event then + broker_log:info(3, "write: Duplicate event ignored.") + return true + end + + -- Ignore pending states + if e.state and e.state == 4 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Pending state ignored. Dropping.") + return true + end + -- The current event now becomes the previous + previous_event = current_event + -- Once all the filters have been passed successfully, we can add the current event to the queue + queue:add(e) + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. 
") is reached, flushing data") + return queue:flush() + end + broker_log:info(3, "write: Ending function") + + return true +end + diff --git a/stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua new file mode 100755 index 00000000000..7e9a4c5f946 --- /dev/null +++ b/stream-connectors/centreon-certified/splunk/splunk-events-luacurl-apiv1.lua @@ -0,0 +1,339 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Splunk Connector Events +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Prerequisites +-- You need a Splunk instance +-- You need to create a new HTTP events collector with an events index and get a token +-- +-- The lua-curl and luatz libraries are required by this script: +-- yum install lua-curl epel-release +-- yum install luarocks +-- luarocks install luatz +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Parameters: +-- [MANDATORY] http_server_url: your splunk API url +-- [MANDATORY] splunk_token: see above, this will be your authentication token +-- [MANDATORY] splunk_index: index where you want to store the events +-- [OPTIONAL] splunk_source: source of the HTTP events collector, must be http:something +-- [OPTIONAL] splunk_sourcetype: sourcetype of the HTTP events collector, default _json +-- [OPTIONAL] splunk_host: host field for the HTTP events collector, default Central +-- [OPTIONAL] http_proxy_string: default empty +-- +-------------------------------------------------------------------------------- + +-- Libraries +local curl = require "cURL" +-- Global variables +local previous_event = "" + +-- Useful functions +local function ifnil(var, alt) + if var == nil then + return alt + else + return var + end +end + +local function ifnil_or_empty(var, alt) + if var == nil or var == '' then + return alt + else + return var + end +end + +local function get_hostname(host_id) + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + hostname = host_id + end + return hostname +end + +local function get_service_description(host_id, service_id) + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + service = service_id + end + return service +end + +local function get_hostgroups(host_id) + local hostgroups = broker_cache:get_hostgroups(host_id) + if not hostgroups then + hostgroups = "No hostgroups" + end + return hostgroups +end + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(conf) + local retval = { + http_server_url = "", + http_proxy_string = "", + http_timeout = 5, + splunk_sourcetype = "_json", + splunk_source = "", + splunk_token = "", + splunk_index = "", + splunk_host = "Central", + 
    filter_type = "metric,status",
+    max_buffer_size = 1,
+    max_buffer_age = 5,
+    skip_anon_events = 1
+  }
+  for i,v in pairs(conf) do
+    if retval[i] then
+      retval[i] = v
+      broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v)
+    else
+      broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v)
+    end
+  end
+  -- Internal data initialization
+  retval.__internal_ts_last_flush = os.time()
+  retval.events = {}
+  setmetatable(retval, EventQueue)
+  broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush)
+  return retval
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:add method
+---- @param e An event
+----------------------------------------------------------------------------------
+
+function EventQueue:add(e)
+
+  local type = "host"
+  local hostname = get_hostname(e.host_id)
+  if hostname == e.host_id then
+    if self.skip_anon_events ~= 1 then
+      broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'")
+      return false
+    else
+      broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only")
+    end
+  end
+
+  local service_description = ""
+  if e.service_id then
+    type = "service"
+    service_description = get_service_description(e.host_id, e.service_id)
+    if service_description == e.service_id then
+      if self.skip_anon_events ~= 1 then
+        broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'")
+      else
+        broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'")
+      end
+    end
+  end
+
+  local event_data = {
+    event_type = type,
+    state = e.state,
+    state_type = e.state_type,
+    hostname = hostname,
+    service_description = ifnil_or_empty(service_description,hostname),
+    output = string.gsub(e.output, "\n", ""),
+    hostgroups = get_hostgroups(e.host_id),
+    acknowledged = e.acknowledged,
+    acknowledgement_type = e.acknowledgement_type,
+    check_command = e.check_command,
+    check_period = e.check_period,
+    event_handler = e.event_handler,
+    event_handler_enabled = e.event_handler_enabled,
+    execution_time = e.execution_time
+  }
+
+  self.events[#self.events + 1] = {
+    sourcetype = self.splunk_sourcetype,
+    source = self.splunk_source,
+    index = self.splunk_index,
+    host = self.splunk_host,
+    time = e.last_check,
+    event = event_data
+  }
+
+  return true
+
+end
+
+--------------------------------------------------------------------------------
+---- EventQueue:flush method
+---- Called when the max number of events or the max age are reached
+----------------------------------------------------------------------------------
+
+function EventQueue:flush()
+
+  broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string")
+  local http_post_data = ""
+  for _, raw_event in ipairs(self.events) do
+    http_post_data = http_post_data .. broker.json_encode(raw_event)
+  end
+  for s in http_post_data:gmatch("[^\r\n]+") do
+    broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "")
+  end
+
+  broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"")
+
+  local http_response_body = ""
+  local http_request = curl.easy()
+  :setopt_url(self.http_server_url)
+  :setopt_writefunction(
+    function (response)
+      http_response_body = http_response_body .. tostring(response)
+    end
+  )
+  :setopt(curl.OPT_TIMEOUT, self.http_timeout)
+  :setopt(
+    curl.OPT_HTTPHEADER,
+    {
+      "content-type: application/json",
+      "content-length:" .. string.len(http_post_data),
+      "authorization: Splunk " .. self.splunk_token,
+    }
+  )
+
+  -- setting the CURLOPT_PROXY
+  if self.http_proxy_string and self.http_proxy_string ~= "" then
+    broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'")
+    http_request:setopt(curl.OPT_PROXY, self.http_proxy_string)
+  end
+
+  -- adding the HTTP POST data
+  http_request:setopt_postfields(http_post_data)
+
+  -- performing the HTTP request
+  http_request:perform()
+
+  -- collecting results
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  http_request:close()
+
+  -- Handling the return code
+  local retval = false
+  if http_response_code == 200 then
+    broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code)
+    -- now that the data has been sent, we empty the events array
+    self.events = {}
+    retval = true
+  else
+    broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code)
+    broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. "\n\"\n")
+  end
+  -- and update the timestamp
+  self.__internal_ts_last_flush = os.time()
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- Required functions for Broker StreamConnector
+--------------------------------------------------------------------------------
+
+local queue
+
+-- Function init()
+function init(conf)
+  local log_level = 1
+  local log_path = "/var/log/centreon-broker/stream-connector-splunk-events.log"
+  for i,v in pairs(conf) do
+    if i == "log_level" then
+      log_level = v
+    end
+    if i == "log_path" then
+      log_path = v
+    end
+  end
+  broker_log:set_parameters(log_level, log_path)
+  broker_log:info(0, "init: Starting Splunk StreamConnector (log level: " .. log_level .. ")")
+  broker_log:info(2, "init: Beginning init() function")
+  queue = EventQueue.new(conf)
+  broker_log:info(2, "init: Ending init() function, Event queue created")
+end
+
+-- Function write()
+function write(e)
+  broker_log:info(3, "write: Beginning function")
+
+  -- First, are there some old events waiting in the flush queue ?
+  if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then
+    broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  -- Here come the filters
+  -- Host/service status only
+  if not (e.category == 1 and (e.element == 24 or e.element == 14)) then
+    broker_log:info(3, "write: Neither host nor service status event. Dropping.")
+    return true
+  end
+
+  -- workaround https://github.com/centreon/centreon-broker/issues/201
+  current_event = broker.json_encode(e)
+  broker_log:info(3, "write: Raw event: " .. current_event)
+
+  -- Keep only HARD state types
+  if e.state_type ~= 1 then
+    broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Not HARD state type. 
Dropping.") + return true + end + + -- Ignore states different from previous hard state only + if e.last_hard_state_change and e.last_hard_state_change < e.last_check then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Last hard state change prior to last check => no state change. Dropping.") + return true + end + + -- Ignore objects in downtime + if e.scheduled_downtime_depth ~= 0 then --we keep only events in hard state and not in downtime + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Scheduled downtime. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + if current_event == previous_event then + broker_log:info(3, "write: Duplicate event ignored.") + return true + end + + -- Ignore pending states + if e.state and e.state == 4 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Pending state ignored. Dropping.") + return true + end + + -- The current event now becomes the previous + previous_event = current_event + -- Once all the filters have been passed successfully, we can add the current event to the queue + queue:add(e) + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + broker_log:info(3, "write: Ending function") + + return true +end + diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua new file mode 100644 index 00000000000..f22704292c0 --- /dev/null +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -0,0 +1,401 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Splunk Connector Events +-------------------------------------------------------------------------------- + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "http_server_url", + "splunk_token" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/splunk-metrics.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + 
diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua new file mode 100644 index 00000000000..f22704292c0 --- /dev/null +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-apiv2.lua @@ -0,0 +1,401 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Splunk Connector Metrics +-------------------------------------------------------------------------------- + +-- Libraries +local curl = require "cURL" +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics") +local sc_flush = require("centreon-stream-connectors-lib.sc_flush") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +-------------------------------------------------------------------------------- +-- EventQueue class +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param params The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(params) + local self = {} + + local mandatory_parameters = { + "http_server_url", + "splunk_token" + } + + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/splunk-metrics.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- checking mandatory parameters and setting a fail flag + if not self.sc_params:is_mandatory_config_set(mandatory_parameters, params) then + self.fail = true + end + + -- overriding default parameters for this stream connector if the default values don't suit the basic needs + self.sc_params.params.splunk_index = params.splunk_index or "" + self.sc_params.params.splunk_source = params.splunk_source or "" + self.sc_params.params.splunk_sourcetype = params.splunk_sourcetype or "_json" + self.sc_params.params.splunk_host = params.splunk_host or "Central" + self.sc_params.params.accepted_categories = params.accepted_categories or "neb" + self.sc_params.params.accepted_elements = params.accepted_elements or "host_status,service_status" + self.sc_params.params.max_buffer_size = params.max_buffer_size or 30 + self.sc_params.params.hard_only = params.hard_only or 0 + self.sc_params.params.enable_host_status_dedup = params.enable_host_status_dedup or 0 + self.sc_params.params.enable_service_status_dedup = params.enable_service_status_dedup or 0 + self.sc_params.params.metric_name_regex = params.metric_name_regex or "[^a-zA-Z0-9_]" + self.sc_params.params.metric_replacement_character = params.metric_replacement_character or "_" + + -- apply user params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- load the custom code file only; it is not executed yet + if self.sc_params.load_custom_code_file and not self.sc_params:load_custom_code_file(self.sc_params.params.custom_code_file) then + self.sc_logger:error("[EventQueue:new]: couldn't successfully load the custom code file: " .. 
tostring(self.sc_params.params.custom_code_file)) + end + + self.sc_params:build_accepted_elements_info() + + self.sc_flush = sc_flush.new(self.sc_params.params, self.sc_logger) + + local categories = self.sc_params.params.bbdo.categories + local elements = self.sc_params.params.bbdo.elements + + self.format_event = { + [categories.neb.id] = { + [elements.host_status.id] = function () return self:format_event_host() end, + [elements.service_status.id] = function () return self:format_event_service() end + } + } + + self.format_metric = { + [categories.neb.id] = { + [elements.host_status.id] = function (metric) return self:format_metric_host(metric) end, + [elements.service_status.id] = function (metric) return self:format_metric_service(metric) end + } + } + + self.send_data_method = { + [1] = function (payload, queue_metadata) return self:send_data(payload, queue_metadata) end + } + + self.build_payload_method = { + [1] = function (payload, event) return self:build_payload(payload, event) end + } + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event method +---------------------------------------------------------------------------------- +function EventQueue:format_accepted_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + self.sc_logger:debug("[EventQueue:format_event]: starting format event") + + -- can't format event if stream connector is not handling this kind of event + if not self.format_event[category][element] then + self.sc_logger:error("[format_event]: You are trying to format an event with category: " + .. tostring(self.sc_params.params.reverse_category_mapping[category]) .. " and element: " + .. tostring(self.sc_params.params.reverse_element_mapping[category][element]) + .. ". 
If it is a not a misconfiguration, you can open an issue at https://github.com/centreon/centreon-stream-connector-scripts/issues") + else + self.sc_logger:debug("[EventQueue:format_event]: going to format it") + self.format_event[category][element]() + end + + self.sc_logger:debug("[EventQueue:format_event]: event formatting is finished") +end + + +-------------------------------------------------------------------------------- +---- EventQueue:format_event_host method +-------------------------------------------------------------------------------- +function EventQueue:format_event_host() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + event_type = "host", + state = event.state, + state_type = event.state_type, + hostname = event.cache.host.name, + ctime = event.last_check + } + + self.sc_logger:debug("[EventQueue:format_event_host]: call build_metric ") + self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_event_service method +-------------------------------------------------------------------------------- +function EventQueue:format_event_service() + local event = self.sc_event.event + + self.sc_event.event.formated_event = { + event_type = "service", + state = event.state, + state_type = event.state_type, + hostname = event.cache.host.name, + service_description = event.cache.service.description, + ctime = event.last_check + } + + self.sc_logger:debug("[EventQueue:format_event_service]: call build_metric ") + self.sc_metrics:build_metric(self.format_metric[event.category][event.element]) +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_metric_host method +-- @param metric {table} a single metric data +-------------------------------------------------------------------------------- +function EventQueue:format_metric_host(metric) + self.sc_logger:debug("[EventQueue:format_metric_host]: call format_metric ") + self:format_metric_event(metric) +end + +-------------------------------------------------------------------------------- +---- EventQueue:format_metric_service method +-- @param metric {table} a single metric data +-------------------------------------------------------------------------------- +function EventQueue:format_metric_service(metric) + self.sc_logger:debug("[EventQueue:format_metric_service]: call format_metric ") + self:format_metric_event(metric) +end + +-------------------------------------------------------------------------------- +---- EventQueue:build_metadata method +-- @param metric {table} a single metric data +-- @return tags {table} a table with formated metadata +-------------------------------------------------------------------------------- +function EventQueue:format_metric_event(metric) + self.sc_logger:debug("[EventQueue:format_metric]: start real format metric ") + self.sc_event.event.formated_event["metric_name:" .. 
tostring(metric.metric_name)] = metric.value + + -- add metric instance in tags + if metric.instance ~= "" then + self.sc_event.event.formated_event["instance"] = metric.instance + end + + if metric.subinstance[1] then + self.sc_event.event.formated_event["subinstances"] = metric.subinstance + end + + self:add() + self.sc_logger:debug("[EventQueue:format_metric]: end real format metric ") +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[EventQueue:add]: add event in queue category: " .. tostring(self.sc_params.params.reverse_category_mapping[category]) + .. " element: " .. tostring(self.sc_params.params.reverse_element_mapping[category][element])) + + self.sc_logger:debug("[EventQueue:add]: queue size before adding event: " .. tostring(#self.sc_flush.queues[category][element].events)) + + self.sc_flush.queues[category][element].events[#self.sc_flush.queues[category][element].events + 1] = { + sourcetype = self.sc_params.params.splunk_sourcetype, + source = self.sc_params.params.splunk_source, + index = self.sc_params.params.splunk_index, + host = self.sc_params.params.splunk_host, + time = self.sc_event.event.last_check, + fields = self.sc_event.event.formated_event + } + + self.sc_logger:info("[EventQueue:add]: queue size is now: " .. tostring(#self.sc_flush.queues[category][element].events) + .. ", max is: " .. tostring(self.sc_params.params.max_buffer_size)) +end + +-------------------------------------------------------------------------------- +-- EventQueue:build_payload, concatenate data so it is ready to be sent +-- @param payload {string} json encoded string +-- @param event {table} the event that is going to be added to the payload +-- @return payload {string} json encoded string +-------------------------------------------------------------------------------- +function EventQueue:build_payload(payload, event) + if not payload then + payload = broker.json_encode(event) + else + payload = payload .. broker.json_encode(event) + end + + return payload +end + +function EventQueue:send_data(payload, queue_metadata) + self.sc_logger:debug("[EventQueue:send_data]: Starting to send data") + queue_metadata.headers = { + "content-type: application/json", + "content-length:" .. string.len(payload), + "authorization: Splunk " .. self.sc_params.params.splunk_token + } + local url = self.sc_params.params.http_server_url + + self.sc_logger:log_curl_command(url, queue_metadata, self.sc_params.params, payload) + + -- write payload in the logfile for test purpose + if self.sc_params.params.send_data_test == 1 then + self.sc_logger:notice("[send_data]: " .. tostring(payload)) + return true + end + + self.sc_logger:info("[EventQueue:send_data]: Going to send the following json " .. tostring(payload)) + self.sc_logger:info("[EventQueue:send_data]: Splunk address is: " .. tostring(url)) + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. 
tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.sc_params.params.connection_timeout) + :setopt(curl.OPT_SSL_VERIFYPEER, self.sc_params.params.allow_insecure_connection) + :setopt(curl.OPT_HTTPHEADER, queue_metadata.headers) + + -- set proxy address configuration + if (self.sc_params.params.proxy_address ~= '') then + if (self.sc_params.params.proxy_port ~= '') then + http_request:setopt(curl.OPT_PROXY, self.sc_params.params.proxy_address .. ':' .. self.sc_params.params.proxy_port) + else + self.sc_logger:error("[EventQueue:send_data]: proxy_port parameter is not set but proxy_address is used") + end + end + + -- set proxy user configuration + if (self.sc_params.params.proxy_username ~= '') then + if (self.sc_params.params.proxy_password ~= '') then + http_request:setopt(curl.OPT_PROXYUSERPWD, self.sc_params.params.proxy_username .. ':' .. self.sc_params.params.proxy_password) + else + broker_log:error("[EventQueue:send_data]: proxy_password parameter is not set but proxy_username is used") + end + end + + -- adding the HTTP POST data + http_request:setopt_postfields(payload) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 200 then + self.sc_logger:info("[EventQueue:send_data]: HTTP POST request successful: return code is " .. tostring(http_response_code)) + retval = true + else + self.sc_logger:error("[EventQueue:send_data]: HTTP POST request FAILED, return code is " .. tostring(http_response_code) .. ". Message is: " .. tostring(http_response_body)) + end + + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + queue = EventQueue.new(conf) +end + +-- -------------------------------------------------------------------------------- +-- write, +-- @param {table} event, the event from broker +-- @return {boolean} +-------------------------------------------------------------------------------- +function write (event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return false + end + + -- initiate event object + queue.sc_metrics = sc_metrics.new(event, queue.sc_params.params, queue.sc_common, queue.sc_broker, queue.sc_logger) + queue.sc_event = queue.sc_metrics.sc_event + + if queue.sc_event:is_valid_category() then + if queue.sc_metrics:is_valid_bbdo_element() then + -- format event if it is validated + if queue.sc_metrics:is_valid_metric_event() then + queue:format_accepted_event() + end + --- log why the event has been dropped + else + queue.sc_logger:debug("dropping event because element is not valid. Event element is: " + .. tostring(queue.sc_params.params.reverse_element_mapping[queue.sc_event.event.category][queue.sc_event.event.element])) + end + else + queue.sc_logger:debug("dropping event because category is not valid. Event category is: " + .. 
tostring(queue.sc_params.params.reverse_category_mapping[queue.sc_event.event.category])) + end + + return flush() +end + + +-- flush method is called by broker every now and then (more often when broker has nothing else to do) +function flush() + local queues_size = queue.sc_flush:get_queues_size() + + -- nothing to flush + if queues_size == 0 then + return true + end + + -- flush all queues because last global flush is too old + if queue.sc_flush.last_global_flush < os.time() - queue.sc_params.params.max_all_queues_age then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- flush queues because too many events are stored in them + if queues_size > queue.sc_params.params.max_buffer_size then + if not queue.sc_flush:flush_all_queues(queue.build_payload_method[1], queue.send_data_method[1]) then + return false + end + + return true + end + + -- there are events in the queue but they were not ready to be send + return false +end + diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua new file mode 100755 index 00000000000..6bf55dd63b6 --- /dev/null +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-http-apiv1.lua @@ -0,0 +1,292 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Splunk Connector Metrics +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Prerequisites +-- You need a Splunk instance +-- You need to create a new HTTP events collector with a metrics index and get a token +-- +-- The lua-curl and luatz libraries are required by this script: +-- yum install lua-curl epel-release +-- yum install luarocks +-- luarocks install luatz +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Parameters: +-- [MANDATORY] http_server_url: your splunk API url +-- [MANDATORY] splunk_index: index where you want to store the events +-- [MANDATORY] splunk_token: see above, this will be your authentication token +-- [MANDATORY] splunk_source: source of the HTTP events collector, must be http:something +-- [OPTIONAL] splunk_sourcetype: sourcetype of the HTTP events collector, default _json +-- [OPTIONAL] splunk_host: host field for the HTTP events collector, default Centreon +-- [OPTIONAL] http_proxy_string: default empty +-- +-------------------------------------------------------------------------------- + +-- Libraries +local curl = require "cURL" +local new_from_timestamp = require "luatz.timetable".new_from_timestamp +-- Global variables +local previous_event = "" + +-- Useful functions +local function ifnil(var, alt) + if var == nil then + return alt + else + return var + end +end + +local function ifnil_or_empty(var, alt) + if var == nil or var == '' then + return alt + else + return var + end +end + +local function get_hostname(host_id) + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + hostname = host_id + end + return hostname +end + +local function get_service_description(host_id, service_id) + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + service = service_id + end + return service +end + 
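+-- Note on the payload shape: the queue built below follows Splunk's HEC
+-- metrics convention, where each value travels in the "fields" table under a
+-- "metric_name:<name>" key. Illustrative sketch (placeholder values) of one
+-- entry of self.events before it is concatenated by EventQueue:flush():
+--   {
+--     time = 1700000000, source = "http:centreon", sourcetype = "_json",
+--     index = "centreon_metrics", host = "Centreon",
+--     fields = {
+--       hostname = "srv-web-01", service_description = "HTTP",
+--       ctime = 1700000000, ["metric_name:response_time"] = 0.042
+--     }
+--   }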
+-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(conf) + local retval = { + http_server_url = "", + http_proxy_string = "", + http_timeout = 5, + splunk_sourcetype = "_json", + splunk_source = "", + splunk_token = "", + splunk_index = "", + splunk_host = "Centreon", + filter_type = "metric,status", + max_buffer_size = 1, + max_buffer_age = 5, + skip_anon_events = 1 + } + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v) + else + broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v) + end + end + retval.__internal_ts_last_flush = os.time() + retval.events = {}, + setmetatable(retval, EventQueue) +-- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + return retval +end + +-------------------------------------------------------------------------------- +---- EventQueue:add method +---- @param e An event +---------------------------------------------------------------------------------- + +function EventQueue:add(e) + + local type = "host" + local hostname = get_hostname(e.host_id) + if hostname == e.host_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'") + return false + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only") + end + end + + local service_description = "" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + end + end + end + + local event_data = { + service_description = service_description, + hostname = hostname, + ctime = e.ctime + } + + event_data["metric_name:" .. e.name] = e.value + + self.events[#self.events + 1] = { + time = e.ctime, + source = self.splunk_source, + sourcetype = self.splunk_sourcetype, + index = self.splunk_index, + host = self.splunk_host, + fields = event_data + } + + return true + +end + +-------------------------------------------------------------------------------- +---- EventQueue:flush method +---- Called when the max number of events or the max age are reached +---------------------------------------------------------------------------------- + +function EventQueue:flush() + + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + local http_post_data = "" + for _, raw_event in ipairs(self.events) do + http_post_data = http_post_data .. 
broker.json_encode(raw_event) + end + for s in http_post_data:gmatch("[^\r\n]+") do + broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + end + + broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. "\"") + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.http_server_url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.http_timeout) + :setopt( + curl.OPT_HTTPHEADER, + { + "content-type: application/json", + "content-length:" .. string.len(http_post_data), + "authorization: Splunk " .. self.splunk_token, + } + ) + + -- setting the CURLOPT_PROXY + if self.http_proxy_string and self.http_proxy_string ~= "" then + broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'") + http_request:setopt(curl.OPT_PROXY, self.http_proxy_string) + end + + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 200 then + broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code) + -- now that the data has been sent, we empty the events array + self.events = {} + retval = true + else + broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code) + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. "\n\"\n") + end + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + local log_level = 1 + local log_path = "/var/log/centreon-broker/stream-connector-splunk-metrics.log" + for i,v in pairs(conf) do + if i == "log_level" then + log_level = v + end + if i == "log_path" then + log_path = v + end + end + broker_log:set_parameters(log_level, log_path) + broker_log:info(0, "init: Starting Splunk Metrics StreamConnector (log level: " .. log_level .. ")") + broker_log:info(2, "init: Beginning init() function") + queue = EventQueue.new(conf) + broker_log:info(2, "init: Ending init() function, Event queue created") +end + +-- Fonction write() +function write(e) + broker_log:info(3, "write: Beginning function") + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Here come the filters + -- Host/service status only + if not (e.category == 3 and e.element == 1) then + broker_log:info(3, "write: Not a metric event. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + broker_log:info(3, "write: Raw event: " .. 
current_event) + -- + + -- Once all the filters have been passed successfully, we can add the current event to the queue + queue:add(e) + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + broker_log:info(3, "write: Ending function") + + return true +end + diff --git a/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua new file mode 100755 index 00000000000..02f7c2162e3 --- /dev/null +++ b/stream-connectors/centreon-certified/splunk/splunk-metrics-luacurl-apiv1.lua @@ -0,0 +1,313 @@ +#!/usr/bin/lua +-------------------------------------------------------------------------------- +-- Centreon Broker Splunk Connector Metrics +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Prerequisites +-- You need a Splunk instance +-- You need to create a new HTTP events collector with a metrics index and get a token +-- +-- The lua-curl and luatz libraries are required by this script: +-- yum install lua-curl epel-release +-- yum install luarocks +-- luarocks install luatz +-------------------------------------------------------------------------------- + +-------------------------------------------------------------------------------- +-- Parameters: +-- [MANDATORY] http_server_url: your splunk API url +-- [MANDATORY] splunk_index: index where you want to store the events +-- [MANDATORY] splunk_token: see above, this will be your authentication token +-- [OPTIONAL] splunk_source: source of the HTTP events collector, must be http:something +-- [OPTIONAL] splunk_sourcetype: sourcetype of the HTTP events collector, default _json +-- [OPTIONAL] splunk_host: host field for the HTTP events collector, default Central +-- [OPTIONAL] http_proxy_string: default empty +-- +-------------------------------------------------------------------------------- + +-- Libraries +local curl = require "cURL" +-- Global variables +local previous_event = "" + +-- Useful functions +local function ifnil(var, alt) + if var == nil then + return alt + else + return var + end +end + +local function ifnil_or_empty(var, alt) + if var == nil or var == '' then + return alt + else + return var + end +end + +local function get_hostname(host_id) + local hostname = broker_cache:get_hostname(host_id) + if not hostname then + hostname = host_id + end + return hostname +end + +local function get_service_description(host_id, service_id) + local service = broker_cache:get_service_description(host_id, service_id) + if not service then + service = service_id + end + return service +end + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local EventQueue = {} +EventQueue.__index = EventQueue + +-------------------------------------------------------------------------------- +---- Constructor +---- @param conf The table given by the init() function and returned from the GUI +---- @return the new EventQueue +---------------------------------------------------------------------------------- + +function EventQueue.new(conf) + local retval = { + 
http_server_url = "", + http_proxy_string = "", + http_timeout = 5, + splunk_sourcetype = "_json", + splunk_source = "Centreon", + splunk_token = "", + splunk_index = "", + splunk_host = "Central", + filter_type = "metric,status", + max_buffer_size = 1, + max_buffer_age = 5, + skip_anon_events = 1 + } + for i,v in pairs(conf) do + if retval[i] then + retval[i] = v + broker_log:info(2, "EventQueue.new: getting parameter " .. i .. " => " .. v) + else + broker_log:warning(1, "EventQueue.new: ignoring unhandled parameter " .. i .. " => " .. v) + end + end + retval.__internal_ts_last_flush = os.time() + retval.events = {}, + setmetatable(retval, EventQueue) +-- Internal data initialization + broker_log:info(2, "EventQueue.new: setting the internal timestamp to " .. retval.__internal_ts_last_flush) + return retval +end + +-------------------------------------------------------------------------------- +---- EventQueue:add method +---- @param e An event +---------------------------------------------------------------------------------- + +function EventQueue:add(e) + + local type = "host" + local hostname = get_hostname(e.host_id) + if hostname == e.host_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get hostname for host_id '" .. e.host_id .."'") + return false + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."'. The event will be sent with the id only") + end + end + + local service_description = "" + if e.service_id then + type = "service" + service_description = get_service_description(e.host_id, e.service_id) + if service_description == e.service_id then + if self.skip_anon_events ~= 1 then + broker_log:error(0, "EventQueue:add: unable to get service_description for host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + else + broker_log:info(3, "EventQueue:add: ignoring that we can't resolve host_id '" .. e.host_id .."' and service_id '" .. e.service_id .."'") + end + end + end + + local event_data = { + service_description = ifnil_or_empty(service_description,hostname), + hostname = hostname, + ctime = e.last_check + } + + -- Managing perfdata + local metrics = "" + if e.perfdata then + local perf, err_str = broker.parse_perfdata(e.perfdata, true) + if perf then + for key,v in pairs(perf) do + event_data["metric_name:" .. key] = tostring(v.value) + end + end + end + + self.events[#self.events + 1] = { + time = e.last_time, + source = self.splunk_source, + sourcetype = self.splunk_sourcetype, + index = self.splunk_index, + host = self.splunk_host, + fields = event_data + } + + return true + +end + +-------------------------------------------------------------------------------- +---- EventQueue:flush method +---- Called when the max number of events or the max age are reached +---------------------------------------------------------------------------------- + +function EventQueue:flush() + + broker_log:info(3, "EventQueue:flush: Concatenating all the events as one string") + local http_post_data = "" + for _, raw_event in ipairs(self.events) do + http_post_data = http_post_data .. broker.json_encode(raw_event) + end + for s in http_post_data:gmatch("[^\r\n]+") do + broker_log:info(3, "EventQueue:flush: HTTP POST data: " .. s .. "") + end + + broker_log:info(3, "EventQueue:flush: HTTP POST url: \"" .. self.http_server_url .. 
"\"") + + local http_response_body = "" + local http_request = curl.easy() + :setopt_url(self.http_server_url) + :setopt_writefunction( + function (response) + http_response_body = http_response_body .. tostring(response) + end + ) + :setopt(curl.OPT_TIMEOUT, self.http_timeout) + :setopt( + curl.OPT_HTTPHEADER, + { + "content-type: application/json", + "content-length:" .. string.len(http_post_data), + "authorization: Splunk " .. self.splunk_token, + } + ) + + -- setting the CURLOPT_PROXY + if self.http_proxy_string and self.http_proxy_string ~= "" then + broker_log:info(3, "EventQueue:flush: HTTP PROXY string is '" .. self.http_proxy_string .. "'") + http_request:setopt(curl.OPT_PROXY, self.http_proxy_string) + end + + -- adding the HTTP POST data + http_request:setopt_postfields(http_post_data) + + -- performing the HTTP request + http_request:perform() + + -- collecting results + http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE) + + http_request:close() + + -- Handling the return code + local retval = false + if http_response_code == 200 then + broker_log:info(2, "EventQueue:flush: HTTP POST request successful: return code is " .. http_response_code) + -- now that the data has been sent, we empty the events array + self.events = {} + retval = true + else + broker_log:error(0, "EventQueue:flush: HTTP POST request FAILED, return code is " .. http_response_code) + broker_log:error(1, "EventQueue:flush: HTTP POST request FAILED, message is:\n\"" .. http_response_body .. "\n\"\n") + end + -- and update the timestamp + self.__internal_ts_last_flush = os.time() + return retval +end + +-------------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +-------------------------------------------------------------------------------- + +local queue + +-- Fonction init() +function init(conf) + local log_level = 1 + local log_path = "/var/log/centreon-broker/stream-connector-splunk-metrics.log" + for i,v in pairs(conf) do + if i == "log_level" then + log_level = v + end + if i == "log_path" then + log_path = v + end + end + broker_log:set_parameters(log_level, log_path) + broker_log:info(0, "init: Starting Splunk Metrics StreamConnector (log level: " .. log_level .. ")") + broker_log:info(2, "init: Beginning init() function") + queue = EventQueue.new(conf) + broker_log:info(2, "init: Ending init() function, Event queue created") +end + +-- Fonction write() +function write(e) + broker_log:info(3, "write: Beginning function") + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.__internal_ts_last_flush > queue.max_buffer_age) then + broker_log:info(2, "write: Queue max age (" .. os.time() - queue.__internal_ts_last_flush .. "/" .. queue.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Here come the filters + -- Host/service status only + if not (e.category == 1 and (e.element == 14 or e.element == 24)) then + broker_log:info(3, "write: Neither host nor service status event. Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + broker_log:info(2, "write: Raw event: " .. current_event) + -- + + -- Ignore pending states + if e.state and e.state == 4 then + broker_log:info(3, "write: " .. e.host_id .. "_" .. ifnil_or_empty(e.service_id, "H") .. " Pending state ignored. 
Dropping.") + return true + end + + -- workaround https://github.com/centreon/centreon-broker/issues/201 + current_event = broker.json_encode(e) + broker_log:info(2, "write: Raw event: " .. current_event) + -- + + -- The current event now becomes the previous + previous_event = current_event + -- Once all the filters have been passed successfully, we can add the current event to the queue + queue:add(e) + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.max_buffer_size) then + broker_log:info(2, "write: Queue max size (" .. #queue.events .. "/" .. queue.max_buffer_size .. ") is reached, flushing data") + return queue:flush() + end + broker_log:info(3, "write: Ending function") + + return true +end + diff --git a/stream-connectors/centreon-certified/splunk/splunk-states-http-apiv1.lua b/stream-connectors/centreon-certified/splunk/splunk-states-http-apiv1.lua new file mode 100644 index 00000000000..9d2732cc3c3 --- /dev/null +++ b/stream-connectors/centreon-certified/splunk/splunk-states-http-apiv1.lua @@ -0,0 +1,138 @@ +#!/usr/bin/lua +local http = require("socket.http") +local ltn12 = require("ltn12") + +-------------------------------------------------------------------------------- +-- Classe event_queue +-------------------------------------------------------------------------------- + +local event_queue = { + receiver_address = "", + receiver_port = 8088, + receiver_proto = "http", + splunk_sourcename = "", + splunk_sourcetype = "_json", + splunk_auth_var = "", + events = {}, + buffer_size = 50 +} + +-- Constructeur event_queue:new +function event_queue:new(o, conf) + o = o or {} + setmetatable(o, self) + self.__index = self + for i,v in pairs(conf) do + broker_log:info(1, "event_queue:new: getting parameter " .. i .. " => " .. v) + if self[i] and i ~= "events" then + self[i] = v + end + end + return o +end + +-- Méthode event_queue:flush +function event_queue:flush() + broker_log:info(2, "event_queue:flush: Concatenating all the events as one JSON string") + -- we concatenate all the events as a serie of json objects separated by a whitespace + local post_data = "" + for i, json_event in ipairs(self.events) do + post_data = post_data .. json_event + end + broker_log:info(2, "event_queue:flush: HTTP POST request \"" .. self.receiver_proto .. "://" .. self.receiver_address .. ":" .. self.receiver_port .. "/services/collector\"") + broker_log:info(2, "event_queue:flush: HTTP POST data are: '" .. post_data .. "'") + local hr_result, hr_code, hr_header, hr_s = http.request{ + url = self.receiver_proto .. "://" .. self.receiver_address .. ":" .. self.receiver_port .. "/services/collector", + method = "POST", + headers = { + ["Authorization"] = "Splunk " .. self.splunk_auth_var, -- Splunk HTTP JSON API needs this header field to accept input + ["content-length"] = string.len(post_data) -- mandatory for POST request with body + }, + source = ltn12.source.string(post_data) -- request body needs to be formatted as a LTN12 source + } + if hr_code == 200 then + broker_log:info(2, "event_queue:flush: HTTP POST request successful: return code is " .. hr_code) + else + broker_log:error(1, "event_queue:flush: HTTP POST request FAILED: return code is " .. 
hr_code) + end + + -- now that the data has been sent, we flush the events array + self.events = {} +end + +-- Méthode event_queue:add +function event_queue:add(e) + local splunk_event_data = {} + local event_data = { + output = e.output, + state = e.state + } + local t_event_type = "host" + local t_host_name = broker_cache:get_hostname(e.host_id) + if t_host_name then + event_data.host_name = t_host_name + else + broker_log:warning(1, "event_queue:add: host_name for id " .. e.host_id .. " not found") + event_data.host_name = e.host_id + end + if e.service_id then + t_event_type = "service" + local t_service_description = broker_cache:get_service_description(e.host_id, e.service_id) + if t_service_description then + event_data.service_description = broker_cache:get_service_description(e.host_id, e.service_id) + else + broker_log:warning(1, "event_queue:add: service_description for id " .. e.host_id .. "." .. e.service_id .. " not found") + event_data.service_description = e.service_id + end + end + event_data.event_type = t_event_type + splunk_event_data = { + sourcetype = self.splunk_sourcetype, + source = self.splunk_sourcename, + time = e.ctime, + event = event_data + } + local json_splunk_event_data = broker.json_encode(splunk_event_data) + broker_log:info(3, "event_queue:add: Adding event #" .. #self.events) + broker_log:info(3, "event_queue:add: event json: " .. json_splunk_event_data) + self.events[#self.events + 1] = json_splunk_event_data + + if #self.events < self.buffer_size then + return false + else + self:flush() + return true + end +end +-------------------------------------------------------------------------------- + + +-------------------------------------------------------------------------------- +-- Fonctions requises pour Broker StreamConnector +-------------------------------------------------------------------------------- + +-- Fonction init() +function init(conf) + broker_log:set_parameters(1, "/var/log/centreon-broker/stream-connector-bis.log") + broker_log:info(2, "init: Beginning init() function") + queue = event_queue:new(nil, conf) + broker_log:info(2, "init: Ending init() function, Event queue created") +end + +-- Fonction write() +function write(e) + broker_log:info(3, "write: Beginning write() function") + queue:add(e) + broker_log:info(3, "write: Ending write() function") + return true +end + +-- Fonction filter() +function filter(category, element) + --broker_log:info(3, "category: ".. category .. " - element: " .. element) + if category == 1 and (element == 14 or element == 24) then + return true + end + return false +end + diff --git a/stream-connectors/centreon-certified/splunk/splunk.png b/stream-connectors/centreon-certified/splunk/splunk.png new file mode 100644 index 00000000000..f2021268091 Binary files /dev/null and b/stream-connectors/centreon-certified/splunk/splunk.png differ diff --git a/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua new file mode 100644 index 00000000000..0fce09f6c07 --- /dev/null +++ b/stream-connectors/centreon-certified/warp10/export-warp10-apiv1.lua @@ -0,0 +1,117 @@ +-- +-- Copyright 2024 Centreon +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. 
+-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +-- For more information : contact@centreon.com +-- +-- To work this script to provide a Broker stream connector output configuration +-- with the following informations: +-- ipaddr (string): the ip address of the Warp10 server +-- logfile (string): the log file +-- port (number): the Warp10 server port +-- token (string): the Warp10 write token +-- max_size (number): how many queries to store before sending them to the server. +-- + +local curl = require "cURL" + +local my_data = { + ipaddr = "172.17.0.1", + logfile = "/tmp/test-warp10.log", + port = 8080, + token = "", + max_size = 10, + data = {} +} + +function init(conf) + if conf.logfile then + my_data.logfile = conf.logfile + end + broker_log:set_parameters(3, my_data.logfile) + if conf.ipaddr then + my_data.ipaddr = conf.ipaddr + end + if conf.port then + my_data.port = conf.port + end + if not conf.token then + broker_log:error(0, "You must provide a token to write into Warp10") + end + my_data.token = conf.token + if conf.max_size then + my_data.max_size = conf.max_size + end +end + +local function flush() + local buf = table.concat(my_data.data, "\n") + local c = curl.easy{ + url = "http://" .. my_data.ipaddr .. ":" .. my_data.port .. "/api/v0/update", + post = true, + httpheader = { + "Transfer-Encoding:chunked", + "X-Warp10-Token:" .. my_data.token, + }, + postfields = buf } + + c:perform() + c:close() + my_data.data = {} + return true +end + +function write(d) + -- Service status + if d.category == 1 and d.element == 24 then + local pd = broker.parse_perfdata(d.perfdata) + local host = broker_cache:get_hostname(d.host_id) + local service = broker_cache:get_service_description(d.host_id, d.service_id) + if not host or not service then + broker_log:error(0, "You should restart engine to fill the cache") + return true + end + local labels = "hostname=" .. host .. ",service=" .. service .. ',' + local sgroups = broker_cache:get_servicegroups(d.host_id, d.service_id) + if sgroups and #sgroups > 0 then + grps = "" + for idx = 1, #sgroups do + grps = grps .. sgroups[idx].group_name .. ' ' + end + labels = labels .. "service_groups=" .. grps .. ',' + end + local hgroups = broker_cache:get_hostgroups(d.host_id) + if hgroups and #hgroups > 0 then + grps = "" + for idx = 1, #hgroups do + grps = grps .. hgroups[idx].group_name .. ' ' + end + labels = labels .. "host_groups=" .. grps .. ',' + end + for metric,v in pairs(pd) do + local line = tostring(d.last_update) .. "000000// " + .. metric + .. "{" .. labels .. "} " + .. tostring(v) + table.insert(my_data.data, line) + broker_log:info(0, "New line added to data: '" .. line .. 
"'") + end + if #my_data.data > my_data.max_size then + broker_log:info(0, "Flushing data") + return flush() + end + end + return false +end + diff --git a/stream-connectors/community-powered/canopsis/README.md b/stream-connectors/community-powered/canopsis/README.md new file mode 100644 index 00000000000..34d3b08ad12 --- /dev/null +++ b/stream-connectors/community-powered/canopsis/README.md @@ -0,0 +1,183 @@ +**Canopsis** + +- https://doc.canopsis.net/guide-developpement/struct-event/ + +## Description + +This script use the stream-connector mechanism of Centreon to get events from +the pollers. The event is then translated to a Canopsis event and sent to the +HTTP REST API. + +## Technical description + +This connector follow the best practices of the Centreon documentation +(see the listed links in the first section). + +The script is in lua language as imposed by the stream-connector specification. + +It get all the events from Centreon and convert these events in +a Canopsis compatible json format. + +Filtered events are sent to HTTP API of Canopsis by chunk to reduce the number of +connections. + +The filtered events are : + +- acknowledgment events (category 1, element 1) +- downtime events (category 1, element 5) +- host events (category 1, element 14) +- service events (category 1, element 24) + +Extra informations are added to the host and services as bellow : + +- action_url +- notes_url +- hostgroups +- servicegroups (for service events) + +### Acknowledgment + +Two kinds of ack are sent to Canopsis : + +- Ack creation +- Ack deletion + +An ack is positioned on the resource/component reference + +### Downtime + +Two kinds of downtime are sent to Canopsis as "pbehavior" : + +- Downtime creation +- Downtime cancellation + +A uniq ID is generated from the informations of the downtime carried by Centreon. + +*Note : The recurrent downtimes are not implemented by the stream connector yet.* + +### Host status + +All HARD events with a state changed from hosts are sent to Canopsis. + +Take care of the state mapping as below : + +``` +-- CENTREON // CANOPSIS +-- --------------------- +-- UP (0) // INFO (0) +-- DOWN (1) // CRITICAL (3) +-- UNREACHABLE (2) // MAJOR (2) +``` + +### Service status + +All HARD events with a state changed from services are sent to Canopsis. + +Take care of the state mapping as below : + +``` +-- CENTREON // CANOPSIS +-- --------------------- +-- OK (0) // INFO (0) +-- WARNING (1) // MINOR (1) +-- CRITICAL (2) // CRITICAL (3) +-- UNKNOWN (3) // MAJOR (2) +``` + +## Howto + +### Prerequisites + +* lua version >= 5.1.4 +* install lua-socket library (http://w3.impa.br/~diego/software/luasocket/) + * >= 3.0rc1-2 ( from sources, you have to install also gcc + lua-devel packages ) available into canopsis repository +* centreon-broker version 19.10.5 or >= 20.04.2 + +### Installation + +**Software deployment from sources (centreon-broker 19.10.5 or >= 20.04.2) :** + +1. Copy the lua script `bbdo2canopsis.lua` from `canopsis` dir to `/usr/share/centreon-broker/lua/bbdo2canopsis.lua` +2. Change the permissions to this file `chown centreon-engine:centreon-engine /usr/share/centreon-broker/lua/bbdo2canopsis.lua` + +**Software deployment from packages (centreon-broker >= 20.04.2) :** + +1. Install canopsis repository first + +``` +echo "[canopsis] +name = canopsis +baseurl=https://repositories.canopsis.net/pulp/repos/centos7-canopsis/ +gpgcheck=0 +enabled=1" > /etc/yum.repos.d/canopsis.repo +``` + +2. 
+## Howto + +### Prerequisites + +* lua version >= 5.1.4 +* install the lua-socket library (http://w3.impa.br/~diego/software/luasocket/) + * >= 3.0rc1-2 (from sources, you also have to install the gcc and lua-devel packages), available in the canopsis repository +* centreon-broker version 19.10.5 or >= 20.04.2 + +### Installation + +**Software deployment from sources (centreon-broker 19.10.5 or >= 20.04.2):** + +1. Copy the lua script `bbdo2canopsis.lua` from the `canopsis` dir to `/usr/share/centreon-broker/lua/bbdo2canopsis.lua` +2. Change the permissions of this file: `chown centreon-engine:centreon-engine /usr/share/centreon-broker/lua/bbdo2canopsis.lua` + +**Software deployment from packages (centreon-broker >= 20.04.2):** + +1. Install the canopsis repository first + +``` +echo "[canopsis] +name = canopsis +baseurl=https://repositories.canopsis.net/pulp/repos/centos7-canopsis/ +gpgcheck=0 +enabled=1" > /etc/yum.repos.d/canopsis.repo +``` + +2. Install the connector with Yum +``` +yum install canopsis-connector-centreon-stream-connector +``` + +**Enable the connector:** + +1. add a new "Generic - Stream connector" output on the central-broker-master (see the official documentation) +2. export the poller configuration (see the official documentation) +3. restart the services: `systemctl restart cbd centengine gorgoned` + +If you modify this script in development mode (directly on the Centreon host), +you will need to restart the Centreon services (at least the centengine service). + +### Configuration + +All the configuration can be done through the Centreon interface as described in +the official documentation. + +**The main parameters you have to set are:** + +``` +connector_name = "your connector source name" +canopsis_user = "your Canopsis API user" +canopsis_password = "your Canopsis API password" +canopsis_host = "your Canopsis host" +``` + +**If you want to customize your queue parameters (optional):** + +``` +max_buffer_age = 60 -- retention queue time before sending data +max_buffer_size = 10 -- buffer size in number of events +``` + +**The init spread timer (optional):** + +``` +init_spread_timer = 360 -- time to spread events in seconds at connector start +``` + +This timer is needed when the connector starts. + +During this time, the connector sends all HARD state events (whether the state +changed or not) to refresh the event information from Centreon to Canopsis. In +that way the level of information converges. + +*This implies a burst of events and a higher load for the server during this time.* + +**On the Centreon WUI you can set these parameters as below:** + +In Configuration > Pollers > Broker configuration > central-broker-master > +Output > Select "Generic - Stream connector" > Add + +![centreon-configuration-screenshot](pictures/centreon-configuration-screenshot.png) + +### Check the output + +By default the connector uses the HTTP REST API of Canopsis to send events. + +Check your alarm view to see the events from Centreon. + +All logs are dumped into the default log file "/var/log/centreon-broker/debug.log" + +#### Advanced usage + +You can also use a raw log file to dump all Canopsis events and manage your +own way of sending events (for example with logstash) by editing the "sending_method" +variable and setting the "file" method. 
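As an illustration of the resulting payload, a service status event built by `canopsisMapping()` and posted to the event route looks roughly like this (all values below are placeholders):

```
-- Illustrative Canopsis check event for a service, before JSON encoding:
local event = {
  event_type     = "check",
  source_type    = "resource",
  connector      = "centreon-stream",
  connector_name = "centreon-stream-central",
  component      = "srv-web-01",   -- host name resolved from broker_cache
  resource       = "HTTP",         -- service description
  timestamp      = 1700000000,     -- last_check
  output         = "HTTP OK: 200 in 0.042s",
  state          = 0,              -- Canopsis criticity after mapping
  servicegroups  = { "Web" },
  hostgroups     = { "Production" },
  notes_url      = "",
  action_url     = ""
}
```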
\ No newline at end of file diff --git a/stream-connectors/community-powered/canopsis/bbdo2canopsis.lua b/stream-connectors/community-powered/canopsis/bbdo2canopsis.lua new file mode 100755 index 00000000000..ecb66ccbd3c --- /dev/null +++ b/stream-connectors/community-powered/canopsis/bbdo2canopsis.lua @@ -0,0 +1,529 @@ +#!/usr/bin/lua +----------------------------------------------------------------------------- +-- +-- DESCRIPTION +-- +-- Centreon Broker Canopsis Connector +-- Tested with Canopsis 3.42 and Centreon 20.04.6 +-- +-- References : +-- * https://doc.canopsis.net/interconnexions/#connecteurs +-- * https://docs.centreon.com/docs/centreon/en/19.10/developer/writestreamconnector.html +-- * https://docs.centreon.com/docs/centreon-broker/en/latest/exploit/stream_connectors.html#the-broker-cache-object +-- * https://docs.centreon.com/docs/centreon-broker/en/3.0/dev/mapping.html +-- +-- Prerequisites : +-- * install packages gcc + lua-devel +-- * install lua-socket library (http://w3.impa.br/~diego/software/luasocket/) +-- * Centreon version 19.10.5 or >= 20.04.2 + +----------------------------------------------------------------------------- +-- LIBS +----------------------------------------------------------------------------- + +local http = require("socket.http") +local ltn12 = require("ltn12") + +----------------------------------------------------------------------------- +-- GLOBAL SETTINGS +----------------------------------------------------------------------------- +version = "1.0.0" + +settings = { + debug_log = "/var/log/centreon-broker/debug.log", + verbose = true, + connector = "centreon-stream", + connector_name = "centreon-stream-central", + stream_file = "/var/log/centreon-broker/bbdo2canopsis.log", + canopsis_user = "root", + canopsis_password = "root", + canopsis_event_route = "/api/v2/event", + canopsis_downtime_route = "/api/v2/pbehavior", + canopsis_host = "localhost", + canopsis_port = 8082, + sending_method = "api", -- methods : api = Canopsis HTTP API // file = raw log file + sending_protocol = "http", + timezone = "Europe/Paris", + init_spread_timer = 360 -- time to spread events in seconds at connector starts +} + + +----------------------------------------------------------------------------- +-- CUSTOM FUNCTIONS +----------------------------------------------------------------------------- +-- Write a debug log when verbose is true +local function debug(output) + if settings.verbose then broker_log:info(3, "[STREAM-CANOPSIS] " .. output) end +end + +-- Write an important log +local function log(output) + broker_log:info(1, "[STREAM-CANOPSIS] " .. output) +end + +-- Dump an error +local function fatal(output) + broker_log:error(1, "[STREAM-CANOPSIS] " .. output) +end + +local function getVersion() + log("VERSION : ".. version) +end + +-- Send an event to stream file +local function writeIntoFile(output) + local file,err = io.open(settings.stream_file, 'a') + if file == nil then + fatal("Couldn't open file: " .. err) + else + log("Writting to stream file : " .. settings.stream_file) + file:write(broker.json_encode(output)) + file:close() + end +end + +local function deleteCanopsisAPI(route) + local http_result_body = {} + + log("Delete data from Canopsis : " .. route) + + local hr_result, hr_code, hr_header, hr_s = http.request{ + url = settings.sending_protocol .. "://" .. settings.canopsis_user .. ":" .. settings.canopsis_password .. "@" .. settings.canopsis_host .. ":" .. settings.canopsis_port .. 
route,
+    method = "DELETE",
+    -- sink is where the request result's body will go
+    sink = ltn12.sink.table(http_result_body),
+  }
+
+  -- handling the return code
+  if hr_code == 200 then
+    log("HTTP DELETE request successful: return code is " .. hr_code)
+  else
+    fatal("HTTP DELETE FAILED: return code is " .. hr_code)
+    for i, v in ipairs(http_result_body) do
+      fatal("HTTP DELETE FAILED: message line " .. i .. ' is "' .. v .. '"')
+    end
+  end
+
+end
+
+-- Send an event to Canopsis API
+local function postCanopsisAPI(output, route)
+  local post_data = broker.json_encode(output)
+  local http_result_body = {}
+
+  route = route or settings.canopsis_event_route
+
+  log("Posting data to Canopsis " .. post_data .. " => to route: " .. route)
+
+  local hr_result, hr_code, hr_header, hr_s = http.request{
+    url = settings.sending_protocol .. "://" .. settings.canopsis_user .. ":" .. settings.canopsis_password .. "@" .. settings.canopsis_host .. ":" .. settings.canopsis_port .. route,
+    method = "POST",
+    -- sink is where the request result's body will go
+    sink = ltn12.sink.table(http_result_body),
+    -- request body needs to be formatted as a LTN12 source
+    source = ltn12.source.string(post_data),
+    headers = {
+      -- mandatory for POST request with body
+      ["content-length"] = string.len(post_data),
+      ["Content-Type"] = "application/json"
+    }
+  }
+  -- handling the return code
+  if hr_code == 200 then
+    log("HTTP POST request successful: return code is " .. hr_code)
+  else
+    fatal("HTTP POST FAILED: return code is " .. hr_code)
+    for i, v in ipairs(http_result_body) do
+      fatal("HTTP POST FAILED: message line " .. i .. ' is "' .. v .. '"')
+    end
+  end
+end
+
+
+-- Convert a Centreon host state to a Canopsis state:
+--
+-- CENTREON        //  CANOPSIS
+-- ---------------------
+-- UP (0)          //  INFO (0)
+-- DOWN (1)        //  CRITICAL (3)
+-- UNREACHABLE (2) //  MAJOR (2)
+--
+local function hostStateMapping(state)
+  local canostate = { 0, 3, 2 }
+  return canostate[state+1] -- state + 1 because in Lua indexes start at one
+end
+
+-- Convert a Centreon service state to a Canopsis state:
+--
+-- CENTREON      //  CANOPSIS
+-- ---------------------
+-- OK (0)        //  INFO (0)
+-- WARNING (1)   //  MINOR (1)
+-- CRITICAL (2)  //  CRITICAL (3)
+-- UNKNOWN (3)   //  MAJOR (2)
+--
+local function serviceStateMapping(state)
+  local canostate = { 0, 1, 3, 2 }
+  return canostate[state+1] -- state + 1 because in Lua indexes start at one
+end
+
+-- ****************
+-- GET BROKER_CACHE INFORMATION:
+
+-- Convert host_id to a hostname (needs a centengine restart to fill the cache)
+local function getHostname(host_id)
+  local host_name = broker_cache:get_hostname(host_id)
+  if not host_name then
+    debug("Unable to get name of host from broker_cache")
+    host_name = host_id
+  end
+  return host_name
+end
+
+-- Convert service_id to a service name (needs a centengine restart to fill the cache)
+local function getServicename(host_id, service_id)
+  local service_description = broker_cache:get_service_description(host_id, service_id)
+  if not service_description then
+    debug("Unable to get service description from broker_cache")
+    service_description = service_id
+  end
+  return service_description
+end
+
+-- Get the list of service groups of a service
+local function getServiceGroups(host_id, service_id)
+  local servicegroups = broker_cache:get_servicegroups(host_id, service_id)
+  local servicegroups_list = {}
+
+  if not servicegroups then
+    debug("Unable to get servicegroups from broker_cache")
+  else
+    for servicegroup_id, servicegroup_name in pairs(servicegroups) do
+      table.insert(servicegroups_list, servicegroup_name["group_name"])
+    end
+  end
+
+  return servicegroups_list
+end
+
+-- Get the list of hostgroups of a host
+local function getHostGroups(host_id)
+  local hostgroups = broker_cache:get_hostgroups(host_id)
+  local hostgroups_list = {}
+
+  if not hostgroups then
+    debug("Unable to get hostgroups from broker_cache")
+  else
+    for hostgroup_id, hostgroup_name in pairs(hostgroups) do
+      table.insert(hostgroups_list, hostgroup_name["group_name"])
+    end
+  end
+
+  return hostgroups_list
+end
+
+-- Get the notes URL of a host or a service
+local function getNotesURL(host_id, service_id)
+  local notes_url = ''
+
+  if not service_id then
+    notes_url = broker_cache:get_notes_url(host_id)
+  else
+    notes_url = broker_cache:get_notes_url(host_id, service_id)
+  end
+
+  if notes_url ~= "" and notes_url then
+    debug("extra information notes_url found for host_id "..host_id.." => "..notes_url)
+    return notes_url
+  else
+    debug("no extra information notes_url found for host_id "..host_id)
+    return ""
+  end
+end
+
+-- Get the action URL of a host or a service
+local function getActionURL(host_id, service_id)
+  local action_url = ''
+
+  if not service_id then
+    action_url = broker_cache:get_action_url(host_id)
+  else
+    action_url = broker_cache:get_action_url(host_id, service_id)
+  end
+
+  if action_url ~= "" and action_url then
+    debug("extra information action_url found for host_id "..host_id.." => "..action_url)
+    return action_url
+  else
+    debug("no extra information action_url found for host_id "..host_id)
+    return ""
+  end
+end
+
+-- ****************
+
+-- Translate a Centreon event to a Canopsis event
+local function canopsisMapping(d)
+  event = {}
+  -- HOST STATUS
+  if d.element == 14 and stateChanged(d) then
+    event = {
+      event_type = "check",
+      source_type = "component",
+      connector = settings.connector,
+      connector_name = settings.connector_name,
+      component = getHostname(d.host_id),
+      resource = "",
+      timestamp = d.last_check,
+      output = d.output,
+      state = hostStateMapping(d.state),
+      -- extra information
+      hostgroups = getHostGroups(d.host_id),
+      notes_url = getNotesURL(d.host_id),
+      action_url = getActionURL(d.host_id)
+    }
+    debug("Streaming HOST STATUS for host_id ".. d.host_id)
+  -- SERVICE STATUS
+  elseif d.element == 24 and stateChanged(d) then
+    event = {
+      event_type = "check",
+      source_type = "resource",
+      connector = settings.connector,
+      connector_name = settings.connector_name,
+      component = getHostname(d.host_id),
+      resource = getServicename(d.host_id, d.service_id),
+      timestamp = d.last_check,
+      output = d.output,
+      state = serviceStateMapping(d.state),
+      -- extra information
+      servicegroups = getServiceGroups(d.host_id, d.service_id),
+      notes_url = getNotesURL(d.host_id, d.service_id),
+      action_url = getActionURL(d.host_id, d.service_id),
+      hostgroups = getHostGroups(d.host_id)
+    }
+    debug("Streaming SERVICE STATUS for service_id ".. d.service_id)
+  -- ACK
+  elseif d.element == 1 then
+    event = {
+      event_type = "ack",
+      crecord_type = "ack",
+      author = d.author,
+      resource = "",
+      component = getHostname(d.host_id),
+      connector = settings.connector,
+      connector_name = settings.connector_name,
+      timestamp = d.entry_time,
+      output = d.comment_data,
+      origin = "centreon",
+      ticket = "",
+      state_type = 1,
+      ack_resources = false
+    }
+    if d.service_id then
+      event['source_type'] = "resource"
+      event['resource'] = getServicename(d.host_id, d.service_id)
+      event['ref_rk'] = event['resource'] .. "/" ..
event['component'] + event['state'] = serviceStateMapping(d.state) + else + event['source_type'] = "component" + event['ref_rk'] = "undefined/" .. event['component'] + event['state'] = hostStateMapping(d.state) + end + + -- send ackremove + if d.deletion_time then + event['event_type'] = "ackremove" + event['crecord_type'] = "ackremove" + event['timestamp'] = d.deletion_time + end + + debug("Streaming ACK for host_id ".. d.host_id) + + -- DOWNTIME (to change with Canopsis "planning" feature when available) + elseif d.element == 5 then + + local canopsis_downtime_id = "centreon-downtime-".. d.internal_id .. "-" .. d.entry_time + + debug("Streaming DOWNTIME for host_id ".. d.host_id) + + if d.cancelled then + deleteCanopsisAPI(settings.canopsis_downtime_route .. "/" .. canopsis_downtime_id) + else + event = { + _id = canopsis_downtime_id, + author = d.author, + name = canopsis_downtime_id, + tstart = d.start_time, + tstop = d.end_time, + type_ = "Maintenance", + reason = "Autre", + timezone = settings.timezone, + comments = { { ['author'] = d.author, + ['message'] = d.comment_data } }, + filter = { ['$and']= { { ['_id'] = "" }, } }, + exdate = {}, + } + if not d.service_id then + event['filter']['$and'][1]['_id'] = getHostname(d.host_id) + else + event['filter']['$and'][1]['_id'] = getServicename(d.host_id, d.service_id).."/"..getHostname(d.host_id) + end + -- This event is sent directly and bypass the queue process of a standard event. + postCanopsisAPI(event, settings.canopsis_downtime_route) + end + + -- Note : The event can be duplicated by the Centreon broker + -- See previous commit to get the "duplicated" function if needed + + event = {} + end + return event +end + + +function stateChanged(d) + + if d.service_id then + debug("Checking state change for service_id event [".. d.service_id .. "]") + else + debug("Checking state change for host_id event [".. d.host_id .. "]") + end + + if d.state_type == 1 and -- if the event is in hard state + d.last_hard_state_change ~= nil then -- if the event has been in a hard state + + -- if the state has changed + -- (like noted in the omi connector, it could have a slight delta between last_check and last_hard_state_change) + if math.abs(d.last_check - d.last_hard_state_change) < 10 then + + if d.service_id then + debug("HARD state change detected for service_id [" .. d.service_id .. "]") + else + debug("HARD state change detected for host_id [" .. d.host_id .. "]") + end + + return true + + elseif os.time() - connector_start_time <= settings.init_spread_timer then -- if the connector has just started + + if d.service_id then + debug("HARD event for service_id [" .. d.service_id .. "] spread") + else + debug("HARD for host_id [" .. d.host_id .. "] spread") + end + + return true + + end + + -- note : No need to send new event without last_hard_state_change because + -- there is no state either + + end + + return false +end + +----------------------------------------------------------------------------- +-- Queue functions +----------------------------------------------------------------------------- + +local event_queue = { + __internal_ts_last_flush = nil, + events = {}, + max_buffer_size = 10, + max_buffer_age = 60 +} + +function event_queue:new(o, conf) + o = o or {} + setmetatable(o, self) + self.__index = self + for i,v in pairs(conf) do + if self[i] and i ~= "events" and string.sub(i, 1, 11) ~= "__internal_" then + debug("event_queue:new: getting parameter " .. i .. " => " .. 
v) + self[i] = v + else + debug("event_queue:new: ignoring parameter " .. i .. " => " .. v) + end + end + self.__internal_ts_last_flush = os.time() + debug("event_queue:new: setting the internal timestamp to " .. self.__internal_ts_last_flush) + return o +end + +function event_queue:add(e) + -- we finally append the event to the events table + if next(e) ~= nil then + self.events[#self.events + 1] = e + debug("Queuing event : " .. broker.json_encode(e)) + end + -- then we check whether it is time to send the events to the receiver and flush + if #self.events >= self.max_buffer_size then + debug("event_queue:add: flushing because buffer size reached " .. self.max_buffer_size .. " elements.") + self:flush() + return true + elseif os.time() - self.__internal_ts_last_flush >= self.max_buffer_age and + #self.events ~= 0 then + debug("event_queue:add: flushing " .. #self.events .. " elements because buffer age reached " .. (os.time() - self.__internal_ts_last_flush) .. "s and max age is " .. self.max_buffer_age .. "s.") + self:flush() + return true + else + return false + end +end + +function event_queue:flush() + --debug("DUMPING : " .. broker.json_encode(self.events)) + postCanopsisAPI(self.events) + -- now that the data has been sent, we empty the events array + self.events = {} + -- and update the timestamp + self.__internal_ts_last_flush = os.time() +end + +----------------------------------------------------------------------------- +-- Required functions for Broker StreamConnector +----------------------------------------------------------------------------- + +-- Init a stream connector +function init(conf) + connector_start_time = os.time() + + -- merge configuration from the WUI with default values + for k,v in pairs(conf) do settings[k] = v end + + broker_log:set_parameters(3, settings.debug_log) + getVersion() + debug("init : Beginning init() function") + debug("CONNECTOR:" .. settings.connector .. ";") + debug("CANOPSIS_HOST:" .. settings.canopsis_host .. 
";") + + queue = event_queue:new(nil, conf) + debug("init : Ending init() function, Event queue created") +end + +-- Write events +function write(d) + debug("write : Beginning write() function") + if settings.sending_method == "api" then + --postCanopsisAPI(canopsisMapping(d)) -- for debug only + queue:add(canopsisMapping(d)) + elseif settings.sending_method == "file" then + writeIntoFile(canopsisMapping(d)) + --writeIntoFile(d) -- for debug only + end + debug("write : Ending write() function") + return true +end + +-- Filter events +function filter(category, element) + -- Filter NEB category types + if category == 1 and (element == 1 or -- Acknowledment + element == 5 or -- Downtime + element == 14 or -- Host status + element == 24) then -- Service status + return true + end + return false +end diff --git a/stream-connectors/community-powered/canopsis/pictures/centreon-configuration-screenshot.png b/stream-connectors/community-powered/canopsis/pictures/centreon-configuration-screenshot.png new file mode 100644 index 00000000000..7ba8f9f9f1f Binary files /dev/null and b/stream-connectors/community-powered/canopsis/pictures/centreon-configuration-screenshot.png differ diff --git a/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bookworm b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bookworm new file mode 100644 index 00000000000..ef2f930eaba --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bookworm @@ -0,0 +1 @@ +LIBFFI_DEPENDENCY=libffi8 diff --git a/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bullseye b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bullseye new file mode 100644 index 00000000000..ba6237e2c25 --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.bullseye @@ -0,0 +1 @@ +LIBFFI_DEPENDENCY=libffi7 diff --git a/stream-connectors/dependencies/lua-cffi/packaging/env/.env.jammy b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.jammy new file mode 100644 index 00000000000..ef2f930eaba --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/env/.env.jammy @@ -0,0 +1 @@ +LIBFFI_DEPENDENCY=libffi8 diff --git a/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml b/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml new file mode 100644 index 00000000000..bb29f45fd0e --- /dev/null +++ b/stream-connectors/dependencies/lua-cffi/packaging/lua-cffi.yaml @@ -0,0 +1,42 @@ +name: "lua-cffi" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + lua cffi library + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../lua-cffi/cffi.so" + dst: "/usr/lib64/lua/@luaver@/cffi.so" + packager: rpm + + - src: "../lua-cffi/cffi.so" + dst: "/usr/lib/x86_64-linux-gnu/lua/5.3/cffi.so" + packager: deb + +overrides: + rpm: + depends: + - lua + - libffi + - libffi-devel + deb: + depends: + - "lua5.3" + - "${LIBFFI_DEPENDENCY}" + - "libffi-dev" + +rpm: + summary: lua cffi + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml b/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml new file mode 100644 index 00000000000..727d40ede53 --- /dev/null +++ b/stream-connectors/dependencies/lua-curl/packaging/lua-curl.yaml @@ -0,0 +1,55 
@@ +name: "@NAME@" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + lua curl library + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../lua-curl/lcurl.so" + dst: "/usr/lib64/lua/@luaver@/lcurl.so" + file_info: + mode: 0644 + packager: rpm + - src: "../lua-curl/lcurl.so" + dst: "/usr/lib/x86_64-linux-gnu/lua/5.3/lcurl.so" + file_info: + mode: 0644 + packager: deb + + - src: "../lua-curl/cURL.lua" + dst: "/usr/share/lua/@luaver@/cURL.lua" + packager: rpm + - src: "../lua-curl/cURL.lua" + dst: "/usr/share/lua/5.3/cURL.lua" + packager: deb + + - src: "../lua-curl/cURL" + dst: "/usr/share/lua/@luaver@/cURL" + packager: rpm + - src: "../lua-curl/cURL" + dst: "/usr/share/lua/5.3/cURL" + packager: deb + +overrides: + rpm: + depends: + - lua + deb: + depends: + - lua5.3 + +rpm: + summary: lua curl + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml b/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml new file mode 100644 index 00000000000..b289235795e --- /dev/null +++ b/stream-connectors/dependencies/lua-tz/packaging/lua-tz.yaml @@ -0,0 +1,38 @@ +name: "lua-tz" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + lua tz library + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../lua-tz" + dst: "/usr/share/lua/@luaver@/luatz" + packager: rpm + + - src: "../lua-tz" + dst: "/usr/share/lua/5.3/luatz" + packager: deb + +overrides: + rpm: + depends: + - lua + deb: + depends: + - "lua5.3" + +rpm: + summary: lua tz + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/google/auth/oauth.lua b/stream-connectors/modules/centreon-stream-connectors-lib/google/auth/oauth.lua new file mode 100644 index 00000000000..af8ef79b3e0 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/google/auth/oauth.lua @@ -0,0 +1,290 @@ +#!/usr/bin/lua + +--- +-- oauth module for google oauth +-- @module oauth +-- @alias oauth +local oauth = {} + +local mime = require("mime") +local crypto = require("crypto") +local curl = require("cURL") +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") + + + +local OAuth = {} + +--- oauth constructor +-- @param params (table) the table of all the stream connector parameters +-- @param sc_common (object) a sc_common object instance +-- @param sc_logger (object) a sc_logger object instance +function oauth.new(params, sc_common, sc_logger) + local self = {} + + -- initiate stream connector logger + self.sc_logger = sc_logger + if not self.sc_logger then + self.sc_logger = sc_logger.new("/var/log/centreon-broker/gbq.log", 3) + end + self.sc_common = sc_common + + -- load stream connector parameters + self.params = params + + -- initiate standard params for google oauth + self.jwt_info = { + scope = params.scope_list, + api_key = params.api_key, + key_file = params.key_file_path, + hash_protocol = "sha256WithRSAEncryption", + jwt_header = 
{}
+  }
+
+  -- put jwt header in params to be able to override it if needed
+  self.jwt_info.jwt_header = {
+    alg = "RS256",
+    typ = "JWT"
+  }
+
+  setmetatable(self, { __index = OAuth })
+  return self
+end
+
+--- create_jwt_token: create a jwt token
+-- @return false (boolean) if we can't open the key file or create the claim or the signature
+-- @return true (boolean) if the jwt token has been successfully created
function OAuth:create_jwt_token()
+
+  -- retrieve the information that is in the key file
+  if not self:get_key_file() then
+    self.sc_logger:error("[google.auth.oauth:create_jwt]: an error occurred while getting file: "
+      .. tostring(self.jwt_info.key_file))
+
+    return false
+  end
+
+  -- b64 encoded json of the jwt_header
+  local jwt_header = mime.b64(broker.json_encode(self.jwt_info.jwt_header))
+
+  -- build the claim part of the jwt
+  if not self:create_jwt_claim() then
+    self.sc_logger:error("[google.auth.oauth:create_jwt]: an error occurred while creating the jwt claim")
+
+    return false
+  end
+
+  -- b64 encoded json of the jwt_claim
+  local jwt_claim = mime.b64(broker.json_encode(self.jwt_claim))
+
+  local string_to_sign = jwt_header .. "." .. jwt_claim
+
+  -- sign our jwt_header and claim
+  if not self:create_signature(string_to_sign) then
+    self.sc_logger:error("[google.auth.oauth:create_jwt]: couldn't sign the concatenation of"
+      .. " the JWT header and the JWT claim.")
+
+    return false
+  end
+
+  -- create our jwt_token using the signature
+  self.jwt_token = string_to_sign .. "." .. mime.b64(self.signature)
+
+  return true
+end
+
+--- get_key_file: open the key file and store its information in self.key_table
+-- @return false (boolean) if the key file is not found or is not a valid json file
+-- @return true (boolean) if the information from the key file has been successfully loaded in self.key_table
+function OAuth:get_key_file()
+  local file = io.open(self.jwt_info.key_file, "r")
+
+  -- return false if we can't open the file
+  if not file then
+    self.sc_logger:error("[google.auth.oauth:get_key_file]: couldn't open file "
+      .. tostring(self.jwt_info.key_file) .. ". Make sure your key file is there.")
+    return false
+  end
+
+  local file_content = file:read("*a")
+  io.close(file)
+
+  local key_table = broker.json_decode(file_content)
+
+  -- return false if json couldn't be parsed
+  if (type(key_table) ~= "table") then
+    self.sc_logger:error("[google.auth.oauth:get_key_file]: the key file "
+      .. tostring(self.jwt_info.key_file) .. " is not a valid json file.")
+    return false
+  end
+
+  self.key_table = key_table
+  return true
+end
+
+--- create_jwt_claim: create the claim for the jwt token using information from the key table
+-- @return false (boolean) if mandatory information is missing from the key file.
+-- @return true (boolean) if the claim has been successfully created
+function OAuth:create_jwt_claim()
+  -- return false if there is a missing parameter in the key table
+  if
+    not self.key_table.client_email or
+    not self.key_table.auth_uri or
+    not self.key_table.token_uri or
+    not self.key_table.private_key or
+    not self.key_table.project_id
+  then
+    self.sc_logger:error("[google.auth.oauth:create_jwt_claim]: one of the following information wasn't found in the key_file:"
+      .. " client_email, auth_uri, token_uri, private_key or project_id. Make sure that "
+      .. tostring(self.jwt_info.key_file) ..
" is a valid key file.") + return false + end + + -- jwt claim time to live + local iat = os.time() + self.jwt_expiration_date = iat + 3600 + + -- create jwt_claim table + self.jwt_claim = { + iss = self.key_table.client_email, + aud = self.key_table.token_uri, + scope = self.jwt_info.scope, + iat = iat, + exp = self.jwt_expiration_date + } + + return true +end + +--- create_signature: sign a string using the hash protocol provided by the user in the hash_protocol parameter +-- @param string_to_sign (string) the string that must be signed +-- @return false (boolean) if the key object is not created using the private key from the key file or if the sign operation failed +-- @return true (boolean) if the string has been successfully signed +function OAuth:create_signature(string_to_sign) + -- create a pkey object + local private_key_object = crypto.pkey.from_pem(self.key_table.private_key, true) + + -- return if the pkey object is not valid + if not private_key_object then + self.sc_logger:error("[google.auth.oauth:create_signature]: couldn't create private key object using crypto lib and" + .. " private key from key file " .. tostring(self.jwt_info.key_file)) + + return false + end + + -- sign the string + local signature = crypto.sign(self.jwt_info.hash_protocol, string_to_sign, private_key_object) + + -- return if string is not signed + if not signature then + self.sc_logger:error("[google.auth.oauth:create_signature]: couldn't sign string using crypto lib and the hash protocol: " + .. tostring(self.jwt_info.hash_protocol)) + + return false + end + + self.signature = signature + return true +end + +--- get_access_token: get an access token using the jwt token +-- @return false (boolean) if a jwt token needs to be generated and the operation fails or if we can't get access token from google api +-- @return access_token (string) the access token from google api +function OAuth:get_access_token() + + -- check if it is really needed to generate a new access_token + if not self.access_token or os.time() > self.jwt_expiration_date - 60 then + self.sc_logger:info("[google.auth.oauth:get_access_token]: no jwt_token found or jwt token expiration date has been reached." + .. " Generating a new JWT token") + + -- generate a new jwt token before asking for an access token + if not self:create_jwt_token() then + self.sc_logger:error("[google.auth.oauth:get_access_token]: couldn't generate a new JWT token.") + return false + end + else + -- an already valid access_token exist, give this one instead of a new one + return self.access_token + end + + local headers = { + 'Content-Type: application/x-www-form-urlencoded' + } + + self.sc_logger:info("[google.auth.oauth:get_access_token]: sending jwt token " .. 
tostring(self.jwt_token))
+
+  local data = {
+    grant_type = "urn:ietf:params:oauth:grant-type:jwt-bearer",
+    assertion = self.jwt_token
+  }
+
+  -- ask google api for an access token
+  local result = broker.json_decode(self:curl_google(self.key_table.token_uri, headers, self.sc_common:generate_postfield_param_string(data)))
+
+  -- return false if we didn't get an access token
+  if not result or not result.access_token then
+    self.sc_logger:error("[google.auth.oauth:get_access_token]: couldn't get access token")
+    return false
+  end
+
+  self.access_token = result.access_token
+  return self.access_token
+end
+
+--- curl_google: query google using curl
+-- @param url (string) the google api url
+-- @param headers (table) the curl http headers
+-- @param data (string) [opt] url encoded url parameters
+function OAuth:curl_google(url, headers, data)
+  local res = ""
+  -- initiate curl
+  local request = curl.easy()
+    :setopt_url(url)
+    :setopt_writefunction(function (response)
+      res = res .. response
+    end)
+
+  -- add postfields url params
+  if data then
+    request:setopt_postfields(data)
+  end
+
+  self.sc_logger:info("[google.auth.oauth:curl_google]: URL: " .. tostring(url) .. ". data: " .. tostring(data))
+
+  -- set proxy address configuration
+  if (self.params.proxy_address ~= "" and self.params.proxy_address) then
+    if (self.params.proxy_port ~= "" and self.params.proxy_port) then
+      request:setopt(curl.OPT_PROXY, self.params.proxy_address .. ':' .. self.params.proxy_port)
+    else
+      self.sc_logger:error("[google.auth.oauth:curl_google]: proxy_port parameter is not set but proxy_address is used")
+    end
+  end
+
+  -- set proxy user configuration
+  if (self.params.proxy_username ~= '' and self.params.proxy_username) then
+    if (self.params.proxy_password ~= '' and self.params.proxy_password) then
+      request:setopt(curl.OPT_PROXYUSERPWD, self.params.proxy_username .. ':' .. self.params.proxy_password)
+    else
+      self.sc_logger:error("[google.auth.oauth:curl_google]: proxy_password parameter is not set but proxy_username is used")
+    end
+  end
+
+  -- set up headers
+  request:setopt(curl.OPT_HTTPHEADER, headers)
+
+  -- run query
+  request:perform()
+
+  local code = request:getinfo(curl.INFO_RESPONSE_CODE)
+
+  if code ~= 200 then
+    self.sc_logger:error("[google.auth.oauth:curl_google]: http code is: " .. tostring(code) .. ". Result is: " .. tostring(res))
+    return false
+  end
+
+  return res
+end
+
+return oauth
\ No newline at end of file
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua b/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua
new file mode 100644
index 00000000000..5de22f98bb8
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua
@@ -0,0 +1,201 @@
+---
+-- bigquery module for google bigquery
+-- @module bigquery
+-- @alias bigquery
+local bigquery = {}

+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+
+local BigQuery = {}
+
+--- module constructor
+-- @param params (table) table of all the stream connector parameters
+-- @param sc_logger (object) instance of the sc_logger module
+function bigquery.new(params, sc_logger)
+  local self = {}
+
+  -- initiate sc_logger
+  self.sc_logger = sc_logger
+  if not self.sc_logger then
+    self.sc_logger = sc_logger.new()
+  end
+
+  -- initiate parameters
+  self.params = params
+  -- initiate bigquery table schema mapping (1 = neb, 6 = bam)
+  self.schemas = {
+    [self.params.bbdo.categories.neb.id] = {},
+    [self.params.bbdo.categories.bam.id] = {}
+  }
+
+  setmetatable(self, { __index = BigQuery })
+  return self
+end
+
+--- get_tables_schema: load tables schemas according to the stream connector configuration
+-- @return true (boolean)
+function BigQuery:get_tables_schema()
+  local categories = self.params.bbdo.categories
+  local elements = self.params.bbdo.elements
+
+  -- use default schema
+  if self.params._sc_gbq_use_default_schemas == 1 then
+    self.schemas[categories.neb.id][elements.host_status.id] = self:default_host_table_schema()
+    self.schemas[categories.neb.id][elements.service_status.id] = self:default_service_table_schema()
+    self.schemas[categories.neb.id][elements.acknowledgement.id] = self:default_ack_table_schema()
+    self.schemas[categories.neb.id][elements.downtime.id] = self:default_dt_table_schema()
+    self.schemas[categories.bam.id][elements.ba_status.id] = self:default_ba_table_schema()
+    return true
+  end
+
+  -- use a configuration file for all the schemas
+  if self.params._sc_gbq_use_schema_config_file == 1 then
+    if self:load_tables_schema_file() then
+      return true
+    end
+  end
+
+  -- create tables schemas from the stream connector configuration itself (not the best idea)
+  if self.params._sc_gbq_use_default_schemas == 0 and self.params._sc_gbq_use_schema_config_file == 0 then
+    -- build hosts table schema
+    self:build_table_schema("^_sc_gbq_host_column_", "_sc_gbq_host_column_", self.schemas[categories.neb.id][elements.host_status.id])
+
+    -- build services table schema
+    self:build_table_schema("^_sc_gbq_service_column_", "_sc_gbq_service_column_", self.schemas[categories.neb.id][elements.service_status.id])
+
+    -- build ba table schema
+    self:build_table_schema("^_sc_gbq_ba_column_", "_sc_gbq_ba_column_", self.schemas[categories.bam.id][elements.ba_status.id])
+
+    -- build ack table schema
+    self:build_table_schema("^_sc_gbq_ack_column_", "_sc_gbq_ack_column_", self.schemas[categories.neb.id][elements.acknowledgement.id])
+
+    -- build downtime table schema
+    self:build_table_schema("^_sc_gbq_dt_column_", "_sc_gbq_dt_column_", self.schemas[categories.neb.id][elements.downtime.id])
+  end
+
+  return true
+end
+
+--- build_table_schema: create a table schema using the stream connector tables configuration
+-- @param regex (string)
the regex that the stream connector param must match in order to identify it as a column name in the table schema +-- @param substract (string) the string that is going to be removed from the parameter name to isolate the name of the column +-- @param structure (table) the schema table in which the column name and value are going to be stored +function BigQuery:build_table_schema(regex, substract, structure) + for param_name, param_value in pairs(self.params) do + if string.find(param_name, regex) ~= nil then + structure[string.gsub(param_name, substract, "")] = param_value + end + end +end + +--- default_host_table_schema: create a standard schema for a host event table +-- @return host_table (table) the table that is going to be used as a schema for bigquery host table +function BigQuery:default_host_table_schema() + return { + host_id = "{host_id}", + host_name = "{cache.host.name}", + status = "{state}", + last_check = "{last_check}", + output = "{output}", + instance_id = "{cache.host.instance_id}" + } +end + +--- default_service_table_schema: create a standard schema for a service event table +-- @return service_table (table) the table that is going to be used as a schema for bigquery service table +function BigQuery:default_service_table_schema() + return { + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + last_check = "{last_check}", + output = "{output}", + instance_id = "{cache.host.instance_id}" + } +end + +--- default_ack_table_schema: create a standard schema for an ack event table +-- @return ack_table (table) the table that is going to be used as a schema for bigquery ack table +function BigQuery:default_ack_table_schema() + return { + author = "{author}", + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + output = "{output}", + instance_id = "{cache.host.instance_id}", + entry_time = "{entry_time}" + } +end + +--- default_dt_table_schema: create a standard schema for a downtime event table +-- @return downtime_table (table) the table that is going to be used as a schema for bigquery downtime table +function BigQuery:default_dt_table_schema() + return { + author = "{author}", + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + output = "{output}", + instance_id = "{cache.host.instance_id}", + actual_start_time = "{actual_start_time}", + actual_end_time = "{deletion_time}" + } +end + +--- default_ba_table_schema: create a standard schema for a BA event table +-- @return ba_table (table) the table that is going to be used as a schema for bigquery BA table +function BigQuery:default_ba_table_schema() + return { + ba_id = "{ba_id}", + ba_name = "{cache.ba.ba_name}", + status = "{state}" + } +end + +--- load_tables_schema_file: load a table schema from a json configuration file +-- @return false (boolean) if we can't open the configuration file or it is not a valid json file +-- @return true (boolean) if everything went fine +function BigQuery:load_tables_schema_file() + local file = io.open(self.params._sc_gbq_schema_config_file_path, "r") + + -- return false if we can't open the file + if not file then + self.sc_logger:error("[google.bq.bq_tabmes:load_tables_schema_file]: couldn't open file " + .. 
tostring(self.params._sc_gbq_schema_config_file_path) .. ". Make sure your table schema file is there.")
+    return false
+  end
+
+  local file_content = file:read("*a")
+  io.close(file)
+
+  local schemas = broker.json_decode(file_content)
+
+  -- return false if json couldn't be parsed
+  if (type(schemas) ~= "table") then
+    self.sc_logger:error("[google.bq.bq_tabmes:load_tables_schema_file]: the table schema file "
+      .. tostring(self.params._sc_gbq_schema_config_file_path) .. " is not a valid json file.")
+    return false
+  end
+
+  local categories = self.params.bbdo.categories
+  local elements = self.params.bbdo.elements
+
+  -- use the default schema if we don't find a schema for a dedicated type of event
+  self.schemas[categories.neb.id][elements.host_status.id] = schemas.host or self:default_host_table_schema()
+  self.schemas[categories.neb.id][elements.service_status.id] = schemas.service or self:default_service_table_schema()
+  self.schemas[categories.neb.id][elements.acknowledgement.id] = schemas.ack or self:default_ack_table_schema()
+  self.schemas[categories.neb.id][elements.downtime.id] = schemas.dt or self:default_dt_table_schema()
+  self.schemas[categories.bam.id][elements.ba_status.id] = schemas.ba or self:default_ba_table_schema()
+
+  return true
+end
+
+return bigquery
\ No newline at end of file
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua
new file mode 100644
index 00000000000..7e1d052c81b
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/config.lua
@@ -0,0 +1,174 @@
+#!/usr/bin/lua
+
+local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka")
+
+-- ffi for el7
+local status, ffi = pcall(require, 'ffi')
+
+-- use cffi instead of ffi for el8
+if (not status) then
+  ffi = require 'cffi'
+end
+
+local KafkaConfig = {}
+KafkaConfig.__index = KafkaConfig
+
+--[[
+    Create a configuration object or duplicate one.
+    The result will be set up with the defaults.
+
+    Please see CONFIGURATION.md for the default settings.
+]]--
+
+function KafkaConfig.new(original_config)
+  local config = { cb_ = {} }
+  setmetatable(config, KafkaConfig)
+
+  if original_config and original_config.kafka_conf_ then
+    rawset(config, "kafka_conf_", librdkafka.rd_kafka_conf_dup(original_config.kafka_conf_))
+    config:set_delivery_cb(original_config.cb_.dr_cb_)
+    config:set_stat_cb(original_config.cb_.stat_cb_)
+    config:set_error_cb(original_config.cb_.error_cb_)
+    config:set_log_cb(original_config.cb_.log_cb_)
+  else
+    rawset(config, "kafka_conf_", librdkafka.rd_kafka_conf_new())
+  end
+  ffi.gc(config.kafka_conf_, function (config)
+    librdkafka.rd_kafka_conf_destroy(config)
+    end
+  )
+
+  return config
+end
+
+
+--[[
+    Dump the configuration properties and values of `conf` to a map
+    with "key", "value" pairs.
+]]--
+
+function KafkaConfig:dump()
+  assert(self.kafka_conf_ ~= nil)
+
+  local size = ffi.new("size_t[1]")
+  local dump = librdkafka.rd_kafka_conf_dump(self.kafka_conf_, size)
+  ffi.gc(dump, function(d) librdkafka.rd_kafka_conf_dump_free(d, size[0]) end)
+
+  local result = {}
+  for i = 0, tonumber(size[0])-1, 2 do
+    result[ffi.string(dump[i])] = ffi.string(dump[i+1])
+  end
+
+  return result
+end
+
+
+--[[
+    Sets a configuration property.
+
+    In case of failure "error(errstr)" is called and 'errstr'
+    is updated to contain a human readable error string.
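+
+    A minimal usage sketch (the property values below are placeholders):
+    assigning a key on the object goes through this __newindex metamethod
+    and ends up in rd_kafka_conf_set.
+
+      local config = KafkaConfig.new()
+      config["metadata.broker.list"] = "localhost:9092"
+      config["statistics.interval.ms"] = "1000"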
+]]-- + +function KafkaConfig:__newindex(name, value) + assert(self.kafka_conf_ ~= nil) + + local ERRLEN = 256 + local errbuf = ffi.new("char[?]", ERRLEN) -- cdata objects are garbage collected + + if librdkafka.rd_kafka_conf_set(self.kafka_conf_, name, tostring(value), errbuf, ERRLEN) ~= librdkafka.RD_KAFKA_CONF_OK then + error(ffi.string(errbuf)) + end +end + + +--[[ + Set delivery report callback in provided conf object. + + Format: callback_function(payload, errstr) + 'payload' is the message payload + 'errstr' nil if everything is ok or readable error description otherwise +]]-- + +function KafkaConfig:set_delivery_cb(callback) + assert(self.kafka_conf_ ~= nil) + + if callback then + self.cb_.dr_cb_ = callback + librdkafka.rd_kafka_conf_set_dr_cb(self.kafka_conf_, + function(rk, payload, len, err) + local errstr = nil + if err ~= librdkafka.RD_KAFKA_RESP_ERR_NO_ERROR then + errstr = ffi.string(librdkafka.rd_kafka_err2str(err)) + end + callback(ffi.string(payload, tonumber(len)), errstr) + end) + end +end + + +--[[ + Set statistics callback. + The statistics callback is called from `KafkaProducer:poll` every + `statistics.interval.ms` (needs to be configured separately). + + Format: callback_function(json) + 'json' - String containing the statistics data in JSON format +]]-- + +function KafkaConfig:set_stat_cb(callback) + assert(self.kafka_conf_ ~= nil) + + if callback then + self.cb_.stat_cb_ = callback + librdkafka.rd_kafka_conf_set_stats_cb(self.kafka_conf_, + function(rk, json, json_len) + callback(ffi.string(json, json_len)) + return 0 --librdkafka will immediately free the 'json' pointer. + end) + end +end + + +--[[ + Set error callback. + The error callback is used by librdkafka to signal critical errors + back to the application. + + Format: callback_function(err_numb, reason) +]]-- + +function KafkaConfig:set_error_cb(callback) + assert(self.kafka_conf_ ~= nil) + + if callback then + self.cb_.error_cb_ = callback + librdkafka.rd_kafka_conf_set_error_cb(self.kafka_conf_, + function(rk, err, reason) + callback(tonumber(err), ffi.string(reason)) + end) + end +end + +--[[ + Set logger callback. + The default is to print to stderr. + Alternatively the application may provide its own logger callback. + Or pass 'callback' as nil to disable logging. 
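+
+    As a sketch (the formatting below is illustrative, not part of the API),
+    a callback that forwards librdkafka logs to stderr could look like:
+
+      config:set_log_cb(function(level, fac, buf)
+        io.stderr:write("[" .. level .. "] " .. fac .. ": " .. buf .. "\n")
+      end)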
+ + Format: callback_function(level, fac, buf) +]]-- + +function KafkaConfig:set_log_cb(callback) + assert(self.kafka_conf_ ~= nil) + + if callback then + self.cb_.log_cb_ = callback + librdkafka.rd_kafka_conf_set_log_cb(self.kafka_conf_, + function(rk, level, fac, buf) + callback(tonumber(level), ffi.string(fac), ffi.string(buf)) + end) + end +end + +return KafkaConfig diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua new file mode 100644 index 00000000000..7305d14a369 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua @@ -0,0 +1,80 @@ +#!/usr/bin/lua + +-- ffi for el7 +local status, ffi = pcall(require, 'ffi') + +-- use cffi instead of ffi for el8 +if (not status) then + ffi = require 'cffi' +end + +ffi.cdef[[ + typedef struct rd_kafka_s rd_kafka_t; + typedef struct rd_kafka_conf_s rd_kafka_conf_t; + typedef struct rd_kafka_topic_s rd_kafka_topic_t; + typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t; + + typedef enum rd_kafka_type_t { + RD_KAFKA_PRODUCER, + RD_KAFKA_CONSUMER + } rd_kafka_type_t; + + typedef enum { + RD_KAFKA_RESP_ERR__BEGIN = -200, + RD_KAFKA_RESP_ERR_NO_ERROR = 0, + /* ... */ + } rd_kafka_resp_err_t; + + typedef enum { + RD_KAFKA_CONF_UNKNOWN = -2, /* Unknown configuration name. */ + RD_KAFKA_CONF_INVALID = -1, /* Invalid configuration value. */ + RD_KAFKA_CONF_OK = 0 /* Configuration okay */ + } rd_kafka_conf_res_t; + + rd_kafka_conf_t *rd_kafka_conf_new (void); + rd_kafka_conf_t *rd_kafka_conf_dup (const rd_kafka_conf_t *conf); + void rd_kafka_conf_destroy (rd_kafka_conf_t *conf); + const char **rd_kafka_conf_dump (rd_kafka_conf_t *conf, size_t *cntp); + void rd_kafka_conf_dump_free (const char **arr, size_t cnt); + rd_kafka_conf_res_t rd_kafka_conf_set (rd_kafka_conf_t *conf, const char *name, const char *value, + char *errstr, size_t errstr_size); + void rd_kafka_conf_set_dr_cb (rd_kafka_conf_t *conf, void (*dr_cb) (rd_kafka_t *rk, + void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque)); + void rd_kafka_conf_set_error_cb (rd_kafka_conf_t *conf, void (*error_cb) (rd_kafka_t *rk, int err, + const char *reason, void *opaque)); + void rd_kafka_conf_set_stats_cb (rd_kafka_conf_t *conf, int (*stats_cb) (rd_kafka_t *rk, char *json, + size_t json_len, void *opaque)); + void rd_kafka_conf_set_log_cb (rd_kafka_conf_t *conf, void (*log_cb) (const rd_kafka_t *rk, int level, + const char *fac, const char *buf)); + + rd_kafka_t *rd_kafka_new (rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size); + void rd_kafka_destroy (rd_kafka_t *rk); + int rd_kafka_brokers_add (rd_kafka_t *rk, const char *brokerlist); + + rd_kafka_topic_conf_t *rd_kafka_topic_conf_new (void); + rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup (const rd_kafka_topic_conf_t *conf); + rd_kafka_conf_res_t rd_kafka_topic_conf_set (rd_kafka_topic_conf_t *conf, const char *name, + const char *value, char *errstr, size_t errstr_size); + void rd_kafka_topic_conf_destroy (rd_kafka_topic_conf_t *topic_conf); + const char **rd_kafka_topic_conf_dump (rd_kafka_topic_conf_t *conf, size_t *cntp); + + rd_kafka_topic_t *rd_kafka_topic_new (rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf); + const char *rd_kafka_topic_name (const rd_kafka_topic_t *rkt); + void rd_kafka_topic_destroy (rd_kafka_topic_t *rkt); + + int rd_kafka_produce (rd_kafka_topic_t *rkt, int32_t partitition, int 
msgflags, void *payload, size_t len, + const void *key, size_t keylen, void *msg_opaque); + + int rd_kafka_outq_len (rd_kafka_t *rk); + int rd_kafka_poll (rd_kafka_t *rk, int timeout_ms); + + int rd_kafka_wait_destroyed (int timeout_ms); + + rd_kafka_resp_err_t rd_kafka_errno2err (int errnox); + const char *rd_kafka_err2str (rd_kafka_resp_err_t err); + int rd_kafka_thread_cnt (void); +]] + +local librdkafka = ffi.load("librdkafka.so.1") +return librdkafka + diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua new file mode 100644 index 00000000000..bfb28827cf0 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/producer.lua @@ -0,0 +1,178 @@ +#!/usr/bin/lua + +local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") +local KafkaConfig = require("centreon-stream-connectors-lib.rdkafka.config") +local KafkaTopic = require("centreon-stream-connectors-lib.rdkafka.topic") +-- ffi for el7 +local status, ffi = pcall(require, 'ffi') + +-- use cffi instead of ffi for el8 +if (not status) then + ffi = require 'cffi' +end + +local DEFAULT_DESTROY_TIMEOUT_MS = 3000 + +local KafkaProducer = {} +KafkaProducer.__index = KafkaProducer + + +--[[ + Creates a new Kafka producer. + + 'kafka_config' is an optional object that will be used instead of the default + configuration. + The 'kafka_config' object is reusable after this call. + + 'destroy_timeout_ms' is a parameter that is used to determine how long client + will wait while all rd_kafka_t objects will be destroyed. + + Returns the new object on success or "error(errstr)" on error in which case + 'errstr' is set to a human readable error message. +]]-- + +function KafkaProducer.new(kafka_config, destroy_timeout_ms) + local config = nil + if kafka_config ~= nil then + config = KafkaConfig.new(kafka_config).kafka_conf_ + ffi.gc(config, nil) + end + + local ERRLEN = 256 + local errbuf = ffi.new("char[?]", ERRLEN) -- cdata objects are garbage collected + local kafka = librdkafka.rd_kafka_new(librdkafka.RD_KAFKA_PRODUCER, config, errbuf, ERRLEN) + + if kafka == nil then + error(ffi.string(errbuf)) + end + + local producer = {kafka_ = kafka} + KafkaTopic.kafka_topic_map_[kafka] = {} + + setmetatable(producer, KafkaProducer) + ffi.gc(producer.kafka_, function (...) + for k, topic_ in pairs(KafkaTopic.kafka_topic_map_[producer.kafka_]) do + librdkafka.rd_kafka_topic_destroy(topic_) + end + KafkaTopic.kafka_topic_map_[producer.kafka_] = nil + librdkafka.rd_kafka_destroy(...) + librdkafka.rd_kafka_wait_destroyed(destroy_timeout_ms or DEFAULT_DESTROY_TIMEOUT_MS) + end + ) + + return producer +end + + +--[[ + Adds a one or more brokers to the kafka handle's list of initial brokers. + Additional brokers will be discovered automatically as soon as rdkafka + connects to a broker by querying the broker metadata. + + If a broker name resolves to multiple addresses (and possibly + address families) all will be used for connection attempts in + round-robin fashion. + + 'broker_list' is a ,-separated list of brokers in the format: + [:],[:]... + + Returns the number of brokers successfully added. + + NOTE: Brokers may also be defined with the 'metadata.broker.list' + configuration property. +]]-- + +function KafkaProducer:brokers_add(broker_list) + assert(self.kafka_ ~= nil) + return librdkafka.rd_kafka_brokers_add(self.kafka_, broker_list) +end + + +--[[ + Produce and send a single message to broker. 
+ + `produce()` is an asynch non-blocking API. + + 'partition' is the target partition, either: + - RD_KAFKA_PARTITION_UA (unassigned) for + automatic partitioning using the topic's partitioner function, or + - a fixed partition (0..N) + + 'payload' is the message payload. + + 'key' is an optional message key, if non-nil it will be passed to the topic + partitioner as well as be sent with the message to the broker and passed + on to the consumer. + + + Returns "error(errstr)" on error in which case 'errstr' is set to a human + readable error message. +]]-- + +function KafkaProducer:produce(kafka_topic, partition, payload, key) + assert(self.kafka_ ~= nil) + assert(kafka_topic.topic_ ~= nil) + + local keylen = 0 + if key then keylen = #key end + + if payload == nil or #payload == 0 then + if keylen == 0 then + return + end + end + + local RD_KAFKA_MSG_F_COPY = 0x2 + local produce_result = librdkafka.rd_kafka_produce(kafka_topic.topic_, partition, RD_KAFKA_MSG_F_COPY, + ffi.cast("void*", payload), #payload, ffi.cast("void*", key), keylen, nil) + + if produce_result == -1 then + error(ffi.string(librdkafka.rd_kafka_err2str(librdkafka.rd_kafka_errno2err(ffi.errno())))) + end +end + +--[[ + Polls the provided kafka handle for events. + + Events will cause application provided callbacks to be called. + + The 'timeout_ms' argument specifies the minimum amount of time + (in milliseconds) that the call will block waiting for events. + For non-blocking calls, provide 0 as 'timeout_ms'. + To wait indefinately for an event, provide -1. + + Events: + - delivery report callbacks (if dr_cb is configured) [producer] + - error callbacks (if error_cb is configured) [producer & consumer] + - stats callbacks (if stats_cb is configured) [producer & consumer] + + Returns the number of events served. + + NOTE: This function doesn't use jit compilation +]]-- + +function KafkaProducer:poll(timeout_ms) + assert(self.kafka_ ~= nil) + return librdkafka.rd_kafka_poll(self.kafka_, timeout_ms) +end +-- jit.off(KafkaProducer.poll) + +--[[ + Returns the current out queue length: + messages waiting to be sent to, or acknowledged by, the broker. +]]-- + +function KafkaProducer:outq_len() + assert(self.kafka_ ~= nil) + return librdkafka.rd_kafka_outq_len(self.kafka_) +end + +--[[ + Retrieve the current number of threads in use by librdkafka. +]]-- + +function KafkaProducer.thread_cnt() + return librdkafka.rd_kafka_thread_cnt() +end + +return KafkaProducer diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua new file mode 100644 index 00000000000..7b308698aff --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic.lua @@ -0,0 +1,61 @@ +#!/usr/bin/lua + +local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") +local KafkaTopicConfig = require("centreon-stream-connectors-lib.rdkafka.topic_config") +-- ffi for el7 +local status, ffi = pcall(require, 'ffi') + +-- use cffi instead of ffi for el8 +if (not status) then + ffi = require 'cffi' +end + +local KafkaTopic = { kafka_topic_map_ = {} } +-- KafkaProducer will delete all topics on destroy +-- It was done in order to avoid destroing topics before destroing producer + +KafkaTopic.__index = KafkaTopic + +--[[ + Creates a new topic handle for topic named 'topic_name'. + + 'conf' is an optional configuration for the topic that will be used + instead of the default topic configuration. 
+ The 'conf' object is reusable after this call. + + Returns the new topic handle or "error(errstr)" on error in which case + 'errstr' is set to a human readable error message. +]]-- + +function KafkaTopic.new(kafka_producer, topic_name, topic_config) + assert(kafka_producer.kafka_ ~= nil) + + local config = nil + if topic_config and topic_config.topic_config_ then + config = KafkaTopicConfig.new(topic_config).topic_conf_ + ffi.gc(config, nil) + end + + local rd_topic = librdkafka.rd_kafka_topic_new(kafka_producer.kafka_, topic_name, config) + + if rd_topic == nil then + error(ffi.string(librdkafka.rd_kafka_err2str(librdkafka.rd_kafka_errno2err(ffi.errno())))) + end + + local topic = {topic_ = rd_topic} + setmetatable(topic, KafkaTopic) + table.insert(KafkaTopic.kafka_topic_map_[kafka_producer.kafka_], rd_topic) + return topic +end + + +--[[ + Returns the topic name +]]-- + +function KafkaTopic:name() + assert(self.topic_ ~= nil) + return ffi.string(librdkafka.rd_kafka_topic_name(self.topic_)) +end + +return KafkaTopic diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua new file mode 100644 index 00000000000..21a4e8a3e46 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua @@ -0,0 +1,76 @@ +#!/usr/bin/lua + +local librdkafka = require("centreon-stream-connectors-lib.rdkafka.librdkafka") +-- ffi for el7 +local status, ffi = pcall(require, 'ffi') + +-- use cffi instead of ffi for el8 +if (not status) then + ffi = require 'cffi' +end + +local KafkaTopicConfig = {} +KafkaTopicConfig.__index = KafkaTopicConfig + +--[[ + Create topic configuration object. +]]-- + +function KafkaTopicConfig.new(original_config) + local config = {} + setmetatable(config, KafkaTopicConfig) + + if original_config and original_config.topic_conf_ then + rawset(config, "topic_conf_", librdkafka.rd_kafka_topic_conf_dup(original_config.topic_conf_)) + else + rawset(config, "topic_conf_", librdkafka.rd_kafka_topic_conf_new()) + end + ffi.gc(config.topic_conf_, function (config) + librdkafka.rd_kafka_topic_conf_destroy(config) + end + ) + + return config +end + + +--[[ + Dump the topic configuration properties and values of `conf` to a map + with "key", "value" pairs. +]]-- + +function KafkaTopicConfig:dump() + assert(self.topic_conf_ ~= nil) + + local size = ffi.new("size_t[1]") + local dump = librdkafka.rd_kafka_topic_conf_dump(self.topic_conf_, size) + ffi.gc(dump, function(d) librdkafka.rd_kafka_conf_dump_free(d, size[0]) end) + + local result = {} + for i = 0, tonumber(size[0])-1,2 do + result[ffi.string(dump[i])] = ffi.string(dump[i+1]) + end + + return result +end + + +--[[ + Sets a configuration property. + + In case of failure "error(errstr)" is called and 'errstr' + is updated to contain a human readable error string. 
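+
+    A minimal usage sketch (the property name and value are illustrative
+    librdkafka topic settings, passed as strings):
+
+      local topic_conf = KafkaTopicConfig.new()
+      topic_conf["request.required.acks"] = "1"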
+]]-- + +function KafkaTopicConfig:__newindex(name, value) + assert(self.topic_conf_ ~= nil) + + local ERRLEN = 256 + local errbuf = ffi.new("char[?]", ERRLEN) -- cdata objects are garbage collected + + if librdkafka.rd_kafka_topic_conf_set(self.topic_conf_, name, value, errbuf, ERRLEN) ~= librdkafka.RD_KAFKA_CONF_OK then + error(ffi.string(errbuf)) + end +end + +return KafkaTopicConfig diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua new file mode 100644 index 00000000000..0fff3b9bd4d --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_broker.lua @@ -0,0 +1,357 @@ +#!/usr/bin/lua + +--- +-- Module with Centreon broker related methods for easier usage +-- @module sc_broker +-- @alias sc_broker + +local sc_broker = {} + +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") + +local ScBroker = {} + +function sc_broker.new(logger) + local self = {} + + broker_api_version = 2 + + self.logger = logger + if not self.logger then + self.logger = sc_logger.new() + end + + setmetatable(self, { __index = ScBroker }) + + return self +end + + +--- get_host_all_infos: retrieve all informations from a host +-- @param host_id (number) +-- @return false (boolean) if host_id isn't valid or no information were found in broker cache +-- @return host_info (table) all the informations from the host +function ScBroker:get_host_all_infos(host_id) + -- return because host_id isn't valid + if host_id == nil or host_id == "" then + self.logger:warning("[sc_broker:get_host_all_infos]: host id is nil") + return false + end + + -- get host information from broker cache + local host_info = broker_cache:get_host(host_id) + + -- return false only if no host information were found in broker cache + if not host_info then + self.logger:warning("[sc_broker:get_host_all_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.") + return false + end + + return host_info +end + +--- get_service_all_infos: retrieve informations from a service +-- @param host_id (number) +-- @params service_id (number) +-- @return false (boolean) if host id or service id aren't valid +-- @return service (table) all the informations from the service +function ScBroker:get_service_all_infos(host_id, service_id) + -- return because host_id or service_id isn't valid + if host_id == nil or host_id == "" or service_id == nil or service_id == "" then + self.logger:warning("[sc_broker:get_service_all_infos]: host id or service id is nil") + return false + end + + -- get service information from broker cache + local service_info = broker_cache:get_service(host_id, service_id) + + -- return false only if no service information were found in broker cache + if not service_info then + self.logger:warning("[sc_broker:get_service_all_infos]: No service information found for host_id: " .. tostring(host_id) + .. " and service_id: " .. tostring(service_id) .. ". Restarting centengine should fix this.") + return false + end + + return service_info +end + +--- get_host_infos: retrieve the the desired host informations +-- @param host_id (number) +-- @params info (string|table) the name of the wanted host parameter or a table of all wanted host parameters +-- @return false (boolean) if host_id is nil or empty +-- @return host (any) a table of all wanted host params if input param is a table. 
+function ScBroker:get_host_infos(host_id, info)
+  -- return because host_id isn't valid
+  if host_id == nil or host_id == "" then
+    self.logger:warning("[sc_broker:get_host_infos]: host id is nil")
+    return false
+  end
+
+  -- prepare return table with host information
+  local host = {
+    host_id = host_id
+  }
+
+  -- return host_id only if no specific param is asked
+  if info == nil then
+    return host
+  end
+
+  -- get host information from broker cache
+  local host_info = broker_cache:get_host(host_id)
+
+  -- return host_id only if no host information was found in broker cache
+  if not host_info then
+    self.logger:warning("[sc_broker:get_host_infos]: No host information found for host_id: " .. tostring(host_id) .. ". Restarting centengine should fix this.")
+    return host
+  end
+
+  -- get the desired param and return the information
+  if type(info) == "string" then
+    if host_info[info] then
+      return host_info[info]
+    end
+  end
+
+  -- get all the desired params and return the information
+  if type(info) == "table" then
+    for _, param in ipairs(info) do
+      if host_info[param] then
+        host[param] = host_info[param]
+      end
+    end
+
+    return host
+  end
+end
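+--[[
+  Hedged usage sketch for get_host_infos (assuming `broker_obj` was built
+  with sc_broker.new(logger); id and values are illustrative):
+
+    broker_obj:get_host_infos(2712, "name")               --> "srv-01"
+    broker_obj:get_host_infos(2712, {"name", "address"})  --> { host_id = 2712, name = "srv-01", address = "10.0.0.1" }
+]]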
+
+--- get_service_infos: retrieve the desired service information
+-- @param host_id (number)
+-- @param service_id (number)
+-- @param info (string|table) the name of the wanted service parameter or a table of all wanted service parameters
+-- @return false (boolean) if host_id and/or service_id are nil or empty
+-- @return service (any) a table of all wanted service params if the input param is a table, or a single parameter if it is a string
+function ScBroker:get_service_infos(host_id, service_id, info)
+  -- return because host_id or service_id isn't valid
+  if host_id == nil or host_id == "" or service_id == nil or service_id == "" then
+    self.logger:warning("[sc_broker:get_service_infos]: host id or service id is invalid")
+    return false
+  end
+
+  -- prepare return table with service information
+  local service = {
+    host_id = host_id,
+    service_id = service_id
+  }
+
+  -- return host_id and service_id only if no specific param is asked
+  if info == nil then
+    return service
+  end
+
+  -- get service information from broker cache
+  local service_info = broker_cache:get_service(host_id, service_id)
+
+  -- return host_id and service_id only if no service information was found in broker cache
+  if not service_info then
+    self.logger:warning("[sc_broker:get_service_infos]: No service information found for host_id: " .. tostring(host_id)
+      .. " and service_id: " .. tostring(service_id) .. ". Restarting centengine should fix this.")
+    return service
+  end
+
+  -- get the desired param and return the information
+  if type(info) == "string" then
+    if service_info[info] then
+      return service_info[info]
+    end
+  end
+
+  -- get all the desired params and return the information
+  if type(info) == "table" then
+    for _, param in ipairs(info) do
+      if service_info[param] then
+        service[param] = service_info[param]
+      end
+    end
+
+    return service
+  end
+end
+
+--- get_hostgroups: retrieve hostgroups from host_id
+-- @param host_id (number)
+-- @return false (boolean) if host id is invalid or no hostgroup was found
+-- @return hostgroups (table) a table of all hostgroups for the host
+function ScBroker:get_hostgroups(host_id)
+  -- return false if host id is invalid
+  if host_id == nil or host_id == "" then
+    self.logger:warning("[sc_broker:get_hostgroups]: host id is nil or empty")
+    return false
+  end
+
+  -- get hostgroups
+  local hostgroups = broker_cache:get_hostgroups(host_id)
+
+  -- return false if no hostgroups were found
+  if not hostgroups then
+    return false
+  end
+
+  return hostgroups
+end
+
+--- get_servicegroups: retrieve servicegroups from service_id
+-- @param host_id (number)
+-- @param service_id (number)
+-- @return false (boolean) if host_id or service_id are invalid or no servicegroup was found
+-- @return servicegroups (table) a table of all servicegroups for the service
+function ScBroker:get_servicegroups(host_id, service_id)
+  -- return false if host id or service id is invalid
+  if host_id == nil or host_id == "" or service_id == nil or service_id == "" then
+    self.logger:warning("[sc_broker:get_servicegroups]: host id or service id is nil or empty")
+    return false
+  end
+
+  -- get servicegroups
+  local servicegroups = broker_cache:get_servicegroups(host_id, service_id)
+
+  -- return false if no servicegroups were found
+  if not servicegroups then
+    return false
+  end
+
+  return servicegroups
+end
+
+--- get_severity: retrieve the severity of a host or a service
+-- @param host_id (number)
+-- @param [opt] service_id (number)
+-- @return false (boolean) if host id is invalid or no severity was found
+-- @return severity (table) the severity of the host or the service
+function ScBroker:get_severity(host_id, service_id)
+  -- return false if host id is invalid
+  if host_id == nil or host_id == "" then
+    self.logger:warning("[sc_broker:get_severity]: host id is nil or empty")
+    return false
+  end
+
+  local severity = nil
+
+  -- get host severity
+  if service_id == nil then
+    severity = broker_cache:get_severity(host_id)
+
+    -- return false if no severity was found
+    if not severity then
+      self.logger:warning("[sc_broker:get_severity]: no severity found in broker cache for host: " .. tostring(host_id))
+      return false
+    end
+
+    return severity
+  end
+
+  -- get severity for service
+  severity = broker_cache:get_severity(host_id, service_id)
+
+  -- return false if no severity was found
+  if not severity then
+    self.logger:warning("[sc_broker:get_severity]: no severity found in broker cache for host id: " .. tostring(host_id) .. " and service id: " .. tostring(service_id))
+    return false
+  end
+
+  return severity
+end
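+--[[
+  Hedged usage sketch for the group and severity getters above (id and
+  variable names illustrative):
+
+    local broker_obj = sc_broker.new(logger)
+    local groups = broker_obj:get_hostgroups(2712)
+    if groups then
+      for _, group in ipairs(groups) do
+        print(group.group_name)
+      end
+    end
+]]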
+
+--- get_instance: retrieve the poller name from an instance id
+-- @param instance_id (number)
+-- @return false (boolean) if instance_id is invalid or no instance was found in cache
+-- @return name (string) the name of the instance
+function ScBroker:get_instance(instance_id)
+  -- return false if instance_id is invalid
+  if instance_id == nil or instance_id == "" then
+    self.logger:warning("[sc_broker:get_instance]: instance id is nil or empty")
+    return false
+  end
+
+  -- get instance name
+  local name = broker_cache:get_instance_name(instance_id)
+
+  -- return false if no instance name is found
+  if not name then
+    self.logger:warning("[sc_broker:get_instance]: couldn't get instance name from broker cache for instance id: " .. tostring(instance_id))
+    return false
+  end
+
+  return name
+end
+
+--- get_ba_infos: retrieve ba name and description from ba id
+-- @param ba_id (number)
+-- @return false (boolean) if the ba_id is invalid or no information was found in the broker cache
+-- @return ba_info (table) a table with the name and description of the ba
+function ScBroker:get_ba_infos(ba_id)
+  -- return false if ba_id is invalid
+  if ba_id == nil or ba_id == "" then
+    self.logger:warning("[sc_broker:get_ba_infos]: ba id is nil or empty")
+    return false
+  end
+
+  -- get ba info
+  local ba_info = broker_cache:get_ba(ba_id)
+
+  -- return false if no information is found
+  if ba_info == nil then
+    self.logger:warning("[sc_broker:get_ba_infos]: couldn't get ba information in cache for ba_id: " .. tostring(ba_id))
+    return false
+  end
+
+  return ba_info
+end
+
+--- get_bvs_infos: retrieve bv name and description from ba_id
+-- @param ba_id (number)
+-- @return false (boolean) if ba_id is invalid or no information is found in the broker_cache
+-- @return bvs (table) name and description of all the bvs
+function ScBroker:get_bvs_infos(ba_id)
+  -- return false if ba_id is invalid
+  if ba_id == nil or ba_id == "" then
+    self.logger:warning("[sc_broker:get_bvs_infos]: ba id is nil or empty")
+    return false
+  end
+
+  -- get bvs id
+  local bvs_id = broker_cache:get_bvs(ba_id)
+
+  -- return false if no bv id was found for ba_id
+  if bvs_id == nil or bvs_id == "" then
+    self.logger:warning("[sc_broker:get_bvs_infos]: couldn't get bvs for ba id: " .. tostring(ba_id))
+    return false
+  end
+
+  local bv_infos = nil
+  local found_bv = false
+  local bvs = {}
+
+  -- get bv info (name + description) for each found bv
+  for _, id in ipairs(bvs_id) do
+    bv_infos = broker_cache:get_bv(id)
+
+    -- add bv information to the list
+    if bv_infos then
+      table.insert(bvs, bv_infos)
+      found_bv = true
+    else
+      self.logger:warning("[sc_broker:get_bvs_infos]: couldn't get bv information for bv id: " .. tostring(id))
+    end
+  end
+
+  -- return false if there is no bv information
+  if not found_bv then
+    return false
+  end
+
+  return bvs
+end
+
+return sc_broker
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
new file mode 100644
index 00000000000..20652248a81
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_common.lua
@@ -0,0 +1,369 @@
+#!/usr/bin/lua
+
+---
+-- Module with common methods for Centreon Stream Connectors
+-- @module sc_common
+-- @alias sc_common
+
+local sc_common = {}
+
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+
+--- ifnil_or_empty: replace a nil or empty variable with a specified value
+-- @param var (string|number) the variable that needs to be checked
+-- @param alt (string|number|table) the alternate value if "var" is nil or empty
+-- @return var or alt (string|number|table) the variable or the alternate value
+local function ifnil_or_empty(var, alt)
+  if var == nil or var == "" then
+    return alt
+  else
+    return var
+  end
+end
+
+local ScCommon = {}
+
+-- the parameter is named "logger" so it cannot shadow the sc_logger module required above
+function sc_common.new(logger)
+  local self = {}
+
+  self.sc_logger = logger
+  if not self.sc_logger then
+    self.sc_logger = sc_logger.new()
+  end
+
+  setmetatable(self, { __index = ScCommon })
+
+  return self
+end
+
+--- ifnil_or_empty: replace a nil or empty variable with a specified value
+-- @param var (string|number) the variable that needs to be checked
+-- @param alt (string|number|table) the alternate value if "var" is nil or empty
+-- @return var or alt (string|number|table) the variable or the alternate value
+function ScCommon:ifnil_or_empty(var, alt)
+  return ifnil_or_empty(var, alt)
+end
+
+--- if_wrong_type: replace a variable of the wrong type with a default value
+-- @param var (any) the variable that needs to be checked
+-- @param var_type (string) the expected type of the variable
+-- @param default (any) the default value for the variable if its type is wrong
+-- @return var or default (any) the variable if its type is right, or the default value
+function ScCommon:if_wrong_type(var, var_type, default)
+  if type(var) == var_type then
+    return var
+  end
+
+  return default
+end
+
+--- boolean_to_number: convert a boolean variable to a number
+-- @param boolean (boolean) the boolean that will be converted
+-- @return (number) a number according to the boolean value
+function ScCommon:boolean_to_number(boolean)
+  return boolean and 1 or 0
+end
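+--[[
+  Hedged examples for the helpers above (assumes `logger` is an sc_logger
+  instance; values illustrative):
+
+    local common = sc_common.new(logger)
+    common:ifnil_or_empty("", "default")      --> "default"
+    common:if_wrong_type(42, "string", "n/a") --> "n/a"
+    common:boolean_to_number(true)            --> 1
+]]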
+
+--- number_to_boolean: convert a 0 or 1 number to its boolean counterpart
+-- @param number (number) the number to convert
+-- @return (boolean) true if param is 1, false if param is 0, nil otherwise
+function ScCommon:number_to_boolean(number)
+  if number ~= 0 and number ~= 1 then
+    self.sc_logger:error("[sc_common:number_to_boolean]: number is not 1 or 0. Returning nil. Parameter value is: " .. tostring(number))
+    return nil
+  end
+
+  if number == 1 then
+    return true
+  end
+
+  return false
+end
+
+
+--- check_boolean_number_option_syntax: make sure the number is either 1 or 0
+-- @param number (number) the boolean number that must be validated
+-- @param default (number) the default value that is going to be returned if the number is not validated
+-- @return number (number) a boolean number
+function ScCommon:check_boolean_number_option_syntax(number, default)
+  if number ~= 1 and number ~= 0 then
+    number = default
+  end
+
+  return number
+end
+
+--- split: convert a string into a table
+-- @param text (string) the string that is going to be split into a table
+-- @param [opt] separator (string) the separator character that will be used to split the string
+-- @return false (boolean) if text param is empty or nil
+-- @return table (table) a table of strings
+function ScCommon:split(text, separator)
+  -- return false if text is nil or empty
+  if text == nil or text == "" then
+    self.sc_logger:error("[sc_common:split]: could not split text because it is nil or empty")
+    return false
+  end
+
+  local hash = {}
+
+  -- set default separator
+  separator = ifnil_or_empty(separator, ",")
+
+  for value in string.gmatch(text, "([^" .. separator .. "]+)") do
+    table.insert(hash, value)
+  end
+
+  return hash
+end
+
+--- compare_numbers: compare two numbers and return true if the comparison holds
+-- @param firstNumber {number}
+-- @param secondNumber {number}
+-- @param operator {string} the mathematical operator that is used for the comparison
+-- @return {boolean} the result of the comparison, or nil if the operator or one of the operands is invalid
+function ScCommon:compare_numbers(firstNumber, secondNumber, operator)
+  if operator ~= "==" and operator ~= "~=" and operator ~= "<" and operator ~= ">" and operator ~= ">=" and operator ~= "<=" then
+    return nil
+  end
+
+  if type(firstNumber) ~= "number" or type(secondNumber) ~= "number" then
+    return nil
+  end
+
+  if operator == "<" then
+    if firstNumber < secondNumber then
+      return true
+    end
+  elseif operator == ">" then
+    if firstNumber > secondNumber then
+      return true
+    end
+  elseif operator == ">=" then
+    if firstNumber >= secondNumber then
+      return true
+    end
+  elseif operator == "<=" then
+    if firstNumber <= secondNumber then
+      return true
+    end
+  elseif operator == "==" then
+    if firstNumber == secondNumber then
+      return true
+    end
+  elseif operator == "~=" then
+    if firstNumber ~= secondNumber then
+      return true
+    end
+  end
+
+  return false
+end
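+--[[
+  Hedged examples for split and compare_numbers above, plus the escaping
+  and trimming helpers defined further below (values illustrative):
+
+    common:split("a,b,c")                --> { "a", "b", "c" }
+    common:compare_numbers(3, 2, ">=")   --> true
+    common:compare_numbers(3, "2", ">=") --> nil (non-number operand)
+    common:xml_escape("<a & b>")         --> "&lt;a &amp; b&gt;"
+    common:json_escape('say "hi"')       --> 'say \"hi\"'
+    common:trim("  payload  ")           --> "payload"
+]]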
+
+--- generate_postfield_param_string: convert a table of parameters into an url-encoded parameter string
+-- @param params (table) the table of all url string parameters to convert
+-- @return false (boolean) if the params variable is not a table
+-- @return param_string (string) the url-encoded parameter string
+function ScCommon:generate_postfield_param_string(params)
+  -- return false because params type is wrong
+  if (type(params) ~= "table") then
+    self.sc_logger:error("[sc_common:generate_postfield_param_string]: parameters to convert aren't in a table")
+    return false
+  end
+
+  local param_string = ""
+
+  -- concatenate data in params table into a string
+  for field, value in pairs(params) do
+    if param_string == "" then
+      param_string = field .. "=" .. broker.url_encode(value)
+    else
+      param_string = param_string .. "&" .. field .. "=" .. broker.url_encode(value)
+    end
+  end
+
+  -- return url encoded string
+  return param_string
+end
+
+--- load_json_file: load a json file
+-- @param json_file (string) path to the json file
+-- @return true|false (boolean) whether the json file is valid or not
+-- @return content (table) the parsed json
+function ScCommon:load_json_file(json_file)
+  local file = io.open(json_file, "r")
+
+  -- return false if we can't open the file
+  if not file then
+    self.sc_logger:error("[sc_common:load_json_file]: couldn't open file "
+      .. tostring(json_file) .. ". Make sure your file is there and that it is readable by centreon-broker")
+    return false
+  end
+
+  -- get content of the file
+  local file_content = file:read("*a")
+  io.close(file)
+
+  -- parse it ("err" is used so the local does not shadow the global error() function)
+  local content, err = broker.json_decode(file_content)
+
+  -- return false if json couldn't be parsed
+  if err then
+    self.sc_logger:error("[sc_common:load_json_file]: could not parse json file "
+      .. tostring(json_file) .. ". Error is: " .. tostring(err))
+    return false
+  end
+
+  return true, content
+end
+
+--- json_escape: escape json special characters in a string
+-- @param string (string) the string that must be escaped
+-- @return string (string) the string with escaped characters
+function ScCommon:json_escape(string)
+  if type(string) ~= "string" then
+    self.sc_logger:error("[sc_common:json_escape]: the input parameter is not valid, it must be a string. Sent value: " .. tostring(string))
+    return string
+  end
+
+  -- backslashes must be escaped first so the escapes added below are left untouched
+  return string:gsub('\\', '\\\\')
+    :gsub('\t', '\\t')
+    :gsub('\n', '\\n')
+    :gsub('\b', '\\b')
+    :gsub('\r', '\\r')
+    :gsub('\f', '\\f')
+    :gsub('/', '\\/')
+    :gsub('"', '\\"')
+end
+
+--- xml_escape: escape xml special characters in a string
+-- @param string (string) the string that must be escaped
+-- @return string (string) the string with escaped characters
+function ScCommon:xml_escape(string)
+  if type(string) ~= "string" then
+    self.sc_logger:error("[sc_common:xml_escape]: the input parameter is not valid, it must be a string. Sent value: " .. tostring(string))
+    return string
+  end
+
+  -- ampersands must be escaped first so the entities added below are left untouched
+  return string:gsub('&', '&amp;')
+    :gsub('<', '&lt;')
+    :gsub('>', '&gt;')
+    :gsub('"', '&quot;')
+    :gsub("'", "&apos;")
+end
+
+--- lua_regex_escape: escape lua pattern special characters in a string
+-- @param string (string) the string that must be escaped
+-- @return string (string) the string with escaped characters
+function ScCommon:lua_regex_escape(string)
+  if type(string) ~= "string" then
+    self.sc_logger:error("[sc_common:lua_regex_escape]: the input parameter is not valid, it must be a string. Sent value: " .. tostring(string))
+    return string
+  end
+
+  -- the % escape character must be handled first
+  return string:gsub('%%', '%%%%')
+    :gsub('%.', '%%.')
+    :gsub("%*", "%%*")
+    :gsub("%-", "%%-")
+    :gsub("%(", "%%(")
+    :gsub("%)", "%%)")
+    :gsub("%[", "%%[")
+    :gsub("%]", "%%]")
+    :gsub("%$", "%%$")
+    :gsub("%^", "%%^")
+    :gsub("%+", "%%+")
+    :gsub("%?", "%%?")
+end
+
+--- dumper: dump variables for debug purposes
+-- @param variable (any) the variable that must be dumped
+-- @param result (string) [opt] the string that contains the dumped variable. ONLY USED INTERNALLY FOR RECURSIVE PURPOSE
+-- @param tab_char (string) [opt] the string that contains the tab character. ONLY USED INTERNALLY FOR RECURSIVE PURPOSE (and design)
+-- @return result (string) the dumped variable
+function ScCommon:dumper(variable, result, tab_char)
+  -- tabulation handling
+  if not tab_char then
+    tab_char = ""
+  else
+    tab_char = tab_char .. "\t"
+  end
+
+  -- non table variables handling
+  if type(variable) ~= "table" then
+    if result then
+      result = result .. "\n" .. tab_char .. "[" .. type(variable) .. "]: " .. tostring(variable)
+    else
+      result = "\n[" .. type(variable) .. "]: " .. tostring(variable)
+    end
+  else
+    if not result then
+      result = "\n[table]"
+      tab_char = "\t"
+    end
+
+    -- recursive looping through each table in the table
+    for index, value in pairs(variable) do
+      if type(value) ~= "table" then
+        if result then
+          result = result .. "\n" .. tab_char .. "[" .. type(value) .. "] " .. tostring(index) .. ": " .. tostring(value)
+        else
+          result = "\n" .. tostring(index) .. " [" .. type(value) .. "]: " .. tostring(value)
+        end
+      else
+        if result then
+          result = result .. "\n" .. tab_char .. "[" .. type(value) .. "] " .. tostring(index) .. ": "
+        else
+          result = "\n[" .. type(value) .. "] " .. tostring(index) .. ": "
+        end
+        result = self:dumper(value, result, tab_char)
+      end
+    end
+  end
+
+  return result
+end
+
+--- trim: remove spaces at the beginning and end of a string (or remove the provided character)
+-- @param string (string) the string that will be trimmed
+-- @param character [opt] (string) the character to trim
+-- @return string (string) the trimmed string
+function ScCommon:trim(string, character)
+  local result
+  if not character then
+    result = string.gsub(string, "^%s*(.-)%s*$", "%1")
+  else
+    result = string.gsub(string, "^" .. character .. "*(.-)" .. character .. "*$", "%1")
+  end
+
+  return result
+end
+
+--- get_bbdo_version: get the first digit of the bbdo protocol version
+-- @return bbdo_version (number) the first digit of the bbdo version
+function ScCommon:get_bbdo_version()
+  local bbdo_version
+
+  if broker.bbdo_version ~= nil then
+    _, _, bbdo_version = string.find(broker.bbdo_version(), "(%d+)%.%d+%.%d+")
+  else
+    -- brokers that do not expose bbdo_version() speak bbdo 2
+    bbdo_version = 2
+  end
+
+  return tonumber(bbdo_version)
+end
+
+--- is_valid_pattern: check if a Lua pattern is valid or not
+-- @param pattern (string) the pattern that must be validated
+-- @return boolean (boolean) true if the pattern is valid, false otherwise
+function ScCommon:is_valid_pattern(pattern)
+  local status, result = pcall(string.match, "a random string", pattern)
+
+  if not status then
+    self.sc_logger:error("[sc_common:is_valid_pattern]: invalid pattern. Error message is: " .. tostring(result))
+  end
+
+  return status
+end
+
+return sc_common
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
new file mode 100644
index 00000000000..2441444e04e
--- /dev/null
+++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_event.lua
@@ -0,0 +1,1373 @@
+#!/usr/bin/lua
+
+---
+-- Module to help handle events from Centreon broker
+-- @module sc_event
+-- @alias sc_event
+
+local sc_event = {}
+
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+
+local ScEvent = {}
+
+function sc_event.new(event, params, common, logger, broker)
+  local self = {}
+
+  self.sc_logger = logger
+  if not self.sc_logger then
+    self.sc_logger = sc_logger.new()
+  end
+  self.sc_common = common
+  self.params = params
+  self.event = event
+  self.sc_broker = broker
+  self.bbdo_version = self.sc_common:get_bbdo_version()
+
+  self.event.cache = {}
+
+  setmetatable(self, { __index = ScEvent })
+  return self
+end
+
+--- is_valid_category: check if the event is in an accepted category
+-- @return true|false (boolean)
+function ScEvent:is_valid_category()
+  return self:find_in_mapping(self.params.category_mapping, self.params.accepted_categories, self.event.category)
+end
+
+--- is_valid_element: check if the event is an accepted element
+-- @return true|false (boolean)
+function ScEvent:is_valid_element()
+  return self:find_in_mapping(self.params.element_mapping[self.event.category], self.params.accepted_elements, self.event.element)
+end
+
+--- find_in_mapping: check if an item type is in the mapping and is accepted
+-- @param mapping (table) the mapping table
+-- @param reference (string) the accepted values for the item
+-- @param item (string) the item we want to find in the mapping table and in the reference
+-- @return (boolean)
+function ScEvent:find_in_mapping(mapping, reference, item)
+  for mapping_index, mapping_value in pairs(mapping) do
+    for reference_index, reference_value in pairs(self.sc_common:split(reference, ",")) do
+      if item == mapping_value and mapping_index == reference_value then
+        return true
+      end
+    end
+  end
+
+  return false
+end
+
+--- is_valid_event: check if the event is accepted depending on configured conditions
+-- @return true|false (boolean)
+function ScEvent:is_valid_event()
+  local is_valid_event = false
+
+  -- run validation tests depending on the category of the event
+  if self.event.category == self.params.bbdo.categories.neb.id then
+    is_valid_event = self:is_valid_neb_event()
+  elseif self.event.category == self.params.bbdo.categories.storage.id then
+    is_valid_event = self:is_valid_storage_event()
+  elseif self.event.category == self.params.bbdo.categories.bam.id then
+    is_valid_event = self:is_valid_bam_event()
+  end
+
+  -- run custom code
+  if self.params.custom_code and type(self.params.custom_code) == "function" then
+    self, is_valid_event = self.params.custom_code(self)
+  end
+
+  return is_valid_event
+end
+
+--- is_valid_neb_event: check if the event is an accepted neb type event
+-- @return true|false (boolean)
+function ScEvent:is_valid_neb_event()
+  local is_valid_event = false
+
+  -- run validation tests depending on the element type of the neb event
+  if self.event.element == self.params.bbdo.elements.host_status.id 
then + is_valid_event = self:is_valid_host_status_event() + elseif self.event.element == self.params.bbdo.elements.service_status.id then + is_valid_event = self:is_valid_service_status_event() + elseif self.event.element == self.params.bbdo.elements.acknowledgement.id then + is_valid_event = self:is_valid_acknowledgement_event() + elseif self.event.element == self.params.bbdo.elements.downtime.id then + is_valid_event = self:is_valid_downtime_event() + end + + return is_valid_event +end + +--- is_valid_host_status_event: check if the host status event is an accepted one +-- @return true|false (boolean) +function ScEvent:is_valid_host_status_event() + -- return false if we can't get hostname or host id is nil + if not self:is_valid_host() then + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated") + return false + end + + -- return false if event status is not accepted + if not self:is_valid_event_status(self.params.host_status) then + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) + .. " do not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state])) + return false + end + + -- return false if event status is a duplicate and dedup is enabled + if self:is_host_status_event_duplicated() then + self.sc_logger:warning("[sc_event:is_host_status_event_duplicated]: host_id: " .. tostring(self.event.host_id) + .. " is sending a duplicated event. Dedup option (enable_host_status_dedup) is set to: " .. tostring(self.params.enable_host_status_dedup)) + return false + end + + -- return false if one of event ack, downtime, state type (hard soft) or flapping aren't valid + if not self:is_valid_event_states() then + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not in a validated downtime, ack or hard/soft state") + return false + end + + -- return false if host is not monitored from an accepted poller + if not self:is_valid_poller() then + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not monitored from an accepted poller") + return false + end + + -- return false if host has not an accepted severity + if not self:is_valid_host_severity() then + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " has not an accepted severity") + return false + end + + -- return false if host is not in an accepted hostgroup + if not self:is_valid_hostgroup() then + self.sc_logger:warning("[sc_event:is_valid_host_status_event]: host_id: " .. tostring(self.event.host_id) .. " is not in an accepted hostgroup") + return false + end + + -- in bbdo 2 last_update do exist but not in bbdo3. 
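+--[[
+  The two assignments just below mirror whichever timestamp is present onto
+  the missing field; an equivalent standalone sketch (illustrative only):
+
+    local function normalize_timestamps(event)
+      event.last_update = event.last_update or event.last_check
+      event.last_check = event.last_check or event.last_update
+      return event
+    end
+]]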
+ -- last_check also exist in bbdo2 but it is preferable to stay compatible with all stream connectors + if not self.event.last_update and self.event.last_check then + self.event.last_update = self.event.last_check + elseif not self.event.last_check and self.event.last_update then + self.event.last_check = self.event.last_update + end + + self:build_outputs() + + return true +end + +--- is_valid_service_status_event: check if the service status event is an accepted one +-- @return true|false (boolean) +function ScEvent:is_valid_service_status_event() + -- return false if we can't get hostname or host id is nil + if not self:is_valid_host() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: host_id: " .. tostring(self.event.host_id) + .. " hasn't been validated for service with id: " .. tostring(self.event.service_id)) + return false + end + + -- return false if we can't get service description of service id is nil + if not self:is_valid_service() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't been validated") + return false + end + + -- return false if event status is not accepted + if not self:is_valid_event_status(self.params.service_status) then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service with id: " .. tostring(self.event.service_id) + .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state])) + return false + end + + -- return false if event status is a duplicate and dedup is enabled + if self:is_service_status_event_duplicated() then + self.sc_logger:warning("[sc_event:is_service_status_event_duplicated]: host_id: " .. tostring(self.event.host_id) + .. " service_id: " .. tostring(self.event.service_id) .. " is sending a duplicated event. Dedup option (enable_service_status_dedup) is set to: " .. tostring(self.params.enable_service_status_dedup)) + return false + end + + -- return false if one of event ack, downtime, state type (hard soft) or flapping aren't valid + if not self:is_valid_event_states() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in a validated downtime, ack or hard/soft state") + return false + end + + -- return false if host is not monitored from an accepted poller + if not self:is_valid_poller() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) + .. ". host_id: " .. tostring(self.event.host_id) .. " is not monitored from an accepted poller") + return false + end + + -- return false if host has not an accepted severity + if not self:is_valid_host_severity() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) + .. ". host_id: " .. tostring(self.event.host_id) .. ". Host has not an accepted severity") + return false + end + + -- return false if service has not an accepted severity + if not self:is_valid_service_severity() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service id: " .. tostring(self.event.service_id) + .. ". host_id: " .. tostring(self.event.host_id) .. ". 
Service has not an accepted severity") + return false + end + + -- return false if host is not in an accepted hostgroup + if not self:is_valid_hostgroup() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) + .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.event.host_id)) + return false + end + + -- return false if service is not in an accepted servicegroup + if not self:is_valid_servicegroup() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup") + return false + end + + -- in bbdo 2 last_update do exist but not in bbdo3. + -- last_check also exist in bbdo2 but it is preferable to stay compatible with all stream connectors + if not self.event.last_update and self.event.last_check then + self.event.last_update = self.event.last_check + elseif not self.event.last_check and self.event.last_update then + self.event.last_check = self.event.last_update + end + + self:build_outputs() + + return true +end + +--- is_valid_host: check if host name and/or id are valid +-- @return true|false (boolean) +function ScEvent:is_valid_host() + + -- return false if host id is nil + if (not self.event.host_id and self.params.skip_nil_id == 1) then + self.sc_logger:warning("[sc_event:is_valid_host]: Invalid host with id: " .. tostring(self.event.host_id) .. " skip nil id is: " .. tostring(self.params.skip_nil_id)) + return false + end + + self.event.cache.host = self.sc_broker:get_host_all_infos(self.event.host_id) + + -- return false if we can't get hostname + if (not self.event.cache.host and self.params.skip_anon_events == 1) then + self.sc_logger:warning("[sc_event:is_valid_host]: No name for host with id: " .. tostring(self.event.host_id) + .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) + return false + elseif (not self.event.cache.host and self.params.skip_anon_events == 0) then + self.event.cache.host = { + name = self.event.host_id + } + end + + -- force host name to be its id if no name has been found + if not self.event.cache.host.name then + self.event.cache.host.name = self.event.cache.host.host_id or self.event.host_id + end + + -- return false if event is coming from fake bam host + if string.find(self.event.cache.host.name, "^_Module_BAM_*") then + self.sc_logger:debug("[sc_event:is_valid_host]: Host is a BAM fake host: " .. tostring(self.event.cache.host.name)) + return false + end + + -- loop through each Lua pattern to check if host name match the filter + local is_valid_pattern = false + if self.params.accepted_hosts ~= "" then + for index, pattern in ipairs(self.params.accepted_hosts_pattern_list) do + if string.match(self.event.cache.host.name, pattern) then + self.sc_logger:debug("[sc_event:is_valid_host]: host " .. tostring(self.event.cache.host.name) + .. " matched pattern: " .. tostring(pattern)) + is_valid_pattern = true + break + end + end + else + is_valid_pattern = true + end + + if not is_valid_pattern then + self.sc_logger:info("[sc_event:is_valid_host]: Host: " .. tostring(self.event.cache.host.name) + .. " doesn't match accepted_hosts pattern: " .. tostring(self.params.accepted_hosts) + .. 
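+--[[
+  The accepted_hosts / accepted_services filters rely on plain Lua patterns;
+  a hedged standalone sketch of the matching loop used above (names are
+  illustrative):
+
+    local function matches_any(name, pattern_list)
+      for _, pattern in ipairs(pattern_list) do
+        if string.match(name, pattern) then
+          return true
+        end
+      end
+      return false
+    end
+]]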
" or any of the sub-patterns if accepted_hosts_enable_split_pattern is enabled") + return false + end + + return true +end + +--- is_valid_service: check if service description and/or id are valid +-- @return true|false (boolean) +function ScEvent:is_valid_service() + + -- return false if service id is nil + if (not self.event.service_id and self.params.skip_nil_id == 1) then + self.sc_logger:warning("[sc_event:is_valid_service]: Invalid service with id: " .. tostring(self.event.service_id) .. " skip nil id is: " .. tostring(self.params.skip_nil_id)) + return false + end + + self.event.cache.service = self.sc_broker:get_service_all_infos(self.event.host_id, self.event.service_id) + + -- return false if we can't get service description + if (not self.event.cache.service and self.params.skip_anon_events == 1) then + self.sc_logger:warning("[sc_event:is_valid_service]: Invalid description for service with id: " .. tostring(self.event.service_id) + .. " and skip anon events is: " .. tostring(self.params.skip_anon_events)) + return false + elseif (not self.event.cache.service and self.params.skip_anon_events == 0) then + self.event.cache.service = { + description = self.event.service_id + } + end + + -- force service description to its id if no description has been found + if not self.event.cache.service.description then + self.event.cache.service.description = self.event.service_id + end + + -- loop through each Lua pattern to check if service description match the filter + local is_valid_pattern = false + if self.params.accepted_services ~= "" then + for index, pattern in ipairs(self.params.accepted_services_pattern_list) do + if string.match(self.event.cache.service.description, pattern) then + self.sc_logger:debug("[sc_event:is_valid_service]: service " .. tostring(self.event.cache.service.description) + .. " from host: " .. tostring(self.event.cache.host.name) .. " matched pattern: " .. tostring(pattern)) + is_valid_pattern = true + break + end + end + else + is_valid_pattern = true + end + + if not is_valid_pattern then + self.sc_logger:info("[sc_event:is_valid_service]: Service: " .. tostring(self.event.cache.service.description) .. " from host: " .. tostring(self.event.cache.host.name) + .. " doesn't match accepted_services pattern: " .. tostring(self.params.accepted_services) + .. 
" or any of the sub-patterns if accepted_services_enable_split_pattern is enabled") + return false + end + + return true +end + +--- is_valid_event_states: wrapper method that checks common aspect of an event such as ack and state_type +-- @return true|false (boolean) +function ScEvent:is_valid_event_states() + -- return false if state_type (HARD/SOFT) is not valid + if not self:is_valid_event_state_type() then + return false + end + + -- return false if acknowledge state is not valid + if not self:is_valid_event_acknowledge_state() then + return false + end + + -- return false if downtime state is not valid + if not self:is_valid_event_downtime_state() then + return false + end + + -- return false if flapping state is not valid + if not self:is_valid_event_flapping_state() then + return false + end + + return true +end + +--- is_valid_event_status: check if the event has an accepted status +-- @param accepted_status_list (string) a coma separated list of accepted status ("ok,warning,critical") +-- @return true|false (boolean) +function ScEvent:is_valid_event_status(accepted_status_list) + local status_list = self.sc_common:split(accepted_status_list, ",") + + if not status_list then + self.sc_logger:error("[sc_event:is_valid_event_status]: accepted_status list is nil or empty") + return false + end + + -- start compat patch bbdo2 => bbdo 3 + if (not self.event.state and self.event.current_state) then + self.event.state = self.event.current_state + end + + if (not self.event.current_state and self.event.state) then + self.event.current_state = self.event.state + end + -- end compat patch + + for _, status_id in ipairs(status_list) do + if tostring(self.event.state) == status_id then + return true + end + end + + -- handle downtime event specific case for logging + if (self.event.category == self.params.bbdo.categories.neb.id and self.event.element == self.params.bbdo.elements.downtime.id) then + self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: " + .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) .. ". Accepted states are: " .. tostring(accepted_status_list)) + return false + end + + -- log for everything else + self.sc_logger:warning("[sc_event:is_valid_event_status] event has an invalid state. Current state: " + .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]) .. ". Accepted states are: " .. tostring(accepted_status_list)) + return false +end + +--- is_valid_event_state_type: check if the state type (HARD/SOFT) is accepted +-- @return true|false (boolean) +function ScEvent:is_valid_event_state_type() + if not self.sc_common:compare_numbers(self.event.state_type, self.params.hard_only, ">=") then + self.sc_logger:warning("[sc_event:is_valid_event_state_type]: event is not in an valid state type. Event state type must be above or equal to " .. tostring(self.params.hard_only) + .. ". Current state type: " .. 
tostring(self.event.state_type)) + return false + end + + return true +end + +--- is_valid_event_acknowledge_state: check if the acknowledge state of the event is valid +-- @return true|false (boolean) +function ScEvent:is_valid_event_acknowledge_state() + -- compat patch bbdo 3 => bbdo 2 + if (not self.event.acknowledged and self.event.acknowledgement_type) then + if self.event.acknowledgement_type >= 1 then + self.event.acknowledged = true + else + self.event.acknowledged = false + end + end + + if not self.sc_common:compare_numbers(self.params.acknowledged, self.sc_common:boolean_to_number(self.event.acknowledged), ">=") then + self.sc_logger:warning("[sc_event:is_valid_event_acknowledge_state]: event is not in an valid ack state. Event ack state must be below or equal to " .. tostring(self.params.acknowledged) + .. ". Current ack state: " .. tostring(self.sc_common:boolean_to_number(self.event.acknowledged))) + return false + end + + return true +end + +--- is_valid_event_downtime_state: check if the event is in an accepted downtime state +-- @return true|false (boolean) +function ScEvent:is_valid_event_downtime_state() + -- patch compat bbdo 3 => bbdo 2 + if (not self.event.scheduled_downtime_depth and self.event.downtime_depth) then + self.event.scheduled_downtime_depth = self.event.downtime_depth + end + + if not self.sc_common:compare_numbers(self.params.in_downtime, self.event.scheduled_downtime_depth, ">=") then + self.sc_logger:warning("[sc_event:is_valid_event_downtime_state]: event is not in an valid downtime state. Event downtime state must be below or equal to " .. tostring(self.params.in_downtime) + .. ". Current downtime state: " .. tostring(self.sc_common:boolean_to_number(self.event.scheduled_downtime_depth))) + return false + end + + return true +end + +--- is_valid_event_flapping_state: check if the event is in an accepted flapping state +-- @return true|false (boolean) +function ScEvent:is_valid_event_flapping_state() + if not self.sc_common:compare_numbers(self.params.flapping, self.sc_common:boolean_to_number(self.event.flapping), ">=") then + self.sc_logger:warning("[sc_event:is_valid_event_flapping_state]: event is not in an valid flapping state. Event flapping state must be below or equal to " .. tostring(self.params.flapping) + .. ". Current flapping state: " .. tostring(self.sc_common:boolean_to_number(self.event.flapping))) + return false + end + + return true +end + +--- is_valid_hostgroup: check if the event is in an accepted hostgroup +-- @return true|false (boolean) +function ScEvent:is_valid_hostgroup() + self.event.cache.hostgroups = self.sc_broker:get_hostgroups(self.event.host_id) + + -- return true if options are not set or if both options are set + local accepted_hostgroups_isnotempty = self.params.accepted_hostgroups ~= "" + local rejected_hostgroups_isnotempty = self.params.rejected_hostgroups ~= "" + if (not accepted_hostgroups_isnotempty and not rejected_hostgroups_isnotempty) or (accepted_hostgroups_isnotempty and rejected_hostgroups_isnotempty) then + return true + end + + -- return false if no hostgroups were found + if not self.event.cache.hostgroups then + if accepted_hostgroups_isnotempty then + self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id) + .. " is not linked to a hostgroup. Accepted hostgroups are: " .. 
self.params.accepted_hostgroups .. ".")
+      return false
+    elseif rejected_hostgroups_isnotempty then
+      self.sc_logger:debug("[sc_event:is_valid_hostgroup]: accepting event because host with id: " .. tostring(self.event.host_id)
+        .. " is not linked to a hostgroup. Rejected hostgroups are: " .. self.params.rejected_hostgroups .. ".")
+      return true
+    end
+  end
+
+  local accepted_hostgroup_name = self:find_hostgroup_in_list(self.params.accepted_hostgroups)
+  local rejected_hostgroup_name = self:find_hostgroup_in_list(self.params.rejected_hostgroups)
+
+  -- return false if the host is not in a valid hostgroup
+  if accepted_hostgroups_isnotempty and not accepted_hostgroup_name then
+    self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id)
+      .. " is not in an accepted hostgroup. Accepted hostgroups are: " .. self.params.accepted_hostgroups)
+    return false
+  elseif rejected_hostgroups_isnotempty and rejected_hostgroup_name then
+    self.sc_logger:warning("[sc_event:is_valid_hostgroup]: dropping event because host with id: " .. tostring(self.event.host_id)
+      .. " is in a rejected hostgroup. Rejected hostgroups are: " .. self.params.rejected_hostgroups)
+    return false
+  else
+    local debug_msg = "[sc_event:is_valid_hostgroup]: event for host with id: " .. tostring(self.event.host_id)
+    if accepted_hostgroups_isnotempty then
+      debug_msg = debug_msg .. " matched hostgroup: " .. tostring(accepted_hostgroup_name)
+    elseif rejected_hostgroups_isnotempty then
+      debug_msg = debug_msg .. " did not match hostgroup: " .. tostring(rejected_hostgroup_name)
+    end
+    self.sc_logger:debug(debug_msg)
+  end
+
+  return true
+end
+
+--- find_hostgroup_in_list: compare accepted hostgroups from parameters with the event hostgroups
+-- @param hostgroups_list (string) a comma-separated list of hostgroup names
+-- @return hostgroup_name (string) the name of the first matching hostgroup
+-- @return false (boolean) if no matching hostgroup has been found
+function ScEvent:find_hostgroup_in_list(hostgroups_list)
+  if hostgroups_list == nil or hostgroups_list == "" then
+    return false
+  else
+    for _, hostgroup_name in ipairs(self.sc_common:split(hostgroups_list, ",")) do
+      for _, event_hostgroup in pairs(self.event.cache.hostgroups) do
+        if hostgroup_name == event_hostgroup.group_name then
+          return hostgroup_name
+        end
+      end
+    end
+  end
+  return false
+end
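+--[[
+  Condensed sketch of the accept/reject logic shared by is_valid_hostgroup
+  and is_valid_servicegroup (illustrative; `matched` stands for the
+  find_*_in_list helpers):
+
+    if accepted == "" and rejected == "" then return true end  -- no filter set
+    if accepted ~= "" and rejected ~= "" then return true end  -- both set: filters ignored
+    if accepted ~= "" then return matched(accepted) ~= false end
+    return matched(rejected) == false
+]]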
+
+--- is_valid_servicegroup: check if the event is in an accepted servicegroup
+-- @return true|false (boolean)
+function ScEvent:is_valid_servicegroup()
+  self.event.cache.servicegroups = self.sc_broker:get_servicegroups(self.event.host_id, self.event.service_id)
+
+  -- return true if options are not set or if both options are set
+  local accepted_servicegroups_isnotempty = self.params.accepted_servicegroups ~= ""
+  local rejected_servicegroups_isnotempty = self.params.rejected_servicegroups ~= ""
+  if (not accepted_servicegroups_isnotempty and not rejected_servicegroups_isnotempty) or (accepted_servicegroups_isnotempty and rejected_servicegroups_isnotempty) then
+    return true
+  end
+
+  -- return false if no servicegroups were found
+  if not self.event.cache.servicegroups then
+    if accepted_servicegroups_isnotempty then
+      self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id)
+        .. " is not linked to a servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups .. ".")
+      return false
+    elseif rejected_servicegroups_isnotempty then
+      self.sc_logger:debug("[sc_event:is_valid_servicegroup]: accepting event because service with id: " .. tostring(self.event.service_id)
+        .. " is not linked to a servicegroup. Rejected servicegroups are: " .. self.params.rejected_servicegroups .. ".")
+      return true
+    end
+  end
+
+  local accepted_servicegroup_name = self:find_servicegroup_in_list(self.params.accepted_servicegroups)
+  local rejected_servicegroup_name = self:find_servicegroup_in_list(self.params.rejected_servicegroups)
+
+  -- return false if the service is not in a valid servicegroup
+  if accepted_servicegroups_isnotempty and not accepted_servicegroup_name then
+    self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id)
+      .. " is not in an accepted servicegroup. Accepted servicegroups are: " .. self.params.accepted_servicegroups)
+    return false
+  elseif rejected_servicegroups_isnotempty and rejected_servicegroup_name then
+    self.sc_logger:debug("[sc_event:is_valid_servicegroup]: dropping event because service with id: " .. tostring(self.event.service_id)
+      .. " is in a rejected servicegroup. Rejected servicegroups are: " .. self.params.rejected_servicegroups)
+    return false
+  end
+
+  local debug_msg = "[sc_event:is_valid_servicegroup]: event for service with id: " .. tostring(self.event.service_id)
+  if accepted_servicegroups_isnotempty then
+    debug_msg = debug_msg .. " matched servicegroup: " .. tostring(accepted_servicegroup_name)
+  elseif rejected_servicegroups_isnotempty then
+    debug_msg = debug_msg .. " did not match servicegroup: " .. tostring(rejected_servicegroup_name)
+  end
+  self.sc_logger:debug(debug_msg)
+
+  return true
+end
+
+--- find_servicegroup_in_list: compare accepted servicegroups from parameters with the event servicegroups
+-- @param servicegroups_list (string) a comma-separated list of servicegroup names
+-- @return servicegroup_name or false (string|boolean) the name of the first matching servicegroup if found, or false if not found
+function ScEvent:find_servicegroup_in_list(servicegroups_list)
+  if servicegroups_list == nil or servicegroups_list == "" then
+    return false
+  else
+    for _, servicegroup_name in ipairs(self.sc_common:split(servicegroups_list, ",")) do
+      for _, event_servicegroup in pairs(self.event.cache.servicegroups) do
+        if servicegroup_name == event_servicegroup.group_name then
+          return servicegroup_name
+        end
+      end
+    end
+  end
+  return false
+end
+
+--- is_valid_bam_event: check if the event is an accepted bam type event
+-- @return true|false (boolean)
+function ScEvent:is_valid_bam_event()
+  -- return false if ba name is invalid or ba_id is nil
+  if not self:is_valid_ba() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " hasn't been validated")
+    return false
+  end
+
+  -- return false if BA status is not accepted
+  if not self:is_valid_ba_status_event() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " has an invalid state")
+    return false
+  end
+
+  -- return false if BA downtime state is not accepted
+  if not self:is_valid_ba_downtime_state() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " is not in a validated downtime state")
+    return false
+  end
+
+  -- DO NOTHING FOR THE MOMENT
+  if not self:is_valid_ba_acknowledge_state() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " is not in a validated acknowledge state")
+    return false
+  end
+
+  -- return false if BA is not in an accepted BV
+  if not self:is_valid_bv() then
+    self.sc_logger:warning("[sc_event:is_valid_bam_event]: ba_id: " .. tostring(self.event.ba_id) .. " is not in an accepted BV")
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_ba: check if ba name and/or id are valid
+-- @return true|false (boolean)
+function ScEvent:is_valid_ba()
+
+  -- return false if ba_id is nil
+  if (not self.event.ba_id and self.params.skip_nil_id == 1) then
+    self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA with id: " .. tostring(self.event.ba_id) .. ". And skip nil id is set to: " .. tostring(self.params.skip_nil_id))
+    return false
+  end
+
+  -- get_ba_infos returns false on a cache miss: fall back to an empty table so the checks below cannot error out
+  self.event.cache.ba = self.sc_broker:get_ba_infos(self.event.ba_id) or {}
+
+  -- return false if we can't get ba name
+  if (not self.event.cache.ba.ba_name and self.params.skip_anon_events == 1) then
+    self.sc_logger:warning("[sc_event:is_valid_ba]: Invalid BA with id: " .. tostring(self.event.ba_id)
+      .. ". Found BA name is: " .. tostring(self.event.cache.ba.ba_name) .. ". And skip anon event param is set to: " .. tostring(self.params.skip_anon_events))
+    return false
+  elseif (not self.event.cache.ba.ba_name and self.params.skip_anon_events == 0) then
+    self.event.cache.ba = {
+      ba_name = self.event.ba_id
+    }
+  end
+
+  return true
+end
+
+--- is_valid_ba_status_event: check if the ba status event is an accepted one
+-- @return true|false (boolean)
+function ScEvent:is_valid_ba_status_event()
+  if not self:is_valid_event_status(self.params.ba_status) then
+    self.sc_logger:warning("[sc_event:is_valid_ba_status_event]: Invalid BA status for BA id: " .. tostring(self.event.ba_id) .. ". State is: "
+      .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.state]) .. ". Accepted states are: " .. tostring(self.params.ba_status))
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_ba_downtime_state: check if the ba downtime state is an accepted one
+-- @return true|false (boolean)
+function ScEvent:is_valid_ba_downtime_state()
+  if not self.sc_common:compare_numbers(self.params.in_downtime, self.sc_common:boolean_to_number(self.event.in_downtime), ">=") then
+    self.sc_logger:warning("[sc_event:is_valid_ba_downtime_state]: Invalid BA downtime state for BA id: " .. tostring(self.event.ba_id) .. ". Downtime state is: " .. tostring(self.event.in_downtime)
+      .. " and the accepted downtime state must be below or equal to: " .. tostring(self.params.in_downtime))
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_ba_acknowledge_state: check if the ba acknowledge state is an accepted one
+-- @return true|false (boolean)
+function ScEvent:is_valid_ba_acknowledge_state()
+  -- if not self.sc_common:compare_numbers(self.params.in_downtime, self.event.in_downtime, '>=') then
+  --   return false
+  -- end
+
+  return true
+end
+
+--- is_valid_bv: check if the event is in an accepted BV
+-- @return true|false (boolean)
+function ScEvent:is_valid_bv()
+  -- BVs are linked to the BA carried by the event (get_bvs_infos expects a ba_id)
+  self.event.cache.bvs = self.sc_broker:get_bvs_infos(self.event.ba_id)
+
+  -- return true if options are not set or if both options are set
+  local accepted_bvs_isnotempty = self.params.accepted_bvs ~= ""
+  local rejected_bvs_isnotempty = self.params.rejected_bvs ~= ""
+  if (not accepted_bvs_isnotempty and not rejected_bvs_isnotempty) or (accepted_bvs_isnotempty and rejected_bvs_isnotempty) then
+    return true
+  end
+
+  -- return false if no bvs were found
+  if not self.event.cache.bvs then
+    if accepted_bvs_isnotempty then
+      self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id)
+        .. " is not linked to a BV. Accepted BVs are: " .. self.params.accepted_bvs .. ".")
+      return false
+    elseif rejected_bvs_isnotempty then
+      self.sc_logger:debug("[sc_event:is_valid_bv]: accepting event because BA with id: " .. tostring(self.event.ba_id)
+        .. " is not linked to a BV. Rejected BVs are: " .. self.params.rejected_bvs .. ".")
+      return true
+    end
+  end
+
+  local accepted_bv_name = self:find_bv_in_list(self.params.accepted_bvs)
+  local rejected_bv_name = self:find_bv_in_list(self.params.rejected_bvs)
+
+  -- return false if the BA is not in a valid BV
+  if accepted_bvs_isnotempty and not accepted_bv_name then
+    self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id)
+      .. " is not in an accepted BV. Accepted BVs are: " .. self.params.accepted_bvs)
+    return false
+  elseif rejected_bvs_isnotempty and rejected_bv_name then
+    self.sc_logger:debug("[sc_event:is_valid_bv]: dropping event because BA with id: " .. tostring(self.event.ba_id)
+      .. " is in a rejected BV. Rejected BVs are: " .. self.params.rejected_bvs)
+    return false
+  else
+    self.sc_logger:debug("[sc_event:is_valid_bv]: event for BA with id: " .. tostring(self.event.ba_id)
+      .. " matched BV: " .. tostring(accepted_bv_name))
+  end
+
+  return true
+end
+
+--- find_bv_in_list: compare accepted BVs from parameters with the event BVs
+-- @param bvs_list (string) a comma-separated list of BV names
+-- @return bv_name (string) the name of the first matching BV
+-- @return false (boolean) if no matching BV has been found
+function ScEvent:find_bv_in_list(bvs_list)
+  if bvs_list == nil or bvs_list == "" then
+    return false
+  else
+    for _, bv_name in ipairs(self.sc_common:split(bvs_list, ",")) do
+      for _, event_bv in pairs(self.event.cache.bvs) do
+        if bv_name == event_bv.bv_name then
+          return bv_name
+        end
+      end
+    end
+  end
+  return false
+end
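+--[[
+  Poller filtering below follows the same accept/reject convention; hedged
+  configuration sketch (values illustrative):
+
+    params.accepted_pollers = "central,poller-emea"
+    -- the event is kept only when the poller name resolved from broker cache
+    -- appears in that comma-separated list
+]]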
+
+--- is_valid_poller: check if the event is monitored from an accepted poller
+-- @return true|false (boolean)
+function ScEvent:is_valid_poller()
+  -- return false if instance id is not found in cache
+  if not self.event.cache.host.instance_id then
+    self.sc_logger:warning("[sc_event:is_valid_poller]: no instance ID found for host ID: " .. tostring(self.event.host_id))
+    return false
+  end
+
+  self.event.cache.poller = self.sc_broker:get_instance(self.event.cache.host.instance_id)
+
+  -- required if we want to easily have access to the poller name with macros {cache.instance.name}
+  self.event.cache.instance = {
+    id = self.event.cache.host.instance_id,
+    name = self.event.cache.poller
+  }
+
+  -- return true if options are not set or if both options are set
+  local accepted_pollers_isnotempty = self.params.accepted_pollers ~= ""
+  local rejected_pollers_isnotempty = self.params.rejected_pollers ~= ""
+  if (not accepted_pollers_isnotempty and not rejected_pollers_isnotempty) or (accepted_pollers_isnotempty and rejected_pollers_isnotempty) then
+    return true
+  end
+
+  -- return false if no poller was found in cache
+  if not self.event.cache.poller then
+    if accepted_pollers_isnotempty then
+      self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id)
+        .. " is not linked to an accepted poller (no poller found in cache). Accepted pollers are: " .. self.params.accepted_pollers)
+      return false
+    elseif rejected_pollers_isnotempty then
+      self.sc_logger:debug("[sc_event:is_valid_poller]: accepting event because host with id: " .. tostring(self.event.host_id)
+        .. " is not linked to a rejected poller (no poller found in cache). Rejected pollers are: " .. self.params.rejected_pollers)
+      return true
+    end
+  end
+
+  local accepted_poller_name = self:find_poller_in_list(self.params.accepted_pollers)
+  local rejected_poller_name = self:find_poller_in_list(self.params.rejected_pollers)
+
+  -- return false if the host is not monitored from a valid poller
+  if accepted_pollers_isnotempty and not accepted_poller_name then
+    self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id)
+      .. " is not linked to an accepted poller. Host is monitored from: " .. tostring(self.event.cache.poller) .. ". Accepted pollers are: " .. self.params.accepted_pollers)
+    return false
+  elseif rejected_pollers_isnotempty and rejected_poller_name then
+    self.sc_logger:debug("[sc_event:is_valid_poller]: dropping event because host with id: " .. tostring(self.event.host_id)
+      .. " is linked to a rejected poller. Host is monitored from: " .. tostring(self.event.cache.poller) .. ". Rejected pollers are: " .. self.params.rejected_pollers)
+    return false
+  else
+    self.sc_logger:debug("[sc_event:is_valid_poller]: event for host with id: " .. tostring(self.event.host_id)
+      .. " matched poller: " .. tostring(accepted_poller_name))
+  end
+
+  return true
+end
+--- find_poller_in_list: compare accepted pollers from parameters with the event poller
+-- @param pollers_list (string) a comma-separated list of poller names
+-- @return poller_name or false (string|boolean) the name of the first matching poller if found or false if not found
+function ScEvent:find_poller_in_list(pollers_list)
+  if pollers_list == nil or pollers_list == "" then
+    return false
+  else
+    for _, poller_name in ipairs(self.sc_common:split(pollers_list, ",")) do
+      if poller_name == self.event.cache.poller then
+        return poller_name
+      end
+    end
+  end
+  return false
+end
+
+--- is_valid_host_severity: check if the host severity is accepted
+-- @return true|false (boolean)
+function ScEvent:is_valid_host_severity()
+  -- initiate the severity table in the cache if it doesn't exist
+  if not self.event.cache.severity then
+    self.event.cache.severity = {}
+  end
+
+  -- get the severity of the host from the broker cache
+  self.event.cache.severity.host = self.sc_broker:get_severity(self.event.host_id)
+
+  -- return true if there is no severity filter
+  if self.params.host_severity_threshold == nil then
+    return true
+  end
+
+  -- return false if the host severity doesn't match
+  if not self.sc_common:compare_numbers(self.params.host_severity_threshold, self.event.cache.severity.host, self.params.host_severity_operator) then
+    self.sc_logger:debug("[sc_event:is_valid_host_severity]: dropping event because host with id: " .. tostring(self.event.host_id) .. " has an invalid severity. Severity is: "
+      .. tostring(self.event.cache.severity.host) .. ". host_severity_threshold (" .. tostring(self.params.host_severity_threshold) .. ") is not " .. self.params.host_severity_operator
+      .. " the severity of the host (" .. tostring(self.event.cache.severity.host) .. ")")
+    return false
+  end
+
+  return true
+end
+
+--- is_valid_service_severity: check if the service severity is accepted
+-- @return true|false (boolean)
+function ScEvent:is_valid_service_severity()
+  -- initiate the severity table in the cache if it doesn't exist
+  if not self.event.cache.severity then
+    self.event.cache.severity = {}
+  end
+
+  -- get the severity of the service from the broker cache
+  self.event.cache.severity.service = self.sc_broker:get_severity(self.event.host_id, self.event.service_id)
+
+  -- return true if there is no severity filter
+  if self.params.service_severity_threshold == nil then
+    return true
+  end
+
+  -- return false if the service severity doesn't match
+  if not self.sc_common:compare_numbers(self.params.service_severity_threshold, self.event.cache.severity.service, self.params.service_severity_operator) then
+    self.sc_logger:debug("[sc_event:is_valid_service_severity]: dropping event because service with id: " .. tostring(self.event.service_id) .. " has an invalid severity. Severity is: "
+      .. tostring(self.event.cache.severity.service) .. ". service_severity_threshold (" .. tostring(self.params.service_severity_threshold) .. ") is not " .. self.params.service_severity_operator
+      .. " the severity of the service (" .. tostring(self.event.cache.severity.service) .. ")")
+    return false
+  end
+
+  return true
+end
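The severity filters above delegate the threshold test to sc_common:compare_numbers, which is defined elsewhere in the library. A simplified, hypothetical stand-in that shows the shape of that comparison:

```lua
-- Hypothetical stand-in for sc_common:compare_numbers as used by the severity
-- filters: it applies `operator` between the configured threshold and the
-- cached severity value. Only the operators shown here are assumed.
local function compare_numbers(threshold, value, operator)
  if operator == ">=" then return threshold >= value end
  if operator == "<=" then return threshold <= value end
  if operator == "==" then return threshold == value end
  return false  -- unknown operator: reject the event
end

-- keep events only while host_severity_threshold >= host severity
print(compare_numbers(3, 1, ">="))  -- true: severity 1 passes a threshold of 3
print(compare_numbers(3, 5, ">="))  -- false: severity 5 is filtered out
```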
+--- is_valid_acknowledgement_event: check if the event is a valid acknowledgement event
+-- @return true|false (boolean)
+function ScEvent:is_valid_acknowledgement_event()
+  -- return false if we can't get the hostname or the host id is nil
+  if not self:is_valid_host() then
+    self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated")
+    return false
+  end
+
+  -- check if the ack author is valid
+  if not self:is_valid_author() then
+    self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: acknowledgement on host: " .. tostring(self.event.host_id)
+      .. " and service: " .. tostring(self.event.service_id) .. " (0 means the ack is on the host) is not made by a valid author. Author is: "
+      .. tostring(self.event.author) .. ". Accepted authors are: " .. self.params.accepted_authors)
+    return false
+  end
+
+  -- return false if the host is not monitored from an accepted poller
+  if not self:is_valid_poller() then
+    self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. tostring(self.event.host_id) .. " is not monitored from an accepted poller")
+    return false
+  end
+
+  -- return false if the host does not have an accepted severity
+  if not self:is_valid_host_severity() then
+    self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service id: " .. tostring(self.event.service_id)
+      .. ". host_id: " .. tostring(self.event.host_id) .. ". Host does not have an accepted severity")
+    return false
+  end
+
+  local event_status = ""
+  -- service_id = 0 means the ack is on a host
+  if self.event.type == 0 then
+    -- use the dedicated ack host status configuration or the host_status configuration
+    event_status = self.sc_common:ifnil_or_empty(self.params.ack_host_status, self.params.host_status)
+
+    -- return false if the event status is not accepted
+    if not self:is_valid_event_status(event_status) then
+      self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: host_id: " .. tostring(self.event.host_id)
+        .. " does not have an accepted status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.params.bbdo.elements.host_status.id][self.event.state]))
+      return false
+    end
+  -- service_id != 0 means the ack is on a service
+  else
+    -- return false if we can't get the service description or the service id is nil
+    if not self:is_valid_service() then
+      self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't been validated")
+      return false
+    end
+
+    -- use the dedicated ack service status configuration or the service_status configuration
+    event_status = self.sc_common:ifnil_or_empty(self.params.ack_service_status, self.params.service_status)
+
+    -- return false if the event status is not accepted
+    if not self:is_valid_event_status(event_status) then
+      self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service with id: " .. tostring(self.event.service_id)
+        .. " does not have an accepted status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.params.bbdo.elements.service_status.id][self.event.state]))
+      return false
+    end
+
+    -- return false if the service does not have an accepted severity
+    if not self:is_valid_service_severity() then
+      self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service id: " .. tostring(self.event.service_id)
+        .. ". host_id: " .. tostring(self.event.host_id) .. ". Service does not have an accepted severity")
+      return false
+    end
+
+    -- return false if the service is not in an accepted servicegroup
+    if not self:is_valid_servicegroup() then
+      self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup")
+      return false
+    end
+  end
" is not in an accepted servicegroup") + return false + end + end + + -- return false if host is not in an accepted hostgroup + if not self:is_valid_hostgroup() then + self.sc_logger:warning("[sc_event:is_valid_acknowledgement_event]: service_id: " .. tostring(self.event.service_id) + .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.event.host_id)) + return false + end + + return true +end + +--- is_vaid_downtime_event: check if the event is a valid downtime event +-- return true|false (boolean) +function ScEvent:is_valid_downtime_event() + -- return false if the event is one of all the "fake" start or end downtime event received from broker + if not self:is_downtime_event_useless() then + self.sc_logger:debug("[sc_event:is_valid_downtime_event]: dropping downtime event because it is not a start nor end of downtime event.") + return false + end + + -- return false if we can't get hostname or host id is nil + if not self:is_valid_host() then + self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated") + return false + end + + -- check if downtime author is valid + if not self:is_valid_author() then + self.sc_logger:warning("[sc_event:is_valid_downtime_event]: downtime with internal ID: " .. tostring(self.event.internal_id) + .. " is not made by a valid author. Author is: " .. tostring(self.event.author) .. " Accepted authors are: " .. self.params.accepted_authors) + return false + end + + -- return false if host is not monitored from an accepted poller + if not self:is_valid_poller() then + self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) .. " is not monitored from an accepted poller") + return false + end + + -- this is a host event + if self.event.type == 2 then + -- store the result in the self.event.state because doing that allow us to use the is_valid_event_status method + self.event.state = self:get_downtime_host_status() + + -- checks if the current host downtime state is an accpeted status + if not self:is_valid_event_status(self.params.dt_host_status) then + self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) + .. " do not have a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) + .. " Accepted states are: " .. tostring(self.params.dt_host_status)) + return false + end + else + -- return false if we can't get service description or service id is nil + if not self:is_valid_service() then + self.sc_logger:warning("[sc_event:is_valid_service_status_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't been validated") + return false + end + + -- store the result in the self.event.state because doing that allow us to use the is_valid_event_status method + self.event.state = self:get_downtime_service_status() + + -- return false if event status is not accepted + if not self:is_valid_event_status(self.params.dt_service_status) then + self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service with id: " .. tostring(self.event.service_id) + .. " hasn't a validated status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state]) + .. " Accepted states are: " .. 
+--- is_valid_downtime_event: check if the event is a valid downtime event
+-- @return true|false (boolean)
+function ScEvent:is_valid_downtime_event()
+  -- return false if the event is one of the many "fake" start or end downtime events received from broker
+  -- (is_downtime_event_useless returns true only for real start/end of downtime events)
+  if not self:is_downtime_event_useless() then
+    self.sc_logger:debug("[sc_event:is_valid_downtime_event]: dropping downtime event because it is not a start nor an end of downtime event.")
+    return false
+  end
+
+  -- return false if we can't get the hostname or the host id is nil
+  if not self:is_valid_host() then
+    self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) .. " hasn't been validated")
+    return false
+  end
+
+  -- check if the downtime author is valid
+  if not self:is_valid_author() then
+    self.sc_logger:warning("[sc_event:is_valid_downtime_event]: downtime with internal ID: " .. tostring(self.event.internal_id)
+      .. " is not made by a valid author. Author is: " .. tostring(self.event.author) .. ". Accepted authors are: " .. self.params.accepted_authors)
+    return false
+  end
+
+  -- return false if the host is not monitored from an accepted poller
+  if not self:is_valid_poller() then
+    self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id) .. " is not monitored from an accepted poller")
+    return false
+  end
+
+  -- this is a host event
+  if self.event.type == 2 then
+    -- store the result in self.event.state because doing so allows us to reuse the is_valid_event_status method
+    self.event.state = self:get_downtime_host_status()
+
+    -- check if the current host downtime state is an accepted status
+    if not self:is_valid_event_status(self.params.dt_host_status) then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: host_id: " .. tostring(self.event.host_id)
+        .. " does not have an accepted status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state])
+        .. " Accepted states are: " .. tostring(self.params.dt_host_status))
+      return false
+    end
+  else
+    -- return false if we can't get the service description or the service id is nil
+    if not self:is_valid_service() then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service with id: " .. tostring(self.event.service_id) .. " hasn't been validated")
+      return false
+    end
+
+    -- store the result in self.event.state because doing so allows us to reuse the is_valid_event_status method
+    self.event.state = self:get_downtime_service_status()
+
+    -- return false if the event status is not accepted
+    if not self:is_valid_event_status(self.params.dt_service_status) then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service with id: " .. tostring(self.event.service_id)
+        .. " does not have an accepted status. Status: " .. tostring(self.params.status_mapping[self.event.category][self.event.element][self.event.type][self.event.state])
+        .. " Accepted states are: " .. tostring(self.params.dt_service_status))
+      return false
+    end
+
+    -- return false if the service does not have an accepted severity
+    if not self:is_valid_service_severity() then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service id: " .. tostring(self.event.service_id)
+        .. ". host_id: " .. tostring(self.event.host_id) .. ". Service does not have an accepted severity")
+      return false
+    end
+
+    -- return false if the service is not in an accepted servicegroup
+    if not self:is_valid_servicegroup() then
+      self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service_id: " .. tostring(self.event.service_id) .. " is not in an accepted servicegroup")
+      return false
+    end
+  end
+
+  -- return false if the host is not in an accepted hostgroup
+  if not self:is_valid_hostgroup() then
+    self.sc_logger:warning("[sc_event:is_valid_downtime_event]: service_id: " .. tostring(self.event.service_id)
+      .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.event.host_id))
+    return false
+  end
+
+  return true
+end
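is_valid_event_status is defined earlier in this module and is not visible in this hunk; the sketch below is a simplified, hypothetical stand-in showing the accepted-status membership test the downtime validator relies on, with dt_host_status-style values as a comma-separated string:

```lua
-- Hypothetical stand-in for the accepted-status check used above.
-- accepted_status_list is a comma-separated string such as "1,2";
-- state is the numeric status stored in event.state by the validator.
local function status_is_accepted(state, accepted_status_list)
  for status in string.gmatch(accepted_status_list, "([^,]+)") do
    if tonumber(status) == state then
      return true
    end
  end
  return false
end

print(status_is_accepted(1, "1,2"))  -- true: DOWN is accepted
print(status_is_accepted(0, "1,2"))  -- false: UP is filtered out
```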
+--- is_valid_author: check if the author of a comment is valid based on the contact alias in Centreon
+-- @return true|false (boolean)
+function ScEvent:is_valid_author()
+  -- return true if options are not set or if both options are set
+  local accepted_authors_isnotempty = self.params.accepted_authors ~= ""
+  local rejected_authors_isnotempty = self.params.rejected_authors ~= ""
+  if (not accepted_authors_isnotempty and not rejected_authors_isnotempty) or (accepted_authors_isnotempty and rejected_authors_isnotempty) then
+    return true
+  end
+
+  -- check if the author is accepted
+  local accepted_author_name = self:find_author_in_list(self.params.accepted_authors)
+  local rejected_author_name = self:find_author_in_list(self.params.rejected_authors)
+  if accepted_authors_isnotempty and not accepted_author_name then
+    self.sc_logger:debug("[sc_event:is_valid_author]: dropping event because author: " .. tostring(self.event.author)
+      .. " is not in the accepted authors list. Accepted authors are: " .. self.params.accepted_authors)
+    return false
+  elseif rejected_authors_isnotempty and rejected_author_name then
+    self.sc_logger:debug("[sc_event:is_valid_author]: dropping event because author: " .. tostring(self.event.author)
+      .. " is in the rejected authors list. Rejected authors are: " .. self.params.rejected_authors)
+    return false
+  end
+
+  return true
+end
+
+--- find_author_in_list: compare accepted authors from parameters with the event author
+-- @param authors_list (string) a comma-separated list of author names
+-- @return accepted_alias or false (string|boolean) the alias of the first matching author if found or false if not found
+function ScEvent:find_author_in_list(authors_list)
+  if authors_list == nil or authors_list == "" then
+    return false
+  else
+    for _, author_alias in ipairs(self.sc_common:split(authors_list, ",")) do
+      if author_alias == self.event.author then
+        return author_alias
+      end
+    end
+  end
+  return false
+end
+
+--- get_downtime_host_status: retrieve the status of a host based on the last_time_up/down dates found in cache (self.event.cache.host must be set)
+-- @return status (number|string) the status code of the host, or "N/A" when the cache is not filled
+function ScEvent:get_downtime_host_status()
+  -- if the cache is not filled we can't get the state of the host
+  if not self.event.cache.host.last_time_up or not self.event.cache.host.last_time_down then
+    return "N/A"
+  end
+
+  -- map the last known date of each status to its status code
+  local timestamp = {
+    [0] = tonumber(self.event.cache.host.last_time_up),
+    [1] = tonumber(self.event.cache.host.last_time_down)
+  }
+
+  return self:get_most_recent_status_code(timestamp)
+end
+
+--- get_downtime_service_status: retrieve the status of a service based on the last_time_ok/warning/critical/unknown dates found in cache (self.event.cache.service must be set)
+-- @return status (number|string) the status code of the service, or "N/A" when the cache is not filled
+function ScEvent:get_downtime_service_status()
+  -- if the cache is not filled we can't get the state of the service
+  if
+    not self.event.cache.service.last_time_ok
+    or not self.event.cache.service.last_time_warning
+    or not self.event.cache.service.last_time_critical
+    or not self.event.cache.service.last_time_unknown
+  then
+    return "N/A"
+  end
+
+  -- map the last known date of each status to its status code
+  local timestamp = {
+    [0] = tonumber(self.event.cache.service.last_time_ok),
+    [1] = tonumber(self.event.cache.service.last_time_warning),
+    [2] = tonumber(self.event.cache.service.last_time_critical),
+    [3] = tonumber(self.event.cache.service.last_time_unknown)
+  }
+
+  return self:get_most_recent_status_code(timestamp)
+end
+
+--- get_most_recent_status_code: retrieve the last status code from a list of statuses and timestamps
+-- @param timestamp (table) a table mapping each status code to the last known timestamp of that status
+-- @return status (number) the most recent status code of the object
+function ScEvent:get_most_recent_status_code(timestamp)
+  -- prepare the table in which the latest known status timestamp and status code will be stored
+  local status_info = {
+    highest_timestamp = 0,
+    status = nil
+  }
+
+  -- compare all status timestamps and keep the most recent one and the corresponding status code
+  -- (pairs is required here: the table is indexed from 0 and ipairs would skip index 0)
+  for status_code, status_timestamp in pairs(timestamp) do
+    if status_timestamp > status_info.highest_timestamp then
+      status_info.highest_timestamp = status_timestamp
+      status_info.status = status_code
+    end
+  end
+
+  return status_info.status
+end
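The timestamp tables above are indexed from 0 because status codes start at 0 (UP/OK). This is why the iteration uses pairs: in Lua, ipairs starts at index 1 and would never visit the entry at index 0. A small demonstration:

```lua
-- Demonstration of the 0-indexed timestamp table used above. With ipairs the
-- entry at index 0 (UP/OK) would never be visited; pairs visits every key.
local timestamp = {
  [0] = 1700000300,  -- last_time_up (most recent)
  [1] = 1700000100,  -- last_time_down
}

local most_recent_code, highest = nil, 0
for status_code, ts in pairs(timestamp) do
  if ts > highest then
    highest, most_recent_code = ts, status_code
  end
end
print(most_recent_code)  -- 0: the host was most recently UP
```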
self.sc_logger:debug("[sc_event:is_service_status_event_duplicated]: service status is not enabled option enable_service_status_dedup is set to: " .. tostring(self.params.enable_service_status_dedup)) + return false + end + + -- if last check is the same than last_hard_state_change, it means the event just change its status so it cannot be a duplicated event + if self.event.last_hard_state_change == self.event.last_check or self.event.last_hard_state_change == self.event.last_update then + return false + end + + return true + --[[ + IT LOOKS LIKE THIS PIECE OF CODE IS USELESS + + -- map the status known dates to their respective status code + local timestamp = { + [0] = tonumber(self.event.cache.service.last_time_ok), + [1] = tonumber(self.event.cache.service.last_time_warning), + [2] = tonumber(self.event.cache.service.last_time_critical), + [3] = tonumber(self.event.cache.service.last_time_unknown) + } + + -- if we find a last time status older than the last_hard_state_change then we are not facing a duplicated event + for status_code, status_timestamp in ipairs(timestamp) do + -- of course it needs to be a different status code than the actual one + if status_code ~= self.event.state and status_timestamp >= self.event.last_hard_state_change then + return false + end + end + + -- at the end, it only remains two cases, the first one is a duplicated event. The second one is when we have: + -- OK(H) --> NOT-OK(S) --> OK(H) + ]]-- +end + +--- is_host_status_event_duplicated: check if the host event is the same than the last one (will not work for UP(H) -> DOWN(S) -> UP(H)) +-- @return true|false (boolean) +function ScEvent:is_host_status_event_duplicated() + -- return false if option is not activated + if self.params.enable_host_status_dedup ~= 1 then + self.sc_logger:debug("[sc_event:is_host_status_event_duplicated]: host status is not enabled option enable_host_status_dedup is set to: " .. tostring(self.params.enable_host_status_dedup)) + return false + end + + -- if last check is the same than last_hard_state_change, it means the event just change its status so it cannot be a duplicated event + if self.event.last_hard_state_change == self.event.last_check or self.event.last_hard_state_change == self.event.last_update then + return false + end + + return true + --[[ + IT LOOKS LIKE THIS PIECE OF CODE IS USELESS + -- map the status known dates to their respective status code + local timestamp = { + [0] = tonumber(self.event.cache.service.last_time_up), + [1] = tonumber(self.event.cache.service.last_time_down), + [2] = tonumber(self.event.cache.service.last_time_unreachable), + } + + -- if we find a last time status older than the last_hard_state_change then we are not facing a duplicated event + for status_code, status_timestamp in ipairs(timestamp) do + -- of course it needs to be a different status code than the actual one + if status_code ~= self.event.state and status_timestamp >= self.event.last_hard_state_change then + return false + end + end + + -- at the end, it only remains two cases, the first one is a duplicated event. The second one is when we have: + -- UP(H) --> NOT-UP(S) --> UP(H) + ]]-- +end + + +--- is_downtime_event_useless: the purpose of this method is to filter out unnecessary downtime event. 
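The live deduplication test reduces to a single timestamp comparison: an event whose last_hard_state_change equals its last_check (or last_update) has just changed state and is therefore kept. A tiny illustration with hypothetical field values:

```lua
-- Illustration of the deduplication test above; field values are hypothetical.
local function is_duplicated(event)
  return event.last_hard_state_change ~= event.last_check
     and event.last_hard_state_change ~= event.last_update
end

local fresh    = { last_hard_state_change = 100, last_check = 100, last_update = 100 }
local repeated = { last_hard_state_change = 100, last_check = 160, last_update = 160 }
print(is_duplicated(fresh))     -- false: state just changed, keep it
print(is_duplicated(repeated))  -- true: same hard state re-emitted, drop it
```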
+--- is_downtime_event_useless: filter out unnecessary downtime events. Broker sends many
+-- downtime events before sending the one we actually want
+-- @return true|false (boolean) true when the event is a real start or end of downtime
+function ScEvent:is_downtime_event_useless()
+  -- return true if the downtime event is a valid start of downtime event
+  if self:is_valid_downtime_event_start() then
+    return true
+  end
+
+  -- return true if the downtime event is a valid end of downtime event
+  if self:is_valid_downtime_event_end() then
+    return true
+  end
+
+  return false
+end
+
+--- is_valid_downtime_event_start: make sure that the event is the one notifying us that a downtime has just started
+-- @return true|false (boolean)
+function ScEvent:is_valid_downtime_event_start()
+  -- event is about the end of the downtime (the actual_end_time key is not present in a start downtime bbdo2 event;
+  -- with bbdo3 the value is set to -1)
+  if (self.bbdo_version > 2 and self.event.actual_end_time ~= -1) or (self.bbdo_version == 2 and self.event.actual_end_time) then
+    self.sc_logger:debug("[sc_event:is_valid_downtime_event_start]: actual_end_time found in the downtime event (bbdo v2) or its value is not -1 (bbdo v3). It can't be a downtime start event")
+    return false
+  end
+
+  -- the downtime hasn't actually started until the actual_start_time key is present in the start downtime bbdo2 event;
+  -- with bbdo3 the downtime is not started until the value is a valid timestamp
+  if (not self.event.actual_start_time and self.bbdo_version == 2) or (self.event.actual_start_time == -1 and self.bbdo_version > 2) then
+    self.sc_logger:debug("[sc_event:is_valid_downtime_event_start]: actual_start_time not found in the downtime event (or value set to -1). The downtime hasn't started yet")
+    return false
+  end
+
+  -- start compat patch bbdo2 => bbdo3
+  if (not self.event.internal_id and self.event.id) then
+    self.event.internal_id = self.event.id
+  end
+
+  if (not self.event.id and self.event.internal_id) then
+    self.event.id = self.event.internal_id
+  end
+  -- end compat patch
+
+  return true
+end
+
+--- is_valid_downtime_event_end: make sure that the event is the one notifying us that a downtime has just ended
+-- @return true|false (boolean)
+function ScEvent:is_valid_downtime_event_end()
+  -- event is about the end of the downtime (the deletion_time key is only present in an end downtime event)
+  if (self.bbdo_version == 2 and self.event.deletion_time) or (self.bbdo_version > 2 and self.event.deletion_time ~= -1) then
+    -- start compat patch bbdo2 => bbdo3
+    if (not self.event.internal_id and self.event.id) then
+      self.event.internal_id = self.event.id
+    end
+
+    if (not self.event.id and self.event.internal_id) then
+      self.event.id = self.event.internal_id
+    end
+    -- end compat patch
+
+    return true
+  end
+
+  -- any other downtime event is not about the actual end of a downtime so we return false
+  self.sc_logger:debug("[sc_event:is_valid_downtime_event_end]: deletion_time not found in the downtime event or equal to -1. The downtime event is not about the end of a downtime")
+  return false
+end
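The start/end detection hinges on how the marker fields are encoded: with bbdo v2 the keys are simply absent until set, while with bbdo v3 they hold -1. A small sketch with hypothetical field values:

```lua
-- Hypothetical bbdo v3 field shapes for the start/end detection above:
-- marker fields hold -1 until Broker fills them with a real timestamp.
local start_v3 = { actual_start_time = 1700000000, actual_end_time = -1, deletion_time = -1 }
local end_v3   = { actual_start_time = 1700000000, actual_end_time = 1700003600, deletion_time = 1700003600 }

local function is_start_v3(dt) return dt.actual_end_time == -1 and dt.actual_start_time ~= -1 end
print(is_start_v3(start_v3))  -- true
print(is_start_v3(end_v3))    -- false
```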
+--- build_outputs: adds short_output and long_output entries in the event table. The output entry will be equal to one or the other depending on the use_long_output param
+function ScEvent:build_outputs()
+  -- build long output
+  if self.event.long_output and self.event.long_output ~= "" then
+    self.event.long_output = self.event.output .. "\n" .. self.event.long_output
+  else
+    self.event.long_output = self.event.output
+  end
+
+  -- no short output if there is no line break
+  local short_output = string.match(self.event.output, "^(.*)\n")
+  if short_output then
+    self.event.short_output = short_output
+  else
+    self.event.short_output = self.event.output
+  end
+
+  -- use the short output if it exists
+  if self.params.use_long_output == 0 and short_output then
+    self.event.output = short_output
+
+  -- replace line breaks if asked to and we are not already using a short output
+  elseif not short_output and self.params.remove_line_break_in_output == 1 then
+    self.event.output = string.gsub(self.event.output, "\n", self.params.output_line_break_replacement_character)
+  end
+
+  if self.params.output_size_limit ~= "" then
+    self.event.output = string.sub(self.event.output, 1, self.params.output_size_limit)
+    self.event.short_output = string.sub(self.event.short_output, 1, self.params.output_size_limit)
+  end
+end
+
+--- is_valid_storage_event: DEPRECATED method, use the NEB category to get metric data instead
+-- @return true (boolean)
+function ScEvent:is_valid_storage_event()
+  return true
+end
+
+return sc_event
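Before moving to sc_flush below, here is a hypothetical, self-contained sketch of how a connector combines the event filters above with the queue structure that sc_flush exposes. Everything in it is a stub: real connectors receive params, events, and callbacks from Centreon Broker, and the category/element ids are illustrative.

```lua
-- Hypothetical stand-in queue shaped like ScFlush.queues[category][element];
-- category 1 / element 14 are used here purely as example ids.
local queues = { [1] = { [14] = { events = {} } } }

local function queue_event(event)
  if event.valid then  -- in real code: the is_valid_* filters above
    table.insert(queues[event.category][event.element].events, event)
  end
end

queue_event({ valid = true, category = 1, element = 14, host_id = 12 })
queue_event({ valid = false, category = 1, element = 14, host_id = 13 })
print(#queues[1][14].events)  -- 1: only the valid event was queued
```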
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua new file mode 100644 index 00000000000..223bf2aae17 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_flush.lua @@ -0,0 +1,222 @@
+#!/usr/bin/lua
+
+---
+-- Module that handles data queues for stream connectors
+-- @module sc_flush
+-- @alias sc_flush
+local sc_flush = {}
+
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+
+local ScFlush = {}
+
+--- sc_flush.new: sc_flush constructor
+-- @param params (table) the params table of the stream connector
+-- @param [opt] logger (object) a sc_logger object
+function sc_flush.new(params, logger)
+  local self = {}
+
+  -- create a default logger if it is not provided
+  self.sc_logger = logger
+  if not self.sc_logger then
+    self.sc_logger = sc_logger.new()
+  end
+
+  self.sc_common = sc_common.new(self.sc_logger)
+
+  self.params = params
+  self.last_global_flush = os.time()
+
+  local categories = self.params.bbdo.categories
+  local elements = self.params.bbdo.elements
+
+  self.queues = {
+    [categories.neb.id] = {},
+    [categories.storage.id] = {},
+    [categories.bam.id] = {},
+    global_queues_metadata = {}
+  }
+
+  -- link event queues to their respective categories and elements
+  for element_name, element_info in pairs(self.params.accepted_elements_info) do
+    self.queues[element_info.category_id][element_info.element_id] = {
+      events = {},
+      queue_metadata = {
+        category_id = element_info.category_id,
+        element_id = element_info.element_id
+      }
+    }
+  end
+
+  setmetatable(self, { __index = ScFlush })
+  return self
+end
+
+--- add_queue_metadata: add specific metadata to a queue
+-- @param category_id (number) the id of the bbdo category
+-- @param element_id (number) the id of the bbdo element
+-- @param metadata (table) a table whose keys are the metadata names and whose values are the metadata values
+function ScFlush:add_queue_metadata(category_id, element_id, metadata)
+  if not self.queues[category_id] then
+    self.sc_logger:warning("[ScFlush:add_queue_metadata]: can't add queue metadata for category: " .. self.params.reverse_category_mapping[category_id]
+      .. " (id: " .. category_id .. ") and element: " .. self.params.reverse_element_mapping[category_id][element_id] .. " (id: " .. element_id .. ")."
+      .. " You need to accept this category with the parameter 'accepted_categories'.")
+    return
+  end
+
+  if not self.queues[category_id][element_id] then
+    self.sc_logger:warning("[ScFlush:add_queue_metadata]: can't add queue metadata for category: " .. self.params.reverse_category_mapping[category_id]
+      .. " (id: " .. category_id .. ") and element: " .. self.params.reverse_element_mapping[category_id][element_id] .. " (id: " .. element_id .. ")."
+      .. " You need to accept this element with the parameter 'accepted_elements'.")
+    return
+  end
+
+  for metadata_name, metadata_value in pairs(metadata) do
+    self.queues[category_id][element_id].queue_metadata[metadata_name] = metadata_value
+  end
+end
+
+--- flush_all_queues: try to flush all queues according to accepted elements
+-- @param build_payload_method (function) the stream connector function that concatenates events into the payload
+-- @param send_method (function) the stream connector function that sends the data to the target tool
+-- @return boolean (boolean) true if the flush succeeded, false otherwise
+function ScFlush:flush_all_queues(build_payload_method, send_method)
+  if self.params.send_mixed_events == 1 then
+    if not self:flush_mixed_payload(build_payload_method, send_method) then
+      return false
+    end
+  else
+    if not self:flush_homogeneous_payload(build_payload_method, send_method) then
+      return false
+    end
+  end
+
+  self:reset_all_queues()
+  return true
+end
+
+--- reset_all_queues: put all queues back to their initial state after flushing their events
+function ScFlush:reset_all_queues()
+  for _, element_info in pairs(self.params.accepted_elements_info) do
+    self.queues[element_info.category_id][element_info.element_id].events = {}
+  end
+
+  self.last_global_flush = os.time()
+end
+
+--- get_queues_size: get the number of events stored in all the queues
+-- @return queues_size (number) the number of events stored in all queues
+function ScFlush:get_queues_size()
+  local queues_size = 0
+
+  for _, element_info in pairs(self.params.accepted_elements_info) do
+    queues_size = queues_size + #self.queues[element_info.category_id][element_info.element_id].events
+    self.sc_logger:debug("[sc_flush:get_queues_size]: size of queue for category " .. tostring(element_info.category_name)
+      .. " and element: " .. tostring(element_info.element_name)
+      .. " is: " .. 
tostring(#self.queues[element_info.category_id][element_info.element_id].events)) + end + + return queues_size +end + +--- flush_mixed_payload: flush a payload that contains various type of events (services mixed hosts for example) +-- @return boolean (boolean) true or false depending on the success of the operation +function ScFlush:flush_mixed_payload(build_payload_method, send_method) + local payload = nil + local counter = 0 + + -- get all queues + for _, element_info in pairs(self.params.accepted_elements_info) do + -- get events from queues + for _, event in ipairs(self.queues[element_info.category_id][element_info.element_id].events) do + -- add event to the payload + payload = build_payload_method(payload, event) + counter = counter + 1 + + -- send events if max buffer size is reached + if counter >= self.params.max_buffer_size then + if not self:flush_payload(send_method, payload, self.queues.global_queues_metadata) then + return false + end + + -- reset payload and counter because events have been sent + payload = nil + counter = 0 + end + end + end + + -- we need to empty all queues to not mess with broker retention + if not self:flush_payload(send_method, payload, self.queues.global_queues_metadata) then + return false + end + + -- all events have been sent + return true +end + +--- flush_homogeneous_payload: flush a payload that contains a single type of events (services with services only and hosts with hosts only for example) +-- @return boolean (boolean) true or false depending on the success of the operation +function ScFlush:flush_homogeneous_payload(build_payload_method, send_method) + local counter = 0 + local payload = nil + + -- get all queues + for _, element_info in pairs(self.params.accepted_elements_info) do + -- get events from queues + for _, event in ipairs(self.queues[element_info.category_id][element_info.element_id].events) do + -- add event to the payload + payload = build_payload_method(payload, event) + counter = counter + 1 + + -- send events if max buffer size is reached + if counter >= self.params.max_buffer_size then + if not self:flush_payload( + send_method, + payload, + self.queues[element_info.category_id][element_info.element_id].queue_metadata + ) then + return false + end + + -- reset payload and counter because events have been sent + counter = 0 + payload = nil + end + end + + -- make sure there are no events left inside a specific queue + if not self:flush_payload( + send_method, + payload, + self.queues[element_info.category_id][element_info.element_id].queue_metadata + ) then + return false + end + + -- reset payload to not mix events from different queues + payload = nil + end + + return true +end + +--- flush_payload: flush a given payload by sending it using the given send function +-- @param send_method (function) the function that will be used to send the payload +-- @param payload (any) the data that needs to be sent +-- @param metadata (table) all metadata for the payload +-- @return boolean (boolean) true or false depending on the success of the operation +function ScFlush:flush_payload(send_method, payload, metadata) + if payload then + if not send_method(payload, metadata) then + return false + end + end + + return true +end + +return sc_flush \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua new file mode 100644 index 00000000000..a41f7320eec --- /dev/null +++ 
b/stream-connectors/modules/centreon-stream-connectors-lib/sc_logger.lua @@ -0,0 +1,158 @@
+#!/usr/bin/lua
+
+---
+-- Logging module for centreon stream connectors
+-- @module sc_logger
+-- @alias sc_logger
+
+local sc_logger = {}
+
+--- build_message: prepare the log message
+-- @param severity (string) the severity of the message (WARNING, CRITICAL...)
+-- @param message (string) the log message
+-- @return output (string) the formatted log message
+local function build_message(severity, message)
+  local date = os.date("%a %b %d %H:%M:%S %Y")
+  local output = date .. ": " .. severity .. ": " .. message .. "\n"
+
+  return output
+end
+
+--- write_message: write a message in a file
+-- @param message (string) the message to write
+-- @param logfile (string) the file in which the message will be written
+local function write_message(message, logfile)
+  local file = io.open(logfile, "a")
+  io.output(file)
+  io.write(message)
+  io.close(file)
+end
+
+--- file_logging: log a message in a file
+-- @param message (string) the message that needs to be written
+-- @param severity (string) the severity of the log
+-- @param logfile (string) the output file
+local function file_logging(message, severity, logfile)
+  write_message(build_message(severity, message), logfile)
+end
+
+local ScLogger = {}
+
+--- sc_logger.new: sc_logger constructor
+-- @param [opt] logfile (string) output file for logs
+-- @param [opt] severity (integer) the accepted severity level
+function sc_logger.new(logfile, severity)
+  local self = {}
+  self.severity = severity
+
+  if type(severity) ~= "number" then
+    self.severity = 1
+  end
+
+  self.logfile = logfile or "/var/log/centreon-broker/stream-connector.log"
+  broker_log:set_parameters(self.severity, self.logfile)
+
+  setmetatable(self, { __index = ScLogger })
+
+  return self
+end
+
+--- error: write an error message
+-- @param message (string) the message that will be written
+function ScLogger:error(message)
+  broker_log:error(1, message)
+end
+
+--- warning: write a warning message
+-- @param message (string) the message that will be written
+function ScLogger:warning(message)
+  broker_log:warning(2, message)
+end
+
+--- notice: write a notice message
+-- @param message (string) the message that will be written
+function ScLogger:notice(message)
+  broker_log:info(1, message)
+end
+
+--- info: write an informational message
+-- @param message (string) the message that will be written
+function ScLogger:info(message)
+  broker_log:info(2, message)
+end
+
+--- debug: write a debug message
+-- @param message (string) the message that will be written
+function ScLogger:debug(message)
+  broker_log:info(3, message)
+end
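A short usage sketch for the logger above. It assumes the broker_log global that Centreon Broker injects into the Lua runtime; the log path is illustrative.

```lua
-- Minimal usage sketch for sc_logger; only runs inside Broker where the
-- broker_log global exists. The logfile path is an example.
local sc_logger = require("centreon-stream-connectors-lib.sc_logger")

local logger = sc_logger.new("/var/log/centreon-broker/my-connector.log", 3)

logger:error("connection to the remote endpoint failed")
logger:debug("payload built, 42 events queued")  -- only written when severity >= 3
```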
+--- log_curl_command: build a shell curl command based on given parameters and write it in the logfile
+-- @param url (string) the url to which curl will send data
+-- @param metadata (table) a table that contains header information and the http method for curl
+-- @param params (table) the stream connector params table
+-- @param data (string) [opt] the data that must be sent by curl
+-- @param basic_auth (table) [opt] a table that contains the username and the password when using basic auth ({username = username, password = password})
function ScLogger:log_curl_command(url, metadata, params, data, basic_auth)
+  if params.log_curl_commands == 1 then
+    self:debug("[sc_logger:log_curl_command]: start computing the curl command")
+    local curl_string = "curl"
+
+    -- handle proxy
+    self:debug("[sc_logger:log_curl_command]: proxy information: protocol: " .. params.proxy_protocol .. ", address: "
+      .. params.proxy_address .. ", port: " .. params.proxy_port .. ", user: " .. params.proxy_username .. ", password: "
+      .. tostring(params.proxy_password))
+    local proxy_url
+
+    if params.proxy_address ~= "" then
+      if params.proxy_username ~= "" then
+        proxy_url = params.proxy_protocol .. "://" .. params.proxy_username .. ":" .. params.proxy_password
+          .. "@" .. params.proxy_address .. ":" .. params.proxy_port
+      else
+        proxy_url = params.proxy_protocol .. "://" .. params.proxy_address .. ":" .. params.proxy_port
+      end
+
+      curl_string = curl_string .. " --proxy '" .. proxy_url .. "'"
+    end
+
+    -- handle certificate verification
+    if params.allow_insecure_connection == 1 then
+      curl_string = curl_string .. " -k"
+    end
+
+    -- handle http method
+    if metadata.method then
+      curl_string = curl_string .. " -X " .. metadata.method
+    elseif data then
+      curl_string = curl_string .. " -X POST"
+    else
+      curl_string = curl_string .. " -X GET"
+    end
+
+    -- handle headers
+    if metadata.headers then
+      for _, header in ipairs(metadata.headers) do
+        curl_string = curl_string .. " -H '" .. tostring(header) .. "'"
+      end
+    end
+
+    curl_string = curl_string .. " '" .. tostring(url) .. "'"
+
+    -- handle curl data
+    if data and data ~= "" then
+      curl_string = curl_string .. " -d '" .. data .. "'"
+    end
+
+    -- handle http basic auth
+    if basic_auth then
+      curl_string = curl_string .. " -u '" .. basic_auth.username .. ":" .. basic_auth.password .. "'"
+    end
+
+    self:notice("[sc_logger:log_curl_command]: " .. curl_string)
+  else
+    self:debug("[sc_logger:log_curl_command]: curl command not logged because the log_curl_commands param is set to: " .. params.log_curl_commands)
+  end
+end
+
+return sc_logger
\ No newline at end of file
diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua new file mode 100644 index 00000000000..ebcaf51aa30 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_macros.lua @@ -0,0 +1,582 @@
+#!/usr/bin/lua
+
+---
+-- Module to handle centreon macros (e.g: $HOSTADDRESS$) and sc macros (e.g: {cache.host.address})
+-- @module sc_macros
+-- @alias sc_macros
+local sc_macros = {}
+
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+
+local ScMacros = {}
+
+--- sc_macros.new: sc_macros constructor
+-- @param params (table) the stream connector parameter table
+-- @param logger (object) object instance from the sc_logger module
+-- @param common (object) object instance from the sc_common module
function sc_macros.new(params, logger, common)
+  local self = {}
+
+  -- initiate mandatory libs
+  self.sc_logger = logger
+  if not self.sc_logger then
+    self.sc_logger = sc_logger.new()
+  end
+
+  self.sc_common = common
+  if not self.sc_common then
+    self.sc_common = sc_common.new(self.sc_logger)
+  end
+
+  -- initiate params
+  self.params = params
+
+  -- mapping to help get "group" type macro values
+  self.group_macro_conversion = {
+    hg = function(event, format, regex) return self:get_hg_macro(event, format, regex) end,
+    sg = function(event, format, regex) return self:get_sg_macro(event, format, regex) end,
+    bv = function(event, format, regex) return self:get_bv_macro(event, format, regex) end
+  }
+
+  -- mapping to help transform group macro values into a specific format
+  self.group_macro_format = {
+    table = function(data) return 
self:group_macro_format_table(data) end, + inline = function(data) return self:group_macro_format_inline(data) end + } + + -- mapping of macro that we will convert if asked + self.transform_macro = { + date = function (macro_value) return self:transform_date(macro_value) end, + type = function (macro_value) return self:transform_type(macro_value) end, + short = function (macro_value) return self:transform_short(macro_value) end, + state = function (macro_value, event) return self:transform_state(macro_value, event) end, + number = function (macro_value) return self:transform_number(macro_value) end, + string = function (macro_value) return self:transform_string(macro_value) end + } + + -- mapping of centreon standard macros to their stream connectors counterparts + self.centreon_macros = { + HOSTNAME = "{cache.host.name}", + HOSTDISPLAYNAME = "{cache.host.name}", + HOSTALIAS = "{cache.host.alias}", + HOSTADDRESS = "{cache.host.address}", + HOSTSTATE = "{cache.host.state_scstate}", + HOSTSTATEID = "{cache.host.state}", + LASTHOSTSTATE = "{cache.host.state_scstate}", + LASTHOSTSTATEID = "{cache.host.state}", + HOSTSTATETYPE = "{cache.host.state_type}", + HOSTATTEMPTS = "{cache.host.check_attempt}", + MAXHOSTATTEMPTS = "{cache.host.max_check_attempts}", + -- HOSTEVENTID doesn't exist + -- LASTHOSTEVENTID doesn't exist + -- HOSTPROBLEMID doesn't exist + -- LASTHOSTPROBLEMID doesn't exist + HOSTLATENCY = "{cache.host.latency}", + HOSTEXECUTIONTIME = "{cache.host.execution_time}", + -- HOSTDURATION doesn't exist + -- HOSTDURATIONSEC doesn't exist + HOSTDOWNTIME = "{cache.host.scheduled_downtime_depth}", + HOSTPERCENTCHANGE = "{percent_state_change}" , -- will be replaced by the service percent_state_change if event is about a service + -- HOSTGROUPNAME doesn't exist + -- HOSTGROUPNAMES doesn't exist + LASTHOSTCHECK = "{cache.host.last_check_value}", + LASTHOSTSTATECHANGE = "{cache.host.last_state_change}", + LASTHOSTUP = "{cache.host.last_time_up}", + LASTHOSTDOWN = "{cache.host.last_time_down}", + LASTHOSTUNREACHABLE = "{cache.host.last_time_unreachable}", + HOSTOUTPUT = "{cache.host.output_scshort}", + HOSTLONGOUTPUT = "{cache.host.output}", + HOSTPERFDATA = "{cache.host.perfdata}", + -- HOSTCHECKCOMMAND doesn't really exist + -- HOSTACKAUTHORS doesn't exist + -- HOSTACKAUTHORNAMES doesn't exist + -- HOSTACKAUTHORALIAS doesn't exist + -- HOSTACKAUTHORCOMMENT doesn't exist + HOSTACTIONURL = "{cache.host.action_url}", + HOSTNOTESURL = "{cache.host.notes_url}", + HOSTNOTES = "{cache.host.notes}", + -- TOTALHOSTSERVICES doesn't exist + -- TOTALHOSTSERVICESOK doesn't exist + -- TOTALHOSTSERVICESWARNING doesn't exist + -- TOTALHOSTSERVICESCRITICAL doesn't exist + -- TOTALHOSTSERVICESUNKNOWN doesn't exist + -- HOSTGROUPALIAS doesn't exist + -- HOSTGROUPMEMBERS doesn't exist + -- HOSTGROUPNOTES  doesn't exist + -- HOSTGROUPNOTESURL doesn't exist + -- HOSTGROUPACTIONURL doesn't exist + SERVICEDESC = "{cache.service.description}", + SERVICEDISPLAYNAME = "{cache.service.display_name}", + SERVICESTATE = "{cache.service.state_scstate}", + SERVICESTATEID = "{cache.service.state}", + LASTSERVICESTATE = "{cache.service.state_state}", + LASTSERVICESTATEID = "{cache.service.state}", + SERVICESTATETYPE = "{cache.service.state_type}", + SERVICEATTEMPT = "{cache.service.check_attempt}", + MAXSERVICEATTEMPTS = "{cache.service.max_check_attempts}", + SERVICEISVOLATILE = "{cache.service.volatile}", + -- SERVICEEVENTID doesn't exist + -- LASTSERVICEEVENTID doesn't exist + -- SERVICEPROBLEMID doesn't exist + -- 
LASTSERVICEPROBLEMID doesn't exist + SERVICELATENCY = "{cache.service.latency}", + SERVICEEXECUTIONTIME = "{cache.service.execution_time}", + -- SERVICEDURATION doesn't exist + -- SERVICEDURATIONSEC doesn't exist + SERVICEDOWNTIME = "{cache.service.scheduled_downtime_depth}", + SERVICEPERCENTCHANGE = "{percent_state_change}", + -- SERVICEGROUPNAME doesn't exist + -- SERVICEGROUPNAMES doesn't exist + LASTSERVICECHECK = "{cache.service.last_check_value}", + LASTSERVICESTATECHANGE = "{cache.service.last_state_change}", + LASTSERVICEOK = "{cache.service.last_time_ok}", + LASTSERVICEWARNING = "{cache.service.last_time_warning}", + LASTSERVICEUNKNOWN = "{cache.service.last_time_unknown}", + LASTSERVICECRITICAL = "{cache.service.last_time_critical}", + SERVICEOUTPUT = "{cache.service.output_scshort}", + LONGSERVICEOUTPUT = "{cache.service.output}", + SERVICEPERFDATA = "{cache.service.perfdata}", + -- SERVICECHECKCOMMAND doesn't exist + -- SERVICEACKAUTHOR doesn't exist + -- SERVICEACKAUTHORNAME  doesn't exist + -- SERVICEACKAUTHORALIAS doesn't exist + -- SERVICEACKCOMMENT doesn't exist + SERVICEACTIONURL = "{cache.service.action_url}", + SERVICENOTESURL = "{cache.service.notes_url}", + SERVICENOTES = "{cache.service.notes}" + -- SERVICEGROUPALIAS  doesn't exist + -- SERVICEGROUPMEMBERS  doesn't exist + -- SERVICEGROUPNOTES  doesn't exist + -- SERVICEGROUPNOTESURL doesn't exist + -- SERVICEGROUPACTIONURL doesn't exist + -- CONTACTNAME doesn't exist + -- CONTACTALIAS doesn't exist + -- CONTACTEMAIL doesn't exist + -- CONTACTPAGER doesn't exist + -- CONTACTADDRESS doesn't exist + -- CONTACTGROUPALIAS  doesn't exist + -- CONTACTGROUPMEMBERS  doesn't exist + -- TOTALHOSTSUP  doesn't exist + -- TOTALHOSTSDOWN  doesn't exist + -- TOTALHOSTSUNREACHABLE  doesn't exist + -- TOTALHOSTSDOWNUNHANDLED  doesn't exist + -- TOTALHOSTSUNREACHABLEUNHANDLED  doesn't exist + -- TOTALHOSTPROBLEMS  doesn't exist + -- TOTALHOSTPROBLEMSUNHANDLED  doesn't exist + -- TOTALSERVICESOK  doesn't exist + -- TOTALSERVICESWARNING  doesn't exist + -- TOTALSERVICESCRITICAL  doesn't exist + -- TOTALSERVICESUNKNOWN  doesn't exist + -- TOTALSERVICESWARNINGUNHANDLED  doesn't exist + -- TOTALSERVICESCRITICALUNHANDLED  doesn't exist + -- TOTALSERVICESUNKNOWNUNHANDLED  doesn't exist + -- TOTALSERVICEPROBLEMS  doesn't exist + -- TOTALSERVICEPROBLEMSUNHANDLED  doesn't exist + -- NOTIFICATIONTYPE doesn't exist + -- NOTIFICATIONRECIPIENTS doesn't exist + -- NOTIFICATIONISESCALATED doesn't exist + -- NOTIFICATIONAUTHOR doesn't exist + -- NOTIFICATIONAUTHORNAME doesn't exist + -- NOTIFICATIONAUTHORALIAS doesn't exist + -- NOTIFICATIONCOMMENT doesn't exist + -- HOSTNOTIFICATIONNUMBER doesn't exist + -- HOSTNOTIFICATIONID doesn't exist + -- SERVICENOTIFICATIONNUMBER doesn't exist + -- SERVICENOTIFICATIONID doesn't exist + } + + setmetatable(self, { __index = ScMacros }) + return self +end + +--- replace_sc_macro: replace any stream connector macro with it's value +-- @param string (string) the string in which there might be some stream connector macros to replace +-- @param event (table) the current event table +-- @param json_string (boolean) +-- @return converted_string (string) the input string but with the macro replaced with their json escaped values +function ScMacros:replace_sc_macro(string, event, json_string) + local cache_macro_value = false + local event_macro_value = false + local group_macro_value = false + local format = false + local converted_string = string + + -- find all macros for exemple the string: + -- {cache.host.name} is 
the name of host with id: {host_id} + -- will generate two macros {cache.host.name} and {host_id}) + for macro in string.gmatch(string, "{[%w_.%(%),%%%+%-%*%?%[%]%^%$]+}") do + self.sc_logger:debug("[sc_macros:replace_sc_macro]: found a macro, name is: " .. tostring(macro)) + + -- check if macro is in the cache + cache_macro_value = self:get_cache_macro(macro, event) + + -- replace all cache macro such as {cache.host.name} with their values + if cache_macro_value then + converted_string = self:build_converted_string_for_cache_and_event_macro(cache_macro_value, macro, converted_string) + else + -- if not in cache, try to find a matching value in the event itself + event_macro_value = self:get_event_macro(macro, event) + + -- replace all event macro such as {host_id} with their values + if event_macro_value then + converted_string = self:build_converted_string_for_cache_and_event_macro(event_macro_value, macro, converted_string) + else + -- if not event or cache macro, maybe it is a group macro + group_macro_value, format = self:get_group_macro(macro, event) + + -- replace all group macro such as {group(hg,table)} with their values + if group_macro_value then + group_macro_value = broker.json_encode(group_macro_value) + macro = self.sc_common:lua_regex_escape(macro) + + self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is a group macro. Macro name: " + .. tostring(macro) .. ", value is: " .. tostring(group_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string) + .. ". Applied format is: " .. tostring(format)) + + if string.match(converted_string, '"' .. macro .. '"') then + converted_string = string.gsub(converted_string, '"' .. macro .. '"', group_macro_value) + else + converted_string = string.gsub(converted_string, "(.*)" .. macro .. "(.*)", "%1" .. self.sc_common:json_escape(self.sc_common:trim(group_macro_value, '"')) .. "%2") + end + else + self.sc_logger:error("[sc_macros:replace_sc_macro]: macro: " .. tostring(macro) .. ", is not a valid stream connector macro or we didn't find a value for it" + .. ". For example a {cache.severity.service} macro that is perfectly valid but the service has no severity") + end + end + end + end + + -- the input string was a json, we decode the result + if json_string then + local decoded_json, error = broker.json_decode(converted_string) + + if error then + self.sc_logger:error("[sc_macros:replace_sc_macro]: couldn't decode json string: " .. tostring(converted_string) + .. ". Error is: " .. tostring(error)) + return converted_string + end + + self.sc_logger:debug("[sc_macros:replace_sc_macro]: decoded json: " .. self.sc_common:dumper(decoded_json)) + return decoded_json + end + + self.sc_logger:debug("[sc_macros:replace_sc_macro]: converted string: " .. 
tostring(converted_string)) + return converted_string +end + +--- get_cache_macro: check if the macro is a macro which value must be found in the cache +-- @param macro (string) the macro we want to check (for example: {cache.host.name}) +-- @param event (table) the event table (obivously, cache must be in the event table if we want to find something in it) +-- @return false (boolean) if the macro is not a cache macro ({host_id} instead of {cache.xxxx.yyy} for example) or we can't find the cache type or the macro in the cache +-- @return macro_value (string|boolean|number) the value of the macro +function ScMacros:get_cache_macro(raw_macro, event) + + -- try to cut the macro in three parts + local cache, cache_type, macro = string.match(raw_macro, "^{(cache)%.(%w+)%.(.*)}") + + -- if cache is not set, it means that the macro wasn't a cache macro + if not cache then + self.sc_logger:info("[sc_macros:get_cache_macro]: macro: " .. tostring(raw_macro) .. " is not a cache macro") + return false + end + + -- make sure that the type of cache is in the event table (for example event.cache.host must exist if the macro is {cache.host.name}) + if event.cache[cache_type] then + -- check if it is asked to transform the macro and if so, separate the real macro from the transformation flag + local macro_value, flag = self:get_transform_flag(macro) + + -- check if the macro is in the cache + if event.cache[cache_type][macro_value] then + if flag then + self.sc_logger:info("[sc_macros:get_cache_macro]: macro has a flag associated. Flag is: " .. tostring(flag) + .. ", a macro value conversion will be done.") + -- convert the found value according to the flag that has been sent + return self.transform_macro[flag](event.cache[cache_type][macro_value], event) + else + -- just return the value if there is no conversion required + return event.cache[cache_type][macro_value] + end + end + end + + return false +end + +--- get_event_macro: check if the macro is a macro which value must be found in the event table (meaning not in the cache) +-- @param macro (string) the macro we want to check (for example: {host_id}) +-- @param event (table) the event table +-- @return false (boolean) if the macro is not found in the event +-- @return macro_value (string|boolean|number) the value of the macro +function ScMacros:get_event_macro(macro, event) + -- isolate the name of the macro + macro = string.match(macro, "{(.*)}") + + -- check if it is asked to transform the macro and if so, separate the real macro from the transformation flag + local macro_value, flag = self:get_transform_flag(macro) + + -- check if the macro is in the event + if event[macro_value] then + if flag then + self.sc_logger:info("[sc_macros:get_event_macro]: macro has a flag associated. Flag is: " .. tostring(flag) + .. ", a macro value conversion will be done. Macro value is: " .. 
tostring(macro_value)) + -- convert the found value according to the flag that has been sent + return self.transform_macro[flag](event[macro_value], event) + else + -- just return the value if there is no conversion required + return event[macro_value] + end + end + + return false +end + +--- get_group_macro: check if the macro is a macro which value must be found in a group table (meaning it is a special kind of data in the event) +-- @param macro (string) the macro we want to check (for example: {group(hg,table)}) +-- @param event (table) the event table +-- @return false (boolean) if the macro is not found +-- @return macro_value (string|boolean|number) the value of the macro +function ScMacros:get_group_macro(macro, event) + -- try to cut the macro + local group_type, format, regex = string.match(macro, "^{groups%((%w+),(%w+),(.*)%)}") + + if not group_type or not format or not regex or not self.group_macro_conversion[group_type] then + self.sc_logger:info("[sc_macros:get_group_macro]: macro: " .. tostring(macro) .. " is not a valid group macro") + return false + end + + local data, index_name = self.group_macro_conversion[group_type](event) + local code, converted_data = self:build_group_macro_value(data, index_name, format, regex) + + if not code then + self.sc_logger:error("[sc_macros:get_group_macro]: couldn't convert data for group type: " .. tostring(group_type) + .. ". Desired format: " .. tostring(format) .. ". Filtering using regex: " .. tostring(regex)) + return false + end + + return converted_data, format +end + +--- get_hg_macro: retrieve hostgroup information and make it available as a macro +-- @param event (table) all the event information +-- @return hostgroups (table) all the hostgroups linked to the event +-- @return index_name (string) the name of the index that is linked to the name of the hostgroup +function ScMacros:get_hg_macro(event) + return event.cache.hostgroups, "group_name" +end + +--- get_sg_macro: retrieve servicegroup information and make it available as a macro +-- @param event (table) all the event information +-- @return servicegroups (table) all the servicegroups linked to the event +-- @return index_name (string) the name of the index that is linked to the name of the servicegroup +function ScMacros:get_sg_macro(event) + return event.cache.servicegroups, "group_name" +end + +--- get_bv_macro: retrieve BV information and make it available as a macro +-- @param event (table) all the event information +-- @return bvs (table) all the BVS linked to the event +-- @return index_name (string) the name of the index that is linked to the name of the BV +function ScMacros:get_bv_macro(event) + return event.cache.bvs, "bv_name" +end + +--- build_group_macro_value: build the value that must replace the macro (it will also put it in the desired format) +-- @param data (table) the data from the group (hg, sg or bvs) +-- @param index_name (string) the name of the index at which we will find the relevant data (most of the time, the name of hg, sg or bv) +-- @param format (string) the output format we want (can be table or inline) +-- @param regex (string) the regex that is going to be used to filter unwanted hg, sg or bv (use wildcard .* to accepte everything) +-- @return boolean (boolean) false if asked format is unknown, true otherwise +-- @return macro_value (string|table) the value that will replace the macro (the type of returned value depends on the asked format) +function ScMacros:build_group_macro_value(data, index_name, format, regex) + local result = {} + 
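+  -- the regex argument is matched with string.match and is therefore a Lua
+  -- pattern, not a POSIX regex: ".*" accepts every group name, while a
+  -- pattern such as "^Prod" keeps only names starting with "Prod"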
for _, group_info in pairs(data) do + if string.match(group_info[index_name], regex) then + table.insert(result, group_info[index_name]) + end + end + + if not self.group_macro_format[format] then + self.sc_logger:error("[sc_macros:build_group_macro_value]: unknown format for group macro. Format provided: " .. tostring(format)) + return false + end + + return true, self.group_macro_format[format](result) +end + +--- group_macro_format_table: transform the value behind the macro into a table +-- @param data (table) the values linked to the macro +-- @return data (table) the values linked to the macro stored inside a table +function ScMacros:group_macro_format_table(data) + -- data is already a table, nothing to do + return data +end + +--- group_macro_format_inline: transform the value behind the macro into a single line string separated using a coma +-- @param data (table) the values linked to the macro +-- @return result (string) the values linked to the macro stored inside a coma separated string +function ScMacros:group_macro_format_inline(data) + local result = "" + + for _, value in pairs(data) do + if result == "" then + result = value + else + result = result .. "," .. value + end + end + + return result +end + +--- convert_centreon_macro: replace a centreon macro with its value +-- @param string (string) the string that may contain centreon macros +-- @param event (table) the event table +-- @return converted_string (string) the input string with its macros replaced with their values +function ScMacros:convert_centreon_macro(string, event) + local centreon_macro = false + local sc_macro_value = false + local converted_string = string + + -- get all standard macros + for macro in string.gmatch(string, "$%w$") do + self.sc_logger:debug("[sc_macros:convert_centreon_macro]: found a macro, name is: " .. tostring(macro)) + -- try to find the macro in the mapping table table self.centreon_macro + centreon_macro = self:get_centreon_macro(macro) + + -- if the macro has been found, try to get its value + if centreon_macro then + sc_macro_value = self:replace_sc_macro(centreon_macro, event) + + -- if a value has been found, replace the macro with the value + if sc_macro_value then + self.sc_logger:debug("[sc_macros:replace_sc_macro]: macro is a centreon macro. Macro name: " + .. tostring(macro) .. ", value is: " .. tostring(sc_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) + converted_string = string.gsub(converted_string, centreon_macro, sc_macro_value) + end + else + self.sc_logger:error("[sc_macros:convert_centreon_macro]: macro: " .. tostring(macro) .. 
" is not a valid centreon macro") + end + end + + return converted_string +end + +--- get_centreon_macro: try to find the macro in the centreon_macro mapping table +-- @param macro_name (string) the name of the macro ($HOSTNAME$ for example) +-- @return string (string) the value of the macro +-- @return false (boolean) if the macro is not in the mapping table +function ScMacros:get_centreon_macro(macro_name) + return self.centreon_macro[string.gsub(macro_name, "%$", "")] or false +end + +--- get_transform_flag: check if there is a tranformation flag linked to the macro and separate them +-- @param macro (string) the macro that needs to be checked +-- @return macro_value (string) the macro name ONLY if there is a flag +-- @return flag (string) the flag name if there is one +-- @return macro (string) the original macro if no flag were found +function ScMacros:get_transform_flag(macro) + -- separate macro and flag + local macro_value, flag = string.match(macro, "(.*)_sc(%w+)$") + + -- if there was a flag in the macro name, return the real macro name and its flag + if macro_value then + return macro_value, flag + end + + -- if no flag were found, just return the original macro + return macro +end + +--- transform_date: convert a timestamp macro into a human readable date using the format set in the timestamp_conversion_format parameter +-- @param macro_value (number) the timestamp that needs to be converted +-- @return date (string) the converted timestamp +function ScMacros:transform_date(macro_value) + return os.date(self.params.timestamp_conversion_format, os.time(os.date("!*t", macro_value + self.params.local_time_diff_from_utc))) +end + +--- transform_short: mostly used to convert the event output into a short output by keeping only the data before the new line +-- @param macro_value (string) the string that needs to be shortened +-- @return string (string) the input string with only the first lne +function ScMacros:transform_short(macro_value) + return string.match(macro_value, "^(.*)\n") +end + +--- transform_type: convert a 0, 1 value into SOFT or HARD +-- @param macro_value (number) the number that indicates a SOFT or HARD state +-- @return string (string) HARD or SOFT +function ScMacros:transform_type(macro_value) + if macro_value == 0 then + return "SOFT" + else + return "HARD" + end +end + +--- transform_state: convert the number that represent the event status with its human readable counterpart +-- @param macro_value (number) the number that represents the status of the event +-- @param event (table) the event table +-- @return string (string) the status of the event in a human readable format (e.g: OK, WARNING) +function ScMacros:transform_state(macro_value, event) + -- acknowledgement events are special, the state can be for a host or a service. 
+ -- We force the element to be host_status or service_status in order to properly convert the state + if event.element == 1 and event.service_id == 0 then + return self.params.status_mapping[event.category][event.element].host_status[macro_value] + elseif event.element == 1 and event.service_id ~= 0 then + return self.params.status_mapping[event.category][event.element].service_status[macro_value] + end + + return self.params.status_mapping[event.category][event.element][macro_value] +end + +--- transform_number: convert a string to a number +-- @param macro_value (string) the string that needs to be converted +-- @return number (number) a number based on the provided string +function ScMacros:transform_number(macro_value) + local result = tonumber(macro_value) + return result +end + +--- transform_string: convert any value to a string +-- @param macro_value (any) the value that needs to be converted +-- @return string (string) a string based on the provided input +function ScMacros:transform_string(macro_value) + return tostring(macro_value) +end + +--- build_converted_string_for_cache_and_event_macro: replace a cache or event macro in the string that contains it +-- @param macro_value (any): the value of the macro that must be replaced +-- @param macro (string): the macro name +-- @param converted_string (string): the string in which a macro must be replaced +-- @return converted_string (string): the string with the macro replaced +function ScMacros:build_converted_string_for_cache_and_event_macro(macro_value, macro, converted_string) + -- need to escape % characters or else it will break the string.gsub that is done later + local clean_macro_value, _ = string.gsub(macro_value, "%%", "%%%%") + local clean_macro_value_json = "" + + self.sc_logger:debug("[sc_macros:build_converted_string_for_cache_and_event_macro]: macro is a cache macro. Macro name: " + .. tostring(macro) .. ", value is: " .. tostring(clean_macro_value) .. ", trying to replace it in the string: " .. tostring(converted_string)) + + --[[ + to have the best json possible, we try to remove double quotes. + "service_severity": "{cache.severity.service}" must become "service_severity": 1 and not "service_severity": "1" + "service_severity": "my service severity is: {cache.severity.service}" must become "service_severity": "my service severity is: 1" + ]]-- + if string.match(converted_string, '"' .. macro .. '"') then + -- we don't need to json encode numbers and booleans, if people want them as a string, they need to use the _scstring flag in their macro + if type(clean_macro_value) == "number" or type(clean_macro_value) == "boolean" then + clean_macro_value_json = clean_macro_value + else + clean_macro_value_json = broker.json_encode(clean_macro_value) + end + + converted_string = string.gsub(converted_string, '"' .. macro .. '"', clean_macro_value_json) + else + -- if the macro is in the middle of a string, we can't directly json encode it because it would break the final json without escaped characters (and escaping everything would result in an ugly json) + converted_string = string.gsub(converted_string, "(.*)" .. macro .. "(.*)", "%1" .. self.sc_common:json_escape(clean_macro_value) .. 
"%2") + end + + return converted_string +end + +return sc_macros diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua new file mode 100644 index 00000000000..3fc65356b37 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_metrics.lua @@ -0,0 +1,283 @@ +#!/usr/bin/lua + +--- +-- Module that handles event metrics for stream connectors +-- @module sc_metrics +-- @alias sc_metrics +local sc_metrics = {} + +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_params = require("centreon-stream-connectors-lib.sc_params") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") + +local ScMetrics = {} + +--- sc_metrics.new: sc_metrics constructor +-- @param event (table) the current event +-- @param params (table) the params table of the stream connector +-- @param common (object) a sc_common instance +-- @param broker (object) a sc_broker instance +-- @param [opt] sc_logger (object) a sc_logger instance +function sc_metrics.new(event, params, common, broker, logger) + self = {} + + -- create a default logger if it is not provided + self.sc_logger = logger + if not self.sc_logger then + self.sc_logger = sc_logger.new() + end + + self.sc_common = common + self.params = params + self.sc_broker = broker + + local categories = self.params.bbdo.categories + local elements = self.params.bbdo.elements + + -- store metric validation functions inside a table linked to category/element + self.metric_validation = { + [categories.neb.id] = { + [elements.host.id] = function () return self:is_valid_host_metric_event() end, + [elements.host_status.id] = function() return self:is_valid_host_metric_event() end, + [elements.service.id] = function () return self:is_valid_service_metric_event() end, + [elements.service_status.id] = function () return self:is_valid_service_metric_event() end + }, + [categories.bam.id] = { + [elements.kpi_event.id] = function () return self:is_valid_kpi_metric_event() end + } + } + +-- open metric (prometheus) : metric name = [a-zA-Z0-9_:], labels [a-zA-Z0-9_] https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#protocol-negotiation +-- datadog : metric_name = [a-zA-Z0-9_.] https://docs.datadoghq.com/fr/metrics/custom_metrics/#naming-custom-metrics +-- dynatrace matric name [a-zA-Z0-9-_.] 
https://dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol#metric-key +-- metric 2.0 (carbon/graphite/grafana) [a-zA-Z0-9-_./] http://metrics20.org/spec/ (see Data Model section) +-- splunk [^a-zA-Z0-9_] + + if self.params.metrics_name_custom_regex and self.params.metrics_name_custom_regex ~= "" then + self.metrics_name_operations.custom.regex = self.params.metrics_name_custom_regex + end + + if self.params.metrics_name_custom_replacement_character then + self.metrics_name_operations.custom.replacement_character = self.params.metrics_name_custom_replacement_character + end + + -- initiate metrics table + self.metrics = {} + -- initiate sc_event object + self.sc_event = sc_event.new(event, self.params, self.sc_common, self.sc_logger, self.sc_broker) + + setmetatable(self, { __index = ScMetrics }) + return self +end + +--- is_valid_bbdo_element: checks if the event category and element are valid according to parameters and bbdo protocol +-- @return true|false (boolean) depending on the validity of the event category and element +function ScMetrics:is_valid_bbdo_element() + -- initiate variables with shorter name + local categories = self.params.bbdo.categories + local elements = self.params.bbdo.elements + local event_category = self.sc_event.event.category + local event_element = self.sc_event.event.element + -- self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element]: event cat: " .. tostring(event_category) .. ". Event element: " .. tostring(event_element)) + + -- drop event if event category is not accepted + if not self.sc_event:find_in_mapping(self.params.category_mapping, self.params.accepted_categories, event_category) then + self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element] event with category: " .. tostring(event_category) .. " is not an accepted category") + return false + else + -- drop event if accepted category is not supposed to be used for a metric stream connector + if event_category ~= categories.neb.id and event_category ~= categories.bam.id then + self.sc_logger:warning("[sc_metrics:is_valid_bbdo_element] Configuration error. accepted categories from parameters are: " + .. tostring(self.params.accepted_categories) .. ". Only bam and neb can be used for metrics") + return false + else + -- drop event if element is not accepted + if not self.sc_event:find_in_mapping(self.params.element_mapping[event_category], self.params.accepted_elements, event_element) then + self.sc_logger:debug("[sc_metrics:is_valid_bbdo_element] event with element: " .. tostring(event_element) .. " is not an accepted element") + return false + else + -- drop event if element is not an element that carries perfdata + if event_element ~= elements.host_status.id + and event_element ~= elements.service_status.id + and event_element ~= elements.kpi_event.id + then + self.sc_logger:warning("[sc_metrics:is_valid_bbdo_element] Configuration error. accepted elements from parameters are: " + .. tostring(self.params.accepted_elements) .. ". 
Only host_status, service_status and kpi_event can be used for metrics") + return false + end + end + end + + return true + end +end + +--- is_valid_metric_event: makes sure that the event is a valid event for metric usage +-- @return true|false (boolean) depending on the validity of the metric event +function ScMetrics:is_valid_metric_event() + local category = self.sc_event.event.category + local element = self.sc_event.event.element + + self.sc_logger:debug("[sc_metrics:is_valid_metric_event]: starting validation for event with category: " + .. tostring(category) .. ". And element: " .. tostring(element)) + return self.metric_validation[category][element]() +end + +--- is_valid_host_metric_event: makes sure that the metric and the event from the host are valid according to the stream connector parameters +-- @return true|false (boolean) depending on the validity of the event +function ScMetrics:is_valid_host_metric_event() + -- return false if we can't get the hostname or if the host id is nil + if not self.sc_event:is_valid_host() then + self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " hasn't been validated") + return false + end + + -- return false if host is not monitored from an accepted poller + if not self.sc_event:is_valid_poller() then + self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " is not monitored from an accepted poller") + return false + end + + -- return false if the host does not have an accepted severity + if not self.sc_event:is_valid_host_severity() then + self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " does not have an accepted severity") + return false + end + + -- return false if host is not in an accepted hostgroup + if not self.sc_event:is_valid_hostgroup() then + self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " is not in an accepted hostgroup") + return false + end + + -- return false if there is no perfdata or it can't be parsed + if not self:is_valid_perfdata(self.sc_event.event.perfdata) then + self.sc_logger:warning("[sc_metrics:is_valid_host_metric_event]: host_id: " + .. tostring(self.sc_event.event.host_id) .. " is not sending valid perfdata. Received perfdata: " .. tostring(self.sc_event.event.perfdata)) + return false + end + + return true +end + +--- is_valid_service_metric_event: makes sure that the metric and the event from the service are valid according to the stream connector parameters +-- @return true|false (boolean) depending on the validity of the event +function ScMetrics:is_valid_service_metric_event() + -- return false if we can't get the hostname or if the host id is nil + if not self.sc_event:is_valid_host() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: host_id: " .. tostring(self.sc_event.event.host_id) .. " hasn't been validated") + return false + end + + -- return false if we can't get the service description or if the service id is nil + if not self.sc_event:is_valid_service() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service with id: " .. tostring(self.sc_event.event.service_id) .. " hasn't been validated") + return false + end + + -- return false if host is not monitored from an accepted poller + if not self.sc_event:is_valid_poller() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. 
tostring(self.sc_event.event.service_id) + .. ". host_id: " .. tostring(self.sc_event.event.host_id) .. " is not monitored from an accepted poller") + return false + end + + -- return false if the host does not have an accepted severity + if not self.sc_event:is_valid_host_severity() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) + .. ". host_id: " .. tostring(self.sc_event.event.host_id) .. ". Host does not have an accepted severity") + return false + end + + -- return false if the service does not have an accepted severity + if not self.sc_event:is_valid_service_severity() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service id: " .. tostring(self.sc_event.event.service_id) + .. ". host_id: " .. tostring(self.sc_event.event.host_id) .. ". Service does not have an accepted severity") + return false + end + + -- return false if host is not in an accepted hostgroup + if not self.sc_event:is_valid_hostgroup() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " .. tostring(self.sc_event.event.service_id) + .. " is not in an accepted hostgroup. Host ID is: " .. tostring(self.sc_event.event.host_id)) + return false + end + + -- return false if service is not in an accepted servicegroup + if not self.sc_event:is_valid_servicegroup() then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " .. tostring(self.sc_event.event.service_id) .. " is not in an accepted servicegroup") + return false + end + + -- return false if there is no perfdata or it can't be parsed + if not self:is_valid_perfdata(self.sc_event.event.perfdata) then + self.sc_logger:warning("[sc_metrics:is_valid_service_metric_event]: service_id: " + .. tostring(self.sc_event.event.service_id) .. " is not sending valid perfdata. Received perfdata: " .. tostring(self.sc_event.event.perfdata)) + return false + end + + return true +end + +--- is_valid_kpi_metric_event: makes sure that the metric and the event from the KPI are valid according to the stream connector parameters +-- @return true|false (boolean) depending on the validity of the event +function ScMetrics:is_valid_kpi_metric_event() + if not self:is_valid_perfdata(self.sc_event.event.perfdata) then + self.sc_logger:warning("[sc_metrics:is_valid_kpi_metric_event]: kpi_id: " + .. tostring(self.sc_event.event.kpi_id) .. " is not sending valid perfdata. Received perfdata: " .. tostring(self.sc_event.event.perfdata)) + return false + end + + return true +end + +--- is_valid_perfdata: makes sure that the perfdata string is a valid one +-- @param perfdata (string) a string that contains perfdata +-- @return true|false (boolean) depending on the validity of the perfdata +function ScMetrics:is_valid_perfdata(perfdata) + -- drop event if perfdata is nil or empty + if not perfdata or perfdata == "" then + return false + end + + -- parse perfdata + local metrics_info, error = broker.parse_perfdata(perfdata, true) + + -- drop event if parsing failed + if not metrics_info then + self.sc_logger:error("[sc_metrics:is_valid_perfdata]: couldn't parse perfdata. Error is: " + .. tostring(error) .. ". Perfdata string is: " .. 
tostring(perfdata)) + return false + end + + -- store data from parsed perfdata inside a metrics table + self.metrics_info = metrics_info + + return true +end + +-- to name a few : +-- open metric (prometheus) : metric name = [a-zA-Z0-9_:], labels [a-zA-Z0-9_] https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#protocol-negotiation +-- datadog : metric_name = [a-zA-Z0-9_.] https://docs.datadoghq.com/fr/metrics/custom_metrics/#naming-custom-metrics +-- dynatrace metric name [a-zA-Z0-9-_.] https://dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol#metric-key +-- metric 2.0 (carbon/graphite/grafana) [a-zA-Z0-9-_./] http://metrics20.org/spec/ (see Data Model section) + +--- build_metric: use the stream connector format method to parse every metric in the event and remove unwanted metrics based on their name +-- @param format_metric (function) the format method from the stream connector +function ScMetrics:build_metric(format_metric) + local metrics_info = self.metrics_info + + for metric, metric_data in pairs(self.metrics_info) do + if string.match(metric_data.metric_name, self.params.accepted_metrics) then + metrics_info[metric].metric_name = string.gsub(metric_data.metric_name, self.params.metric_name_regex, self.params.metric_replacement_character) + -- use stream connector method to format the metric event + format_metric(metrics_info[metric]) + else + self.sc_logger:debug("[ScMetrics:build_metric]: metric name is filtered out: " .. tostring(metric_data.metric_name) .. ". Metric name filter is: " .. tostring(self.params.accepted_metrics)) + end + end +end + +return sc_metrics \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua new file mode 100644 index 00000000000..356410aba63 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_params.lua @@ -0,0 +1,1210 @@ +#!/usr/bin/lua + +--- +-- Module to help initiate a stream connector with all parameters +-- @module sc_params +-- @alias sc_params + +local sc_params = {} + +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_common = require("centreon-stream-connectors-lib.sc_common") + +local ScParams = {} + +--- sc_params.new: sc_params constructor +-- @param common (object) object instance from sc_common module +-- @param logger (object) object instance from sc_logger module +function sc_params.new(common, logger) + local self = {} + + -- initiate mandatory libs + self.logger = logger + if not self.logger then + self.logger = sc_logger.new() + end + self.common = common + + -- get the version of the bbdo protocol (only the first digit, nothing else matters) + self.bbdo_version = self.common:get_bbdo_version() + + -- initiate params + self.params = { + -- filter broker events + accepted_categories = "neb,bam", -- could be: neb,storage,bam (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#event-categories) + accepted_elements = "host_status,service_status,ba_status", -- could be: metric,host_status,service_status,ba_event,kpi_event (https://docs.centreon.com/docs/centreon-broker/en/latest/dev/bbdo.html#neb) + + -- filter status + host_status = "0,1,2", -- = ok, down, unreachable + service_status = "0,1,2,3", -- = ok, warning, critical, unknown + ba_status = "0,1,2", -- = ok, warning, critical + ack_host_status = "", -- will use host_status if empty + 
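-- note: the ack_* and dt_* status filters below fall back on host_status/service_status when left + -- empty (see check_params further down); e.g. with host_status = "0,1,2" and ack_host_status = "", + -- host acknowledgement events are filtered with "0,1,2" + 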
ack_service_status = "", -- will use service_status if empty + dt_host_status = "", -- will use host_status if empty + dt_service_status = "", -- will use service_status if empty + + -- filter state type + hard_only = 1, + acknowledged = 0, + in_downtime = 0, + flapping = 0, + + -- objects filter + accepted_hostgroups = "", + rejected_hostgroups = "", + accepted_servicegroups = "", + rejected_servicegroups = "", + accepted_hosts = "", + accepted_services = "", + accepted_hosts_enable_split_pattern = 0, + accepted_services_enable_split_pattern = 0, + accepted_hosts_split_character = ",", + accepted_services_split_character = ",", + accepted_bvs = "", + rejected_bvs = "", + accepted_pollers = "", + rejected_pollers = "", + accepted_authors = "", + rejected_authors = "", + accepted_metrics = ".*", + service_severity_threshold = nil, + service_severity_operator = ">=", + host_severity_threshold = nil, + host_severity_operator = ">=", + + -- filter anomalous events + skip_anon_events = 1, + skip_nil_id = 1, + + -- enable or disable dedup + enable_host_status_dedup = 1, + enable_service_status_dedup = 1, + + -- communication parameters + max_buffer_size = 1, + max_buffer_age = 5, --deprecated + max_all_queues_age = 60, + send_mixed_events = 1, + + -- connection parameters + connection_timeout = 60, + allow_insecure_connection = 0, + + -- proxy parameters + proxy_address = "", + proxy_port = "", + proxy_username = "", + proxy_password = "", + proxy_protocol = "http", + + -- event formatting parameters + format_file = "", + use_long_output = 1, + remove_line_break_in_output = 1, + output_line_break_replacement_character = " ", + output_size_limit = "", + + -- custom code parameters + custom_code_file = "", + + -- time parameters + local_time_diff_from_utc = os.difftime(os.time(), os.time(os.date("!*t", os.time()))), + timestamp_conversion_format = "%Y-%m-%d %X", -- will print 2021-06-11 10:43:38 + + -- internal parameters + __internal_ts_last_flush = os.time(), + __internal_last_global_flush_date = os.time(), + + -- testing parameters + send_data_test = 0, + + -- logging parameters + logfile = "", + log_level = "", + log_curl_commands = 0, + + -- metric + metric_name_regex = "", + metric_replacement_character = "_", + + -- initiate mappings + element_mapping = {}, + status_mapping = {}, + state_type_mapping = { + [0] = "SOFT", + [1] = "HARD" + }, + validatedEvents = {}, + + -- FIX BROKER ISSUE + max_stored_events = 10 -- do not use values above 100 + } + + -- maps categories name and id + self.params.bbdo = { + categories = { + neb = { + id = 1, + name = "neb" + }, + storage = { + id = 3, + name = "storage" + }, + bam = { + id = 6, + name = "bam" + } + } + } + + local categories = self.params.bbdo.categories + + local bbdo2_bbdo3_compat_mapping = { + [2] = { + host_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 14, + name = "host_status" + }, + service_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 24, + name = "service_status" + }, + acknowledgement = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 1, + name = "acknowledgement" + }, + downtime = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 5, + name = "downtime" + }, + ba_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 1, + name = "ba_status" + } + }, + [3] = { + host_status = { + category_id = categories.neb.id, + category_name = 
categories.neb.name, + id = 32, + name = "pb_host_status" + }, + service_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 29, + name = "pb_service_status" + }, + acknowledgement = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 45, + name = "pb_acknowledgement" + }, + downtime = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 36, + name = "pb_downtime" + }, + ba_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 19, + name = "pb_ba_status" + } + } + } + + self.params.bbdo.elements = { + acknowledgement = bbdo2_bbdo3_compat_mapping[self.bbdo_version]["acknowledgement"], + comment = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 2, + name = "comment" + }, + custom_variable = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 3, + name = "custom_variable" + }, + custom_variable_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 4, + name = "custom_variable_status" + }, + downtime = bbdo2_bbdo3_compat_mapping[self.bbdo_version]["downtime"], + event_handler = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 6, + name = "event_handler" + }, + flapping_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 7, + name = "flapping_status" + }, + host_check = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 8, + name = "host_check" + }, + host_dependency = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 9, + name = "host_dependency" + }, + host_group = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 10, + name = "host_group" + }, + host_group_member = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 11, + name = "host_group_member" + }, + host = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 12, + name = "host" + }, + host_parent = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 13, + name = "host_parent" + }, + host_status = bbdo2_bbdo3_compat_mapping[self.bbdo_version]["host_status"], + instance = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 15, + name = "instance" + }, + instance_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 16, + name = "instance_status" + }, + log_entry = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 17, + name = "log_entry" + }, + module = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 18, + name = "module" + }, + service_check = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 19, + name = "service_check" + }, + service_dependency = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 20, + name = "service_dependency" + }, + service_group = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 21, + name = "service_group" + }, + service_group_member = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 22, + name = "service_group_member" + }, + service = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 23, + name = "service" + }, + 
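-- note: the acknowledgement, downtime, host_status, service_status and ba_status entries in this + -- table are resolved through bbdo2_bbdo3_compat_mapping above; e.g. on a BBDO 3 broker, + -- self.params.bbdo.elements.service_status holds id 29 ("pb_service_status") while on BBDO 2 it + -- holds the legacy id 24 ("service_status") + 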
service_status = bbdo2_bbdo3_compat_mapping[self.bbdo_version]["service_status"], + instance_configuration = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 25, + name = "instance_configuration" + }, + responsive_instance = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 26, + name = "responsive_instance" + }, + pb_service = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 27, + name = "pb_service" + }, + pb_adaptive_service = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 28, + name = "pb_adaptive_service" + }, + pb_service_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 29, + name = "pb_service_status" + }, + pb_host = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 30, + name = "pb_host" + }, + pb_adaptive_host = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 31, + name = "pb_adaptive_host" + }, + pb_host_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 32, + name = "pb_host_status" + }, + pb_severity = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 33, + name = "pb_severity" + }, + pb_tag = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 34, + name = "pb_tag" + }, + pb_comment = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 35, + name = "pb_comment" + }, + pb_downtime = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 36, + name = "pb_downtime" + }, + pb_custom_variable = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 37, + name = "pb_custom_variable" + }, + pb_custom_variable_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 38, + name = "pb_custom_variable_status" + }, + pb_host_check = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 39, + name = "pb_host_check" + }, + pb_service_check = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 40, + name = "pb_service_check" + }, + pb_log_entry = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 41, + name = "pb_log_entry" + }, + pb_instance_status = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 42, + name = "pb_instance_status" + }, + pb_instance = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 44, + name = "pb_instance" + }, + pb_acknowledgement = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 45, + name = "pb_acknowledgement" + }, + pb_responsive_instance = { + category_id = categories.neb.id, + category_name = categories.neb.name, + id = 46, + name = "pb_responsive_instance" + }, + metric = { + category_id = categories.storage.id, + category_name = categories.storage.name, + id = 1, + name = "metric" + }, + rebuild = { + category_id = categories.storage.id, + category_name = categories.storage.name, + id = 2, + name = "rebuild" + }, + remove_graph = { + category_id = categories.storage.id, + category_name = categories.storage.name, + id = 3, + name = "remove_graph" + }, + status = { + category_id = categories.storage.id, + category_name = categories.storage.name, + id = 4, + name = "status" + }, + index_mapping = { + 
category_id = categories.storage.id, + category_name = categories.storage.name, + id = 5, + name = "index_mapping" + }, + metric_mapping = { + category_id = categories.storage.id, + category_name = categories.storage.name, + id = 6, + name = "metric_mapping" + }, + ba_status = bbdo2_bbdo3_compat_mapping[self.bbdo_version]["ba_status"], + kpi_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 2, + name = "kpi_status" + }, + meta_service_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 3, + name = "meta_service_status" + }, + ba_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 4, + name = "ba_event" + }, + kpi_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 5, + name = "kpi_event" + }, + ba_duration_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 6, + name = "ba_duration_event" + }, + dimension_ba_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 7, + name = "dimension_ba_event" + }, + dimension_kpi_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 8, + name = "dimension_kpi_event" + }, + dimension_ba_bv_relation_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 9, + name = "dimension_ba_bv_relation_event" + }, + dimension_bv_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 10, + name = "dimension_bv_event" + }, + dimension_truncate_table_signal = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 11, + name = "dimension_truncate_table_signal" + }, + bam_rebuild = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 12, + name = "bam_rebuild" + }, + dimension_timeperiod = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 13, + name = "dimension_timeperiod" + }, + dimension_ba_timeperiod_relation = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 14, + name = "dimension_ba_timeperiod_relation" + }, + dimension_timeperiod_exception = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 15, + name = "dimension_timeperiod_exception" + }, + dimension_timeperiod_exclusion = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 16, + name = "dimension_timeperiod_exclusion" + }, + inherited_downtime = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 17, + name = "inherited_downtime" + }, + pb_inherited_downtime = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 18, + name = "pb_inherited_downtime" + }, + pb_ba_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 19, + name = "pb_ba_status" + }, + pb_ba_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 20, + name = "pb_ba_event" + }, + pb_kpi_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 20, + name = "pb_kpi_event" + }, + pb_dimension_bv_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 21, + name = "pb_dimension_bv_event" + }, + pb_dimension_ba_bv_relation_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 22, + name = 
"pb_dimension_ba_bv_relation_event" + }, + pb_dimension_timeperiod = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 23, + name = "pb_dimension_timeperiod" + }, + pb_dimension_ba_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 24, + name = "pb_dimension_ba_event" + }, + pb_dimension_kpi_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 25, + name = "pb_dimension_kpi_event" + }, + pb_kpi_status = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 26, + name = "pb_kpi_status" + }, + pb_ba_duration_event = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 27, + name = "pb_ba_duration_event" + }, + pb_dimension_ba_timeperiod_relation = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 28, + name = "pb_dimension_ba_timeperiod_relation" + }, + pb_dimension_truncate_table_signal = { + category_id = categories.bam.id, + category_name = categories.bam.name, + id = 29, + name = "pb_dimension_truncate_table_signal" + } + } + + local elements = self.params.bbdo.elements + + -- initiate category and element mapping + self.params.element_mapping = { + [categories.neb.id] = {}, + [categories.storage.id] = {}, + [categories.bam.id] = {} + } + + -- maps category id with element name and element id + -- neb elements + self.params.element_mapping[categories.neb.id].acknowledgement = elements.acknowledgement.id + self.params.element_mapping[categories.neb.id].comment = elements.comment.id + self.params.element_mapping[categories.neb.id].custom_variable = elements.custom_variable.id + self.params.element_mapping[categories.neb.id].custom_variable_status = elements.custom_variable_status.id + self.params.element_mapping[categories.neb.id].downtime = elements.downtime.id + self.params.element_mapping[categories.neb.id].event_handler = elements.event_handler.id + self.params.element_mapping[categories.neb.id].flapping_status = elements.flapping_status.id + self.params.element_mapping[categories.neb.id].host_check = elements.host_check.id + self.params.element_mapping[categories.neb.id].host_dependency = elements.host_dependency.id + self.params.element_mapping[categories.neb.id].host_group = elements.host_group.id + self.params.element_mapping[categories.neb.id].host_group_member = elements.host_group_member.id + self.params.element_mapping[categories.neb.id].host = elements.host.id + self.params.element_mapping[categories.neb.id].host_parent = elements.host_parent.id + self.params.element_mapping[categories.neb.id].host_status = elements.host_status.id + self.params.element_mapping[categories.neb.id].instance = elements.instance.id + self.params.element_mapping[categories.neb.id].instance_status = elements.instance_status.id + self.params.element_mapping[categories.neb.id].log_entry = elements.log_entry.id + self.params.element_mapping[categories.neb.id].module = elements.module.id + self.params.element_mapping[categories.neb.id].service_check = elements.service_check.id + self.params.element_mapping[categories.neb.id].service_dependency = elements.service_dependency.id + self.params.element_mapping[categories.neb.id].service_group = elements.service_group.id + self.params.element_mapping[categories.neb.id].service_group_member = elements.service_group_member.id + self.params.element_mapping[categories.neb.id].service = elements.service.id + 
self.params.element_mapping[categories.neb.id].service_status = elements.service_status.id + self.params.element_mapping[categories.neb.id].instance_configuration = elements.instance_configuration.id + self.params.element_mapping[categories.neb.id].responsive_instance = elements.responsive_instance.id + self.params.element_mapping[categories.neb.id].pb_service = elements.pb_service.id + self.params.element_mapping[categories.neb.id].pb_adaptive_service = elements.pb_adaptive_service.id + self.params.element_mapping[categories.neb.id].pb_service_status = elements.pb_service_status.id + self.params.element_mapping[categories.neb.id].pb_host = elements.pb_host.id + self.params.element_mapping[categories.neb.id].pb_adaptive_host = elements.pb_adaptive_host.id + self.params.element_mapping[categories.neb.id].pb_host_status = elements.pb_host_status.id + self.params.element_mapping[categories.neb.id].pb_severity = elements.pb_severity.id + self.params.element_mapping[categories.neb.id].pb_tag = elements.pb_tag.id + + -- metric elements mapping + self.params.element_mapping[categories.storage.id].metric = elements.metric.id + self.params.element_mapping[categories.storage.id].rebuild = elements.rebuild.id + self.params.element_mapping[categories.storage.id].remove_graph = elements.remove_graph.id + self.params.element_mapping[categories.storage.id].status = elements.status.id + self.params.element_mapping[categories.storage.id].index_mapping = elements.index_mapping.id + self.params.element_mapping[categories.storage.id].metric_mapping = elements.metric_mapping.id + + -- bam elements mapping + self.params.element_mapping[categories.bam.id].ba_status = elements.ba_status.id + self.params.element_mapping[categories.bam.id].kpi_status = elements.kpi_status.id + self.params.element_mapping[categories.bam.id].meta_service_status = elements.meta_service_status.id + self.params.element_mapping[categories.bam.id].ba_event = elements.ba_event.id + self.params.element_mapping[categories.bam.id].kpi_event = elements.kpi_event.id + self.params.element_mapping[categories.bam.id].ba_duration_event = elements.ba_duration_event.id + self.params.element_mapping[categories.bam.id].dimension_ba_event = elements.dimension_ba_event.id + self.params.element_mapping[categories.bam.id].dimension_kpi_event = elements.dimension_kpi_event.id + self.params.element_mapping[categories.bam.id].dimension_ba_bv_relation_event = elements.dimension_ba_bv_relation_event.id + self.params.element_mapping[categories.bam.id].dimension_bv_event = elements.dimension_bv_event.id + self.params.element_mapping[categories.bam.id].dimension_truncate_table_signal = elements.dimension_truncate_table_signal.id + self.params.element_mapping[categories.bam.id].bam_rebuild = elements.bam_rebuild.id + self.params.element_mapping[categories.bam.id].dimension_timeperiod = elements.dimension_timeperiod.id + self.params.element_mapping[categories.bam.id].dimension_ba_timeperiod_relation = elements.dimension_ba_timeperiod_relation.id + self.params.element_mapping[categories.bam.id].dimension_timeperiod_exception = elements.dimension_timeperiod_exception.id + self.params.element_mapping[categories.bam.id].dimension_timeperiod_exclusion = elements.dimension_timeperiod_exclusion.id + self.params.element_mapping[categories.bam.id].inherited_downtime = elements.inherited_downtime.id + + self.params.reverse_element_mapping = { + [categories.neb.id] = { + [elements.acknowledgement.id] = "acknowledgement", + [elements.comment.id] = "comment", + 
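-- usage sketch (hypothetical lookup): this reverse table turns the raw category/element ids of a + -- broker event back into a readable name; e.g. on BBDO 2, + -- self.params.reverse_element_mapping[1][24] is "service_status" (category 1 being neb) + 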
[elements.custom_variable.id] = "custom_variable", + [elements.custom_variable_status.id] = "custom_variable_status", + [elements.downtime.id] = "downtime", + [elements.event_handler.id] = "event_handler", + [elements.flapping_status.id] = "flapping_status", + [elements.host_check.id] = "host_check", + [elements.host_dependency.id] = "host_dependency", + [elements.host_group.id] = "host_group", + [elements.host_group_member.id] = "host_group_member", + [elements.host.id] = "host", + [elements.host_parent.id] = "host_parent", + [elements.host_status.id] = "host_status", + [elements.instance.id] = "instance", + [elements.instance_status.id] = "instance_status", + [elements.log_entry.id] = "log_entry", + [elements.module.id] = "module", + [elements.service_check.id] = "service_check", + [elements.service_dependency.id] = "service_dependency", + [elements.service_group.id] = "service_group", + [elements.service_group_member.id] = "service_group_member", + [elements.service.id] = "service", + [elements.service_status.id] = "service_status", + [elements.instance_configuration.id] = "instance_configuration", + [elements.pb_service.id] = "pb_service", + [elements.pb_adaptive_service.id] = "pb_adaptive_service", + [elements.pb_service_status.id] = "pb_service_status", + [elements.pb_host.id] = "pb_host", + [elements.pb_adaptive_host.id] = "pb_adaptive_host", + [elements.pb_host_status.id] = "pb_host_status", + [elements.pb_severity.id] = "pb_severity", + [elements.pb_tag.id] = "pb_tag" + }, + [categories.storage.id] = { + [elements.metric.id] = "metric", + [elements.rebuild.id] = "rebuild", + [elements.remove_graph.id] = "remove_graph", + [elements.status.id] = "status", + [elements.index_mapping.id] = "index_mapping", + [elements.metric_mapping.id] = "metric_mapping" + }, + [categories.bam.id] = { + [elements.ba_status.id] = "ba_status", + [elements.kpi_status.id] = "kpi_status", + [elements.meta_service_status.id] = "meta_service_status", + [elements.ba_event.id] = "ba_event", + [elements.kpi_event.id] = "kpi_event", + [elements.ba_duration_event.id] = "ba_duration_event", + [elements.dimension_ba_event.id] = "dimension_ba_event", + [elements.dimension_kpi_event.id] = "dimension_kpi_event", + [elements.dimension_ba_bv_relation_event.id] = "dimension_ba_bv_relation_event", + [elements.dimension_bv_event.id] = "dimension_bv_event", + [elements.dimension_truncate_table_signal.id] = "dimension_truncate_table_signal", + [elements.bam_rebuild.id] = "bam_rebuild", + [elements.dimension_timeperiod.id] = "dimension_timeperiod", + [elements.dimension_ba_timeperiod_relation.id] = "dimension_ba_timeperiod_relation", + [elements.dimension_timeperiod_exception.id] = "dimension_timeperiod_exception", + [elements.dimension_timeperiod_exclusion.id] = "dimension_timeperiod_exclusion", + [elements.inherited_downtime.id] = "inherited_downtime" + } + } + + self.params.reverse_category_mapping = { + [categories.neb.id] = categories.neb.name, + [2] = "bbdo", + [categories.storage.id] = categories.storage.name, + [4] = "correlation", + [5] = "dumper", + [categories.bam.id] = categories.bam.name, + [7] = "extcmd" + } + + self.params.category_mapping = { + [categories.neb.name] = categories.neb.id, + bbdo = 2, + [categories.storage.name] = categories.storage.id, + correlation = 4, + dumper = 5, + [categories.bam.name] = categories.bam.id, + extcmd = 7 + } + + -- initiate category and status mapping + self.params.status_mapping = { + [categories.neb.id] = { + [elements.acknowledgement.id] = { + host_status = {}, + 
service_status = {} + }, + [elements.downtime.id] = { + [1] = {}, + [2] = {} + }, + [elements.host_status.id] = { + [0] = "UP", + [1] = "DOWN", + [2] = "UNREACHABLE" + }, + [elements.service_status.id] = { + [0] = "OK", + [1] = "WARNING", + [2] = "CRITICAL", + [3] = "UNKNOWN" + }, + [elements.pb_host_status.id] = { + [0] = "UP", + [1] = "DOWN", + [2] = "UNREACHABLE" + }, + [elements.pb_service_status.id] = { + [0] = "OK", + [1] = "WARNING", + [2] = "CRITICAL", + [3] = "UNKNOWN" + } + }, + [categories.bam.id] = { + [0] = "OK", + [1] = "WARNING", + [2] = "CRITICAL" + } + } + + self.params.format_template = { + [categories.neb.id] = {}, + [categories.bam.id] = {} + } + + -- downtime status mapping + self.params.status_mapping[categories.neb.id][elements.downtime.id][1] = self.params.status_mapping[categories.neb.id][elements.service_status.id] + self.params.status_mapping[categories.neb.id][elements.downtime.id][2] = self.params.status_mapping[categories.neb.id][elements.host_status.id] + + -- acknowledgement status mapping + self.params.status_mapping[categories.neb.id][elements.acknowledgement.id].host_status = self.params.status_mapping[categories.neb.id][elements.host_status.id] + self.params.status_mapping[categories.neb.id][elements.acknowledgement.id].service_status = self.params.status_mapping[categories.neb.id][elements.service_status.id] + + setmetatable(self, { __index = ScParams }) + return self +end + +--- deprecated_params: check if a param_name provided by the web configuration is deprecated or not +-- @param param_name (string) the name of a parameter from the web interface +-- @return new_param_name (string) the replacement name if the parameter is deprecated, otherwise the original param_name +local function deprecated_params(param_name) + -- initiate deprecated parameters table + local deprecated_params = { + -- max_buffer_age param has been replaced by max_all_queues_age + ["max_buffer_age"] = "max_all_queues_age" + } + + for deprecated_param_name, new_param_name in pairs(deprecated_params) do + if param_name == deprecated_param_name then + return new_param_name + end + end + return param_name + +end + +--- param_override: change default param values with the ones provided by the web configuration +-- @param user_params (table) the table of all parameters from the web interface +function ScParams:param_override(user_params) + if type(user_params) ~= "table" then + self.logger:error("User parameters are not a table. Using default parameters instead") + return + end + + for param_name, param_value in pairs(user_params) do + if self.params[param_name] or string.find(param_name, "^_sc") ~= nil then + + -- Check if the param is deprecated + local param_name_verified = deprecated_params(param_name) + if param_name_verified ~= param_name then + self.logger:notice("[sc_params:param_override]: following parameter: " .. tostring(param_name) .. " is deprecated and has been replaced by: " .. tostring(param_name_verified)) + end + + self.params[param_name_verified] = param_value + self.logger:notice("[sc_params:param_override]: overriding parameter: " .. tostring(param_name_verified) .. " with value: " .. tostring(param_value)) + else + self.logger:notice("[sc_params:param_override]: User parameter: " .. tostring(param_name) .. 
" is not handled by this stream connector") + end + end +end + +--- check_params: check standard params syntax +function ScParams:check_params() + self.params.hard_only = self.common:check_boolean_number_option_syntax(self.params.hard_only, 1) + self.params.acknowledged = self.common:check_boolean_number_option_syntax(self.params.acknowledged, 0) + self.params.in_downtime = self.common:check_boolean_number_option_syntax(self.params.in_downtime, 0) + self.params.flapping = self.common:check_boolean_number_option_syntax(self.params.flapping, 0) + self.params.skip_anon_events = self.common:check_boolean_number_option_syntax(self.params.skip_anon_events, 1) + self.params.skip_nil_id = self.common:check_boolean_number_option_syntax(self.params.skip_nil_id, 1) + self.params.accepted_authors = self.common:if_wrong_type(self.params.accepted_authors, "string", "") + self.params.rejected_authors = self.common:if_wrong_type(self.params.rejected_authors, "string", "") + self.params.accepted_hostgroups = self.common:if_wrong_type(self.params.accepted_hostgroups, "string", "") + self.params.rejected_hostgroups = self.common:if_wrong_type(self.params.rejected_hostgroups, "string", "") + self.params.accepted_servicegroups = self.common:if_wrong_type(self.params.accepted_servicegroups, "string", "") + self.params.rejected_servicegroups = self.common:if_wrong_type(self.params.rejected_servicegroups, "string", "") + self.params.accepted_bvs = self.common:if_wrong_type(self.params.accepted_bvs, "string", "") + self.params.rejected_bvs = self.common:if_wrong_type(self.params.rejected_bvs, "string", "") + self.params.accepted_pollers = self.common:if_wrong_type(self.params.accepted_pollers, "string", "") + self.params.rejected_pollers = self.common:if_wrong_type(self.params.rejected_pollers, "string", "") + self.params.host_severity_threshold = self.common:if_wrong_type(self.params.host_severity_threshold, "number", nil) + self.params.service_severity_threshold = self.common:if_wrong_type(self.params.service_severity_threshold, "number", nil) + self.params.host_severity_operator = self.common:if_wrong_type(self.params.host_severity_operator, "string", ">=") + self.params.service_severity_operator = self.common:if_wrong_type(self.params.service_severity_operator, "string", ">=") + self.params.ack_host_status = self.common:ifnil_or_empty(self.params.ack_host_status, self.params.host_status) + self.params.ack_service_status = self.common:ifnil_or_empty(self.params.ack_service_status, self.params.service_status) + self.params.dt_host_status = self.common:ifnil_or_empty(self.params.dt_host_status, self.params.host_status) + self.params.dt_service_status = self.common:ifnil_or_empty(self.params.dt_service_status, self.params.service_status) + self.params.enable_host_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_host_status_dedup, 0) + self.params.enable_service_status_dedup = self.common:check_boolean_number_option_syntax(self.params.enable_service_status_dedup, 0) + self.params.send_data_test = self.common:check_boolean_number_option_syntax(self.params.send_data_test, 0) + self.params.proxy_address = self.common:if_wrong_type(self.params.proxy_address, "string", "") + self.params.proxy_protocol = self.common:if_wrong_type(self.params.proxy_protocol, "string", "http") + self.params.proxy_port = self.common:if_wrong_type(self.params.proxy_port, "number", "") + self.params.proxy_username = self.common:if_wrong_type(self.params.proxy_username, "string", "") + self.params.proxy_password 
= self.common:if_wrong_type(self.params.proxy_password, "string", "") + self.params.connection_timeout = self.common:if_wrong_type(self.params.connection_timeout, "number", 60) + self.params.allow_insecure_connection = self.common:number_to_boolean(self.common:check_boolean_number_option_syntax(self.params.allow_insecure_connection, 0)) + self.params.logfile = self.common:ifnil_or_empty(self.params.logfile, "/var/log/centreon-broker/stream-connector.log") + self.params.log_level = self.common:ifnil_or_empty(self.params.log_level, 1) + self.params.log_curl_commands = self.common:check_boolean_number_option_syntax(self.params.log_curl_commands, 0) + self.params.use_long_output = self.common:check_boolean_number_option_syntax(self.params.use_long_output, 1) + self.params.remove_line_break_in_output = self.common:check_boolean_number_option_syntax(self.params.remove_line_break_in_output, 1) + self.params.output_line_break_replacement_character = self.common:if_wrong_type(self.params.output_line_break_replacement_character, "string", " ") + self.params.metric_name_regex = self.common:if_wrong_type(self.params.metric_name_regex, "string", "") + self.params.metric_replacement_character = self.common:ifnil_or_empty(self.params.metric_replacement_character, "_") + self.params.output_size_limit = self.common:if_wrong_type(self.params.output_size_limit, "number", "") + + if self.params.accepted_hostgroups ~= '' and self.params.rejected_hostgroups ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_hostgroups and rejected_hostgroups cannot be used together. None will be used.") + end + if self.params.accepted_servicegroups ~= '' and self.params.rejected_servicegroups ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_servicegroups and rejected_servicegroups cannot be used together. None will be used.") + end + if self.params.accepted_bvs ~= '' and self.params.rejected_bvs ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_bvs and rejected_bvs cannot be used together. None will be used.") + end + if self.params.accepted_pollers ~= '' and self.params.rejected_pollers ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_pollers and rejected_pollers cannot be used together. None will be used.") + end + if self.params.accepted_authors ~= '' and self.params.rejected_authors ~= '' then + self.logger:error("[sc_params:check_params]: Parameters accepted_authors and rejected_authors cannot be used together. None will be used.") + end + + -- handle some dedicated parameters that can use lua patterns (such as accepted_hosts and accepted_services) + self:build_and_validate_filters_pattern({"accepted_hosts", "accepted_services"}) +end + +--- get_kafka_params: retrieve the kafka parameters and store them in the kafka_config table +-- @param kafka_config (object) object instance of kafka_config +-- @param params (table) the list of parameters from broker web configuration +function ScParams:get_kafka_params(kafka_config, params) + for param_name, param_value in pairs(params) do + -- check if param starts with _sc_kafka_ (meaning it is a parameter for kafka) + if string.find(param_name, "^_sc_kafka_") ~= nil then + -- remove the _sc_kafka_ prefix and store the param in a dedicated kafka table + kafka_config[string.gsub(param_name, "_sc_kafka_", "")] = param_value + self.logger:notice("[sc_param:get_kafka_params]: " .. tostring(param_name) + .. " parameter with value " .. tostring(param_value) .. 
" added to kafka_config") + end + end +end + +--- is_mandatory_config_set: check if the mandatory parameters required by a stream connector are set +-- @param mandatory_params (table) the list of mandatory parameters +-- @param params (table) the list of parameters from broker web configuration +-- @eturn true|false (boolean) +function ScParams:is_mandatory_config_set(mandatory_params, params) + for index, mandatory_param in ipairs(mandatory_params) do + if not params[mandatory_param] or params[mandatory_param] == "" then + self.logger:error("[sc_param:is_mandatory_config_set]: " .. tostring(mandatory_param) + .. " parameter is not set in the stream connector web configuration (or value is empty)") + return false + end + + -- add the mandatory param name in the list of the standard params and set its value to the user provided param value + self.params[mandatory_param] = params[mandatory_param] + end + + return true +end + +--- load_event_format_file: load a json file which purpose is to serve as a template to format events +-- @param json_string [opt] (boolean) convert template from a lua table to a json string +-- @return true|false (boolean) if file is a valid template file or not +function ScParams:load_event_format_file(json_string) + -- return if there is no file configured + if self.params.format_file == "" or self.params.format_file == nil then + return false + end + + local retval, content = self.common:load_json_file(self.params.format_file) + + -- return if we couldn't load the json file + if not retval then + return false + end + + -- initiate variables + local categories = self.params.bbdo.categories + local elements = self.params.bbdo.elements + local tpl_category + local tpl_element + + -- store format template in their appropriate category/element table + for cat_el, format in pairs(content) do + tpl_category, tpl_element = string.match(cat_el, "^(%w+)_(.*)") + + -- convert back to json if + if json_string then + format = broker.json_encode(format) + end + + self.params.format_template[categories[tpl_category].id][elements[tpl_element].id] = format + end + + return true +end + +--- load_custom_code_file: load a custom code which purpose is to enhance stream connectors possibilities without having to edit any standard code +-- @param file (string) the file that needs to be loaded (example: /etc/centreon-broker/sc-custom-code.lua) +-- @return true|false (boolean) if file is a valid custom code file or not +function ScParams:load_custom_code_file(custom_code_file) + -- return if there is no file configured + if self.params.custom_code_file == "" or self.params.custom_code_file == nil then + return true + end + + local file = io.open(custom_code_file, "r") + + -- return false if we can't open the file + if not file then + self.logger:error("[sc_params:load_custom_code_file]: couldn't open file " + .. tostring(custom_code_file) .. ". Make sure your file is there and that it is readable by centreon-broker") + return false + end + + -- get content of the file + local file_content = file:read("*a") + io.close(file) + + -- check if it returns self, true or self, false + for return_value in string.gmatch(file_content, "return (.-)\n") do + if return_value ~= "self, true" and return_value ~= "self, false" then + self.logger:error("[sc_params:load_custom_code_file]: your custom code file: " .. tostring(custom_code_file) + .. " is returning wrong values (" .. tostring(return_value) .. "). 
It must only return 'self, true' or 'self, false'") + return false + end + end + + -- check if it is valid lua code + local custom_code, error = loadfile(custom_code_file) + + if not custom_code then + self.logger:error("[sc_params:load_custom_code_file]: custom_code_file doesn't contain valid lua code. Error is: " .. tostring(error)) + return false + end + + self.params.custom_code = custom_code + return true +end + +function ScParams:build_accepted_elements_info() + local categories = self.params.bbdo.categories + self.params.accepted_elements_info = {} + + -- list all accepted elements + for _, accepted_element in ipairs(self.common:split(self.params.accepted_elements, ",")) do + -- try to find element in known categories + for category_name, category_info in pairs(categories) do + if self.params.element_mapping[category_info.id][accepted_element] then + -- if found, store information in a dedicated table + self.params.accepted_elements_info[accepted_element] = { + category_id = category_info.id, + category_name = category_name, + element_id = self.params.element_mapping[category_info.id][accepted_element], + element_name = accepted_element + } + end + end + end +end + +--- validate_pattern_param: check if a parameter has a valid lua pattern +-- @param param_name (string) the name of the parameter +-- @param param_value (string) the Lua pattern to test +-- @return param_value (string) either the param value if pattern is valid, empty string otherwise +function ScParams:validate_pattern_param(param_name, param_value) + if not self.common:is_valid_pattern(param_value) then + self.logger:error("[sc_params:validate_pattern_param]: couldn't validate Lua pattern: " .. tostring(param_value) + .. " for parameter: " .. tostring(param_name) .. ". The filter will be reset to an empty value.") + return "" + end + + return param_value +end + +--- build_and_validate_filters_pattern: make sure lua patterns are valid and build a table of patterns for each parameter in the given list +-- @param param_list (table) a list of all parameters that must be checked. +--[[ + example: self.params.accepted_hosts value is "foo.*,.*bar.*" + this method will generate the following parameter + self.params.accepted_hosts_pattern_list = { + "foo.*", + ".*bar.*" + } +]]-- +function ScParams:build_and_validate_filters_pattern(param_list) + local temp_pattern_table + + -- we need to build a table containing all patterns for each filter compatible with this feature + for index, param_name in ipairs(param_list) do + self.params[param_name .. "_pattern_list"] = {} + + -- we try to split the pattern in multiple sub patterns if option is enabled + -- this option is here to overcome the lack of alternation operator ("|" character in POSIX regex) in Lua regex + if self.params[param_name .. "_enable_split_pattern"] == 1 then + temp_pattern_table = self.common:split(self.params[param_name], self.params[param_name .. "_split_character"]) + + for index, temp_pattern in ipairs(temp_pattern_table) do + -- each sub pattern must be a valid standalone pattern. We are not here to develop regex in Lua + if self.common:is_valid_pattern(temp_pattern) then + table.insert(self.params[param_name .. "_pattern_list"], temp_pattern) + self.logger:notice("[sc_params:build_and_validate_filters_pattern]: adding " .. tostring(temp_pattern) + .. " to the list of filtering patterns for parameter: " .. param_name) + else + -- if the sub pattern is not valid, just ignore it + self.logger:error("[sc_params:build_and_validate_filters_pattern]: ignoring pattern for param: " + ..
param_name .. " because after splitting the string:" .. param_name + .. ", we end up with the following pattern: " .. tostring(temp_pattern) .. " which is not a valid Lua pattern") + end + end + else + table.insert(self.params[param_name .. "_pattern_list"], self.params[param_name]) + end + end +end + +return sc_params \ No newline at end of file diff --git a/stream-connectors/modules/centreon-stream-connectors-lib/sc_test.lua b/stream-connectors/modules/centreon-stream-connectors-lib/sc_test.lua new file mode 100644 index 00000000000..d82e076b397 --- /dev/null +++ b/stream-connectors/modules/centreon-stream-connectors-lib/sc_test.lua @@ -0,0 +1,31 @@ +#!/usr/bin/lua + +--- +-- Test module to help check modules reliability +-- @module sc_test +-- @alias sc_test + +local sc_test = {} + +function sc_test.compare_result(expected, result) + local state = '' + if expected == result then + state = '\27[32mOK\27[0m' + else + state = '\27[31mNOK\27[0m' + end + + return "[EXPECTED] " .. tostring(expected) .. " [RESULT] " .. tostring(result) .. ' ' .. state +end + +function sc_test.compare_tables(expected, result) + for i, v in pairs(expected) do + if v ~= result[i] then + return 'tables are not equal \27[31mNOK\27[0m' + end + end + + return 'tables are equal \27[32mOK\27[0m' +end + +return sc_test \ No newline at end of file diff --git a/stream-connectors/modules/docs/README.md b/stream-connectors/modules/docs/README.md new file mode 100644 index 00000000000..74dfdc050e8 --- /dev/null +++ b/stream-connectors/modules/docs/README.md @@ -0,0 +1,215 @@ +# Stream Connectors lib documentation + +- [Stream Connectors lib documentation](#stream-connectors-lib-documentation) + - [Libraries list](#libraries-list) + - [sc\_common methods](#sc_common-methods) + - [sc\_logger methods](#sc_logger-methods) + - [sc\_broker methods](#sc_broker-methods) + - [sc\_param methods](#sc_param-methods) + - [sc\_event methods](#sc_event-methods) + - [sc\_macros methods](#sc_macros-methods) + - [sc\_flush methods](#sc_flush-methods) + - [sc\_metrics methods](#sc_metrics-methods) + - [google.bigquery.bigquery methods](#googlebigquerybigquery-methods) + - [google.auth.oauth methods](#googleauthoauth-methods) + - [Additionnal documentations](#additionnal-documentations) + +## Libraries list + +| Lib name | Content | Usage | Documentation | +| ------------------------ | ------------------------------------------------ | ------------------------------------------------------------------------- | -------------------------------------------- | +| sc_common | basic methods for lua | you can use it when you want to simplify your code | [Documentation](sc_common.md) | +| sc_logger | methods that handle logging with centreon broker | When you want to log a message from your stream connector | [Documentation](sc_logger.md) | +| sc_broker | wrapper methods for broker cache | when you need something from the broker cache | [Documentation](sc_broker.md) | +| sc_param | handles parameters for stream connectors | when you want to initiate a stream connector with all standard parameters | [Documentation](sc_param.md) | +| sc_event | methods to help you interact with a broker event | when you want to check event data | [Documentation](sc_event.md) | +| sc_macros | methods to help you convert macros | when you want to use macros in your stream connector | [Documentation](sc_macros.md) | +| sc_flush | methods to help you handle queues of event | when you want to flush queues of various kind of events | [Documentation](sc_flush.md) | +| 
sc_metrics | methods to help you handle metrics | when you want to send metrics and not just events | [Documentation](sc_metrics.md) | +| google.bigquery.bigquery | methods to help you handle bigquery data | when you want to generate table schemas for bigquery | [Documentation](google/bigquery/bigquery.md) | +| google.auth.oauth | methods to help you authenticate to google api | when you want to authenticate yourself on the google api | [Documentation](google/auth/oauth.md) | + +## sc_common methods + +| Method name | Method description | Link | +| ---------------------------------- | ------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| ifnil_or_empty | check if a variable is empty or nil and replace it with a default value if it is the case | [Documentation](sc_common.md#ifnil_or_empty-method) | +| if_wrong_type | check the type of a variable, if it is wrong, replace the variable with a default value | [Documentation](sc_common.md#if_wrong_type-method) | +| boolean_to_number | change a true/false boolean to a 1/0 value | [Documentation](sc_common.md#boolean_to_number-method) | +| number_to_boolean | change a 0/1 number to a false/true value | [Documentation](sc_common.md#number_to_boolean-method) | +| check_boolean_number_option_syntax | make sure that a boolean is 0 or 1, if that's not the case, replace it with a default value | [Documentation](sc_common.md#check_boolean_number_option_syntax-method) | +| split | split a string using a separator (default is ",") and store each part in a table | [Documentation](sc_common.md#split-method) | +| compare_numbers | compare two numbers using the given mathematical operator and return true or false | [Documentation](sc_common.md#compare_numbers-method) | +| generate_postfield_param_string | convert a table of parameters into a url encoded parameter string | [Documentation](sc_common.md#generate_postfield_param_string-method) | +| load_json_file | load a json file and parse it | [Documentation](sc_common.md#load_json_file-method) | +| json_escape | escape json characters in a string | [Documentation](sc_common.md#json_escape-method) | +| xml_escape | escape xml characters in a string | [Documentation](sc_common.md#xml_escape-method) | +| lua_regex_escape | escape lua regex special characters in a string | [Documentation](sc_common.md#lua_regex_escape-method) | +| dumper | dump any variable for debugging purposes | [Documentation](sc_common.md#dumper-method) | +| trim | trim spaces (or provided character) at the beginning and the end of a string | [Documentation](sc_common.md#trim-method) | +| get_bbdo_version | returns the first digit of the bbdo protocol version | [Documentation](sc_common.md#get_bbdo_version-method) | +| is_valid_pattern | check if a Lua pattern is valid | [Documentation](sc_common.md#is_valid_pattern-method) | + +## sc_logger methods + +| Method name | Method description | Link | +| ---------------- | ----------------------------------------------------- | ----------------------------------------------------- | +| error | write an error message in the log file | [Documentation](sc_logger.md#error-method) | +| warning | write a warning message in the log file | [Documentation](sc_logger.md#warning-method) | +| notice | write a notice/info message in the log file | [Documentation](sc_logger.md#notice-method) | +| info | write an info message in the log file | [Documentation](sc_logger.md#info-method) | +|
debug | write a debug message in the log file | [Documentation](sc_logger.md#debug-method) | +| log_curl_command | create and log a curl command using given parameters | [Documentation](sc_logger.md#log_curl_command-method) | + +## sc_broker methods + +| Method name | Method description | Link | +| --------------------- | ------------------------------------------------------------------------------------------ | ---------------------------------------------------------- | +| get_host_all_infos | retrieve all information about a host from the broker cache | [Documentation](sc_broker.md#get_host_all_infos-method) | +| get_service_all_infos | retrieve all information about a service from the broker cache | [Documentation](sc_broker.md#get_service_all_infos-method) | +| get_host_infos | retrieve one or more specific pieces of information about a host from the broker cache | [Documentation](sc_broker.md#get_host_infos-method) | +| get_service_infos | retrieve one or more specific pieces of information about a service from the broker cache | [Documentation](sc_broker.md#get_service_infos-method) | +| get_hostgroups | retrieve the hostgroups linked to a host from the broker cache | [Documentation](sc_broker.md#get_hostgroups-method) | +| get_servicegroups | retrieve the servicegroups linked to a service from the broker cache | [Documentation](sc_broker.md#get_servicegroups-method) | +| get_severity | retrieve the severity of a host or a service from the broker cache | [Documentation](sc_broker.md#get_severity-method) | +| get_instance | retrieve the name of the poller using the instance id from the broker cache | [Documentation](sc_broker.md#get_instance-method) | +| get_ba_infos | retrieve the name and description of a BA from the broker cache | [Documentation](sc_broker.md#get_ba_infos-method) | +| get_bvs_infos | retrieve the name and description of all BVs linked to a BA | [Documentation](sc_broker.md#get_bvs_infos-method) | + +## sc_param methods + +| Method name | Method description | Link | +| ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- | +| param_override | replace default values of params with the ones provided by users in the web configuration of the stream connector | [Documentation](sc_param.md#param_override-method) | +| check_params | make sure that the default stream connector params provided by the user from the web configuration are valid.
If not, use the default value | [Documentation](sc_param.md#check_params-method) | +| is_mandatory_config_set | check that all mandatory parameters for a stream connector are set | [Documentation](sc_param.md#is_mandatory_config_set-method) | +| get_kafka_params | retrieve Kafka dedicated parameters from the parameter list and put them in the provided kafka_config object | [Documentation](sc_param.md#get_kafka_params-method) | +| load_event_format_file | load a file that serves as a template for formatting events | [Documentation](sc_param.md#load_event_format_file-method) | +| build_accepted_elements_info | build a table that stores information about accepted elements | [Documentation](sc_param.md#build_accepted_elements_info-method) | +| validate_pattern_param | check if a parameter has a valid Lua pattern as a value | [Documentation](sc_param.md#validate_pattern_param-method) | +| build_and_validate_filters_pattern | build a table that stores information about patterns for compatible parameters | [Documentation](sc_param.md#build_and_validate_filters_pattern-method) | + +## sc_event methods + +| Method name | Method description | Link | +| ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------- | +| is_valid_category | check if the category of the event is accepted according to the stream connector params | [Documentation](sc_event.md#is_valid_category-method) | +| is_valid_element | check if the element of the event is accepted according to the stream connector params | [Documentation](sc_event.md#is_valid_element-method) | +| is_valid_event | check if the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event-method) | +| is_valid_neb_event | check if the neb event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_neb_event-method) | +| is_valid_host_status_event | check if the "host status" event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_host_status_event-method) | +| is_valid_service_status_event | check if the "service status" event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_service_status_event-method) | +| is_valid_host | check if the host name and/or ID are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_host-method) | +| is_valid_service | check if the service description and/or ID are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_service-method) | +| is_valid_event_states | check if the state (HARD/SOFT), acknowledgement state and downtime state are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_states-method) | +| is_valid_event_status | check if the status (OK, DOWN...)
of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_status-method) | +| is_valid_event_state_type | check if the state (HARD/SOFT) of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_state_type-method) | +| is_valid_event_acknowledge_state | check if the acknowledgement state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_acknowledge_state-method) | +| is_valid_event_downtime_state | check if the downtime state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_downtime_state-method) | +| is_valid_event_flapping_state | check if the flapping state of the event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_event_flapping_state-method) | +| is_valid_hostgroup | check if the host is in an accepted hostgroup according to the stream connector params | [Documentation](sc_event.md#is_valid_hostgroup-method) | +| find_hostgroup_in_list | check if one of the hostgroups of the event is in the list of accepted hostgroups provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_hostgroup_in_list-method) | +| is_valid_servicegroup | check if the service is in an accepted servicegroup according to the stream connector params | [Documentation](sc_event.md#is_valid_servicegroup-method) | +| find_servicegroup_in_list | check if one of the servicegroups of the event is in the list of accepted servicegroups provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_servicegroup_in_list-method) | +| is_valid_bam_event | check if the BAM event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_bam_event-method) | +| is_valid_ba | check if the BA name and/or ID are valid according to the stream connector params | [Documentation](sc_event.md#is_valid_ba-method) | +| is_valid_ba_status_event | check if the "ba status" (OK, WARNING, CRITICAL) event is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_ba_status_event-method) | +| is_valid_ba_downtime_state | check if the BA downtime state is valid according to the stream connector params | [Documentation](sc_event.md#is_valid_ba_downtime_state-method) | +| is_valid_ba_acknowledge_state | DOES NOTHING | [Documentation](sc_event.md#is_valid_ba_acknowledge_state-method) | +| is_valid_bv | check if the BA is in an accepted BV according to the stream connector params | [Documentation](sc_event.md#is_valid_bv-method) | +| find_bv_in_list | check if one of the BVs of the event is in the list of accepted BVs provided in the stream connector configuration. Stops at first match | [Documentation](sc_event.md#find_bv_in_list-method) | +| is_valid_poller | check if the host is monitored from an accepted poller according to the stream connector params | [Documentation](sc_event.md#is_valid_poller-method) | +| find_poller_in_list | check if the poller that monitors the host is in the list of accepted pollers provided in the stream connector configuration.
Stops at first match | [Documentation](sc_event.md#find_poller_in_list-method) | +| is_valid_host_severity | check if a host has a valid severity | [Documentation](sc_event.md#is_valid_host_severity-method) | +| is_valid_service_severity | check if a service has a valid severity | [Documentation](sc_event.md#is_valid_service_severity-method) | +| is_valid_acknowledgement_event | check if the acknowledgement event is valid | [Documentation](sc_event.md#is_valid_acknowledgement_event-method) | +| is_valid_author | check if the author of a comment is accepted | [Documentation](sc_event.md#is_valid_author-method) | +| is_valid_downtime_event | check if the downtime event is valid | [Documentation](sc_event.md#is_valid_downtime_event-method) | +| is_host_status_event_duplicated | check if the host_status event is duplicated | [Documentation](sc_event.md#is_host_status_event_duplicated-method) | +| is_service_status_event_duplicated | check if the service_status event is duplicated | [Documentation](sc_event.md#is_service_status_event_duplicated-method) | +| is_downtime_event_useless | checks if the downtime event is a useful one, meaning that it carries valuable data regarding the actual start or end of the downtime | [Documentation](sc_event.md#is_downtime_event_useless-method) | +| is_valid_downtime_event_start | checks that the downtime event is about the actual start of the downtime | [Documentation](sc_event.md#is_valid_downtime_event_start-method) | +| is_valid_downtime_event_end | checks that the downtime event is about the actual end of the downtime | [Documentation](sc_event.md#is_valid_downtime_event_end-method) | +| is_valid_storage_event | DOES NOTHING (deprecated, you should use neb events to send metrics) | [Documentation](sc_event.md#is_valid_storage_event-method) | + +## sc_macros methods + +| Method name | Method description | Link | +| ------------------------------------------------ | --------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | +| replace_sc_macro | replace a stream connector macro with its value | [Documentation](sc_macros.md#replace_sc_macro-method) | +| get_cache_macro | retrieve a macro value in the cache | [Documentation](sc_macros.md#get_cache_macro-method) | +| get_event_macro | retrieve a macro value in the event | [Documentation](sc_macros.md#get_event_macro-method) | +| get_group_macro | retrieve a macro from groups (hostgroups, servicegroups, business views) | [Documentation](sc_macros.md#get_group_macro-method) | +| convert_centreon_macro | replace a Centreon macro with its value | [Documentation](sc_macros.md#convert_centreon_macro-method) | +| get_centreon_macro | transform a Centreon macro into a stream connector macro | [Documentation](sc_macros.md#get_centreon_macro-method) | +| get_transform_flag | try to find a transformation flag in the macro name | [Documentation](sc_macros.md#get_transform_flag-method) | +| transform_date | transform a timestamp into a human readable format | [Documentation](sc_macros.md#transform_date-method) | +| transform_short | keep the first line of a string | [Documentation](sc_macros.md#transform_short-method) | +| transform_type | convert 0 or 1 into SOFT or HARD | [Documentation](sc_macros.md#transform_type-method) | +| transform_state | convert a status code into its matching human readable status (OK, WARNING...)
| [Documentation](sc_macros.md#transform_state-method) | +| transform_number | convert a string into a number | [Documentation](sc_macros.md#transform_number-method) | +| transform_string | convert anything into a string | [Documentation](sc_macros.md#transform_string-method) | +| get_hg_macro | retrieves hostgroup information and makes it available as a macro | [Documentation](sc_macros.md#get_hg_macro-method) | +| get_sg_macro | retrieves servicegroup information and makes it available as a macro | [Documentation](sc_macros.md#get_sg_macro-method) | +| get_bv_macro | retrieves business view information and makes it available as a macro | [Documentation](sc_macros.md#get_bv_macro-method) | +| build_group_macro_value | build the value that must replace the macro (it will also put it in the desired format) | [Documentation](sc_macros.md#build_group_macro_value-method) | +| group_macro_format_table | transforms the given macro value into a table | [Documentation](sc_macros.md#group_macro_format_table-method) | +| group_macro_format_inline | transforms the given macro value into a string with values separated by commas | [Documentation](sc_macros.md#group_macro_format_inline-method) | +| build_converted_string_for_cache_and_event_macro | replace event or cache macro in a string that may contain them | [Documentation](sc_macros.md#build_converted_string_for_cache_and_event_macro-method) | + +## sc_flush methods + +| Method name | Method description | Link | +| ------------------------- | ---------------------------------------------------------------------- | ------------------------------------------------------------- | +| add_queue_metadata | add specific metadata to a queue | [Documentation](sc_flush.md#add_queue_metadata-method) | +| flush_all_queues | try to flush all queues according to accepted elements | [Documentation](sc_flush.md#flush_all_queues-method) | +| reset_all_queues | put all queues back to their initial state after flushing their events | [Documentation](sc_flush.md#reset_all_queues-method) | +| get_queues_size | get the number of events stored in all the queues | [Documentation](sc_flush.md#get_queues_size-method) | +| flush_mixed_payload | flush a payload that contains various types of events | [Documentation](sc_flush.md#flush_mixed_payload-method) | +| flush_homogeneous_payload | flush a payload that contains a single type of event | [Documentation](sc_flush.md#flush_homogeneous_payload-method) | +| flush_payload | flush a payload | [Documentation](sc_flush.md#flush_payload-method) | + +## sc_metrics methods + +| Method name | Method description | Link | +| ----------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------- | +| is_valid_bbdo_element | checks if the event is in an accepted category and is an appropriate element | [Documentation](sc_metrics.md#is_valid_bbdo_element-method) | +| is_valid_metric_event | makes sure that the metric event is valid if it is a **host, service, service_status or kpi_event** event | [Documentation](sc_metrics.md#is_valid_metric_event-method) | +| is_valid_host_metric_event | makes sure that the metric event is a valid host metric event | [Documentation](sc_metrics.md#is_valid_host_metric_event-method) | +| is_valid_service_metric_event | makes sure that the metric event is a valid service metric event | [Documentation](sc_metrics.md#is_valid_service_metric_event-method) | +|
is_valid_kpi_metric_event | makes sure that the metric event is a valid KPI metric event | [Documentation](sc_metrics.md#is_valid_kpi_metric_event-method) | +| is_valid_perfdata | makes sure that the performance data is valid | [Documentation](sc_metrics.md#is_valid_perfdata-method) | +| build_metric | use the stream connector format method to parse every metric in the event | [Documentation](sc_metrics.md#build_metric-method) | + +## google.bigquery.bigquery methods + +| Method name | Method description | Link | +| ---------------------------- | ---------------------------------------------------------- | -------------------------------------------------------------------------------- | +| get_tables_schema | create all table schemas depending on the configuration | [Documentation](google/bigquery/bigquery.md#get_tables_schema-method) | +| default_host_table_schema | create the default table schema for host_status events | [Documentation](google/bigquery/bigquery.md#default_host_table_schema-method) | +| default_service_table_schema | create the default table schema for service_status events | [Documentation](google/bigquery/bigquery.md#default_service_table_schema-method) | +| default_ack_table_schema | create the default table schema for acknowledgement events | [Documentation](google/bigquery/bigquery.md#default_ack_table_schema-method) | +| default_dt_table_schema | create the default table schema for downtime events | [Documentation](google/bigquery/bigquery.md#default_dt_table_schema-method) | +| default_ba_table_schema | create the default table schema for ba_status events | [Documentation](google/bigquery/bigquery.md#default_ba_table_schema-method) | +| load_tables_schema_file | create table schemas based on a json file | [Documentation](google/bigquery/bigquery.md#load_tables_schema_file-method) | +| build_table_schema | create table schemas based on stream connector parameters | [Documentation](google/bigquery/bigquery.md#build_table_schema-method) | + +## google.auth.oauth methods + +| Method name | Method description | Link | +| ---------------- | ------------------------------------------- | ------------------------------------------------------------- | +| create_jwt_token | create a jwt token | [Documentation](google/auth/oauth.md#create_jwt_token-method) | +| get_key_file | retrieve information from a key file | [Documentation](google/auth/oauth.md#get_key_file-method) | +| create_jwt_claim | create the claim for the jwt token | [Documentation](google/auth/oauth.md#create_jwt_claim-method) | +| create_signature | create the signature for the jwt token | [Documentation](google/auth/oauth.md#create_signature-method) | +| get_access_token | get a google access token using a jwt token | [Documentation](google/auth/oauth.md#get_access_token-method) | +| curl_google | use curl to get an access token | [Documentation](google/auth/oauth.md#curl_google-method) | + +## Additional documentation + +| Description | Link | +| ------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | +| learn how to create a custom format using a format file | [Documentation](./templating.md) | +| learn how to create custom code for your stream connector | [Documentation](./custom_code.md) | +| have a look at all the available mappings and how to use them | [Documentation](./mappings.md) | +| have a look at the event structure | [Documentation](./broker_data_structure.md) and [Documentation](https://docs.centreon.com/docs/developer/developer-broker-mapping/) |
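As a quick start, here is a minimal and purely illustrative sketch of how the first modules of the list above fit together (the log file path and severity below are arbitrary example values, see each module's documentation for real usage): + +```lua +-- load modules +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_common = require("centreon-stream-connectors-lib.sc_common") + +-- create a logger instance (example log file and severity) +local test_logger = sc_logger.new("/var/log/centreon-broker/my-connector.log", 1) + +-- sc_common only needs a logger instance +local test_common = sc_common.new(test_logger) + +-- use a documented sc_common method and log the result +test_logger:notice("trimmed value: " .. test_common:trim("  hello  ")) +```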
diff --git a/stream-connectors/modules/docs/broker_data_structure.md b/stream-connectors/modules/docs/broker_data_structure.md new file mode 100644 index 00000000000..c0864a3db4e --- /dev/null +++ b/stream-connectors/modules/docs/broker_data_structure.md @@ -0,0 +1,208 @@ +# Broker data structure documentation + +- [Broker data structure documentation](#broker-data-structure-documentation) + - [Introduction](#introduction) + - [NEB Category](#neb-category) + - [Service_status](#service_status) + - [Host_status](#host_status) + - [Downtime](#downtime) + - [Downtime actual start](#downtime-actual-start) + - [Downtime actual end](#downtime-actual-end) + - [Acknowledgements](#acknowledgements) + - [Acknowledgement actual start](#acknowledgement-actual-start) + - [Acknowledgement actual end](#acknowledgement-actual-end) + +## Introduction + +The purpose of this documentation is to provide a quick overview of what data structure you should expect from a broker event. +This documentation will not explain the meaning of the structures. It is mostly a guide to help write Centreon Lua modules and stream connectors. + +## NEB Category + +### Service_status + +[BBDO documentation](https://docs.centreon.com/current/en/developer/developer-broker-mapping.html#service-status) + +| index | type | +| ------------------------ | ------- | +| acknowledged | boolean | +| acknowledgement_type | number | +| active_checks | boolean | +| category | number | +| check_attempt | number | +| check_command | string | +| check_interval | number | +| check_period | string | +| check_type | number | +| checked | boolean | +| element | number | +| enabled | boolean | +| event_handler | string | +| event_handler_enabled | boolean | +| execution_time | number | +| flap_detection | boolean | +| flapping | boolean | +| host_id | number | +| last_check | number | +| last_hard_state | number | +| last_hard_state_change | number | +| last_state_change | number | +| last_time_ok | number | +| last_update | number | +| latency | number | +| max_check_attempts | number | +| next_check | number | +| no_more_notifications | boolean | +| notification_number | number | +| notify | boolean | +| obsess_over_service | boolean | +| output | string | +| passive_checks | boolean | +| percent_state_change | number | +| perfdata | string | +| retry_interval | number | +| scheduled_downtime_depth | number | +| service_id | number | +| should_be_scheduled | boolean | +| state | number | +| state_type | number | + +### Host_status + +[BBDO documentation](https://docs.centreon.com/current/en/developer/developer-broker-mapping.html#host-status) + +| index | type | +| ------------------------ | ------- | +| acknowledged | boolean | +| acknowledgement_type | number | +| active_checks | boolean | +| category | number | +| check_attempt | number | +| check_command | string | +| check_interval | number | +| check_period | string | +| check_type | number | +| checked | boolean | +| element | number | +| enabled | boolean | +| event_handler | string | +| event_handler_enabled | boolean | +| execution_time | number | +| flap_detection | boolean | +| flapping | boolean | +| host_id | number | +| last_check | number | +| last_hard_state | number | +| last_hard_state_change | number | +| last_state_change | number | +| last_time_up | number | +| last_update | number | +| latency | number | +| max_check_attempts | number | +| next_check | number | +|
no_more_notifications | boolean | +| notification_number | number | +| notify | boolean | +| obsess_over_host | boolean | +| output | string | +| passive_checks | boolean | +| percent_state_change | number | +| perfdata | string | +| retry_interval | number | +| scheduled_downtime_depth | number | +| should_be_scheduled | boolean | +| state | number | +| state_type | number | + +### Downtime + +[BBDO documentation](https://docs.centreon.com/current/en/developer/developer-broker-mapping.html#downtime) + +If you are using the [**is_valid_downtime_event method**](sc_event.md#is_valid_downtime_event-method) you'll also have access to a `state` index that will give you the status code of the host or service and a `cache` table. + +#### Downtime actual start + +| index | type | +| ----------------- | ------- | +| actual_start_time | number | +| author | string | +| cancelled | boolean | +| category | number | +| comment_data | string | +| duration | number | +| element | number | +| end_time | number | +| entry_time | number | +| fixed | boolean | +| host_id | number | +| instance_id | number | +| internal_id | number | +| service_id | number | +| start_time | number | +| started | boolean | +| type | number | + +#### Downtime actual end + +| index | type | +| ----------------- | ------- | +| actual_end_time | number | +| actual_start_time | number | +| author | string | +| cancelled | boolean | +| category | number | +| comment_data | string | +| deletion_time | number | +| duration | number | +| element | number | +| end_time | number | +| entry_time | number | +| fixed | boolean | +| host_id | number | +| instance_id | number | +| internal_id | number | +| service_id | number | +| start_time | number | +| started | boolean | +| type | number | + +### Acknowledgements + +[BBDO documentation](https://docs.centreon.com/current/en/developer/developer-broker-mapping.html#acknowledgement) + +#### Acknowledgement actual start + +| index | type | +| ------------------ | ------- | +| author | string | +| category | number | +| comment_data | string | +| element | number | +| entry_time | number | +| host_id | number | +| instance_id | number | +| notify_contacts | boolean | +| persistent_comment | boolean | +| service_id | number | +| state | number | +| sticky | boolean | +| type | number | + +#### Acknowledgement actual end + +| index | type | +| ------------------ | ------- | +| author | string | +| category | number | +| comment_data | string | +| deletion_time | number | +| element | number | +| entry_time | number | +| host_id | number | +| instance_id | number | +| notify_contacts | boolean | +| persistent_comment | boolean | +| service_id | number | +| state | number | +| sticky | boolean | +| type | number |
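To make these tables concrete, here is a deliberately minimal, illustrative `write` function that reads a few of the documented indexes straight from a raw broker event (without the stream connectors lib). The host/service distinction below relies on the `service_id` index only being present on service related events, as shown in the tables above: + +```lua +-- illustrative only: log a few of the indexes documented above +function write(event) + -- state and output exist on both host_status and service_status events + if event.service_id then + broker_log:info(1, "service " .. tostring(event.service_id) .. " on host " + .. tostring(event.host_id) .. " has state: " .. tostring(event.state)) + elseif event.host_id then + broker_log:info(1, "host " .. tostring(event.host_id) .. " has state: " .. tostring(event.state)) + end + + return true +end +```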
diff --git a/stream-connectors/modules/docs/custom_code.md b/stream-connectors/modules/docs/custom_code.md new file mode 100644 index 00000000000..9e74aaf5afd --- /dev/null +++ b/stream-connectors/modules/docs/custom_code.md @@ -0,0 +1,190 @@ +# Stream connectors and custom code + +- [Stream connectors and custom code](#stream-connectors-and-custom-code) + - [Introduction](#introduction) + - [When is it needed?](#when-is-it-needed) + - [How to configure your stream connector](#how-to-configure-your-stream-connector) + - [Mandatory code](#mandatory-code) + - [Available data for your custom code](#available-data-for-your-custom-code) + - [Macros, templating and custom code](#macros-templating-and-custom-code) + - [Filter events](#filter-events) + - [Use all the above chapters](#use-all-the-above-chapters) + - [Add methods from other modules](#add-methods-from-other-modules) + - [Add custom macros](#add-custom-macros) + +## Introduction + +Stream connectors offer the possibility to write custom code. The idea is to let people fully customize how their stream connector behaves while still using the Centreon standard stream connector. +Thanks to this feature, you will no longer have a customized stream connector, and you will not fear updating it to get access to the latest features. + +## When is it needed? + +It is needed in two cases (mostly): + +- you need more filters than the default ones. For example you want to filter out hosts that do not have *notes* +- you need to add data to your event payload + +## How to configure your stream connector + +In your stream connector configuration (broker output), you can add the following option + +| option name | value | type | +| ---------------- | --------------------------------------- | ------ | +| custom_code_file | /etc/centreon-broker/my-custom-code.lua | string | + +## Mandatory code + +Your custom code must respect three rules if you want it to work. + +It must start with + +```lua +local self = ... +``` + +It must end by returning the self variable and a boolean, followed by a new line. + +```lua +return self, true +-- new line after true +``` + +You can't do: + +```lua +-- ✘ bad, no space after the comma +return self,true +-- new line after true +``` + +nor + +```lua +-- ✘ bad, no new line after the return line +return self, true -- no new line after true +``` + +## Available data for your custom code + +Everything has been made to grant you access to all the useful information. It means that you can: + +- access the [params table](sc_param.md#default-parameters) and the parameters that are dedicated to the stream connector that you are using +- access the [event table](broker_data_structure.md) (you can also take a look at our [broker documentation](https://docs.centreon.com/docs/developer/developer-broker-mapping/)) +- access all the methods from: [event module](sc_event.md), [params module](sc_param.md), [logger module](sc_logger.md), [common module](sc_common.md), [broker module](sc_broker.md) and if you are using a metric stream connector [metrics module](sc_metrics.md) +- access all the broker daemon methods that are listed [here](https://docs.centreon.com/docs/developer/developer-broker-stream-connector/#the-broker-table) + +## Macros, templating and custom code + +Since stream connectors have been designed to be highly customizable, we have made a tool to change the data that you are sending. To do so, you use a custom format file ([documentation](templating.md)). In this file you can use macros ([documentation](sc_macros.md)). + +By using custom code you can create your own macros and it is very easy to do! Let's take a look at that. + +```lua +local self = ... + +self.event.my_custom_macro = "my custom macro value" + +return self, true +-- new line after true +``` + +Thanks to the above code, we are now able to use `{my_custom_macro}` as a new macro, and it will be replaced by the string `my custom macro value`. + +To sum up what we have seen: just add a new entry in the `self.event` table. Its key is going to be the name of your custom macro and that is it. + +## Filter events + +As explained [at the beginning](#when-is-it-needed), you can add your own filters to your data. Find below a rundown of the major steps that are done when using a stream connector + +1.
stream connector init (only done on cbd reload or restart) +2. filter events +3. format event +4. put event in a queue +5. send all events stored in the queue + +The second step has a set of filters but they may not be enough for you. This is where a custom code file can be useful. + +Let's keep our idea of filtering events with hosts that do not have **notes** and see what it looks like with real code + +```lua +local self = ... + +if not self.event.cache.host.notes or self.event.cache.host.notes == "" then + -- the boolean part of the return is here to tell the stream connector to ignore the event + return self, false +end + +-- if the host has a note then we let the stream connector continue its work on this event +return self, true +-- new line after true +``` + +## Use all the above chapters + +### Add methods from other modules + +What if we start logging what our custom code does? To do so, we can use [the warning method](sc_logger.md#warning-method) + +```lua +local self = ... + +if not self.event.cache.host.notes or self.event.cache.host.notes == "" then + -- use the warning method from the logger module + self.sc_logger:warning("[custom_code]: host: " + .. tostring(self.event.cache.host.name) .. " does not have notes, therefore we drop the event") + -- the boolean part of the return is here to tell the stream connector to ignore the event + return self, false +end + +-- if the host has a note then we let the stream connector continue its work on this event +return self, true +-- new line after true +``` + +Maybe you want a closer look at what is inside the `self.event` table. To do so, we can dump it in our logfile using [the dumper method](sc_common.md#dumper-method) + +```lua +local self = ... + +-- we dump the event table to have a closer look at all the available data from the event itself +-- and all the things that are in the cache that we may want to use +self.sc_logger:notice("[custom_code]: self.event table data: " .. self.sc_common:dumper(self.event)) + +if not self.event.cache.host.notes or self.event.cache.host.notes == "" then + -- use the warning method from the logger module + self.sc_logger:warning("[custom_code]: host: " + .. tostring(self.event.cache.host.name) .. " does not have notes, therefore we drop the event") + -- the boolean part of the return is here to tell the stream connector to ignore the event + return self, false +end + +-- if the host has a note then we let the stream connector continue its work on this event +return self, true +-- new line after true +``` + +### Add custom macros + +```lua +local self = ... + +-- we dump the event table to have a closer look at all the available data from the event itself +-- and all the things that are in the cache that we may want to use +self.sc_logger:notice("[custom_code]: self.event table data: " .. self.sc_common:dumper(self.event)) + +if not self.event.cache.host.notes or self.event.cache.host.notes == "" then + -- use the warning method from the logger module + self.sc_logger:warning("[custom_code]: host: " + .. tostring(self.event.cache.host.name) ..
" do not have notes, therefore, we drop the event") + -- the boolean part of the return is here to tell the stream connector to ignore the event + return self, false +end + +-- let say we can extract the origin of our host by using the first three letters of its name +self.event.origin = string.sub(tostring(self.event.cache.host.name), 1, 3) +-- we now have a custom macro called {origin} + +-- if the host has a note then we let the stream connector continue his work on this event +return self, true +-- new line after true +``` diff --git a/stream-connectors/modules/docs/google/auth/oauth.md b/stream-connectors/modules/docs/google/auth/oauth.md new file mode 100644 index 00000000000..fae2476084a --- /dev/null +++ b/stream-connectors/modules/docs/google/auth/oauth.md @@ -0,0 +1,217 @@ +# Documentation of the google oauth module + +- [Documentation of the google oauth module](#documentation-of-the-google-oauth-module) + - [Introduction](#introduction) + - [Module initialization](#module-initialization) + - [Module constructor](#module-constructor) + - [constructor: Example](#constructor-example) + - [create_jwt_token method](#create_jwt_token-method) + - [create_jwt_token: returns](#create_jwt_token-returns) + - [create_jwt_token: example](#create_jwt_token-example) + - [get_key_file method](#get_key_file-method) + - [get_key_file: returns](#get_key_file-returns) + - [get_key_file: example](#get_key_file-example) + - [create_jwt_claim method](#create_jwt_claim-method) + - [create_jwt_claim: returns](#create_jwt_claim-returns) + - [create_jwt_claim: example](#create_jwt_claim-example) + - [create_signature method](#create_signature-method) + - [create_signature: returns](#create_signature-returns) + - [create_signature: example](#create_signature-example) + - [get_access_token method](#get_access_token-method) + - [get_access_token: returns](#get_access_token-returns) + - [get_access_token: example](#get_access_token-example) + - [curl_google method](#curl_google-method) + - [curl_google: parameters](#curl_google-parameters) + - [curl_google: returns](#curl_google-returns) + - [curl_google: example](#curl_google-example) + +## Introduction + +The google oauth module provides methods to help with google api authentication. It has been made in OOP (object oriented programming) + +## Module initialization + +Since this is OOP, it is required to initiate your module + +### Module constructor + +Constructor can be initialized with three parameters, if the third one is not provided it will use a default value + +- params. This is a table of all the stream connectors parameters +- sc_common. This is an instance of the sc_common module +- sc_logger. 
This is an instance of the sc_logger module + +If you don't provide the sc_logger parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization)) + +### constructor: Example + +```lua +-- load modules +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local oauth = require("centreon-stream-connectors-lib.google.auth.oauth") + +-- initiate "mandatory" information for the logger module +local logfile = "/var/log/test_logger.log" +local severity = 1 + +-- create a new instance of the sc_common and sc_logger module +local test_logger = sc_logger.new(logfile, severity) +local test_common = sc_common.new(test_logger) + +-- some stream connector params +local params = { + my_param = "my_value" +} + +-- create a new instance of the google oauth module +local test_oauth = oauth.new(params, test_common, test_logger) +``` + +## create_jwt_token method + +The **create_jwt_token** method creates a jwt token. More information about the google JWT token [here](https://developers.google.com/identity/protocols/oauth2/service-account#authorizingrequests) + +head over the following chapters for more information + +- [get_key_file](#get_key_file-method) +- [create_jwt_claim](#create_jwt_claim-method) +- [create_signature](#create_signature-method) + +### create_jwt_token: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------------------------------------------- | +| true or false | boolean | yes | true if jwt token is created, false otherwise | + +### create_jwt_token: example + +```lua + +local result = test_oauth:create_jwt_token() +--> result is true or false +--> jwt token is stored in test_oauth.jwt_token if result is true +``` + +## get_key_file method + +The **get_key_file** method gets the information set in the key file. To do so, the **key_file_path** parameter must be set. + +### get_key_file: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | ---------------------------------------------------------- | +| true or false | boolean | yes | true if key file information is retrieved, false otherwise | + +### get_key_file: example + +```lua + +local result = test_oauth:get_key_file() +--> result is true or false +--> key file data is stored in test_oauth.key_table if result is true +``` + +## create_jwt_claim method + +The **create_jwt_claim** method creates the claim for a jwt token. To do so, the **scope_list** and **project_id** parameters must be set. More information about the google JWT token and claim [here](https://developers.google.com/identity/protocols/oauth2/service-account#authorizingrequests) + +### create_jwt_claim: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------------------------------------------- | +| true or false | boolean | yes | true if jwt claim is created, false otherwise | + +### create_jwt_claim: example + +```lua + +local result = test_oauth:create_jwt_claim() +--> result is true or false +--> jwt claim is stored in test_oauth.jwt_claim if result is true +``` + +## create_signature method + +The **create_signature** method creates the signature of the JWT claim and JWT header. To match google needs, the hash protocol used is **sha256WithRSAEncryption**.
More information about the google JWT token [here](https://developers.google.com/identity/protocols/oauth2/service-account#authorizingrequests) + +### create_signature: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | ---------------------------------------------------- | +| true or false | boolean | yes | true if the signature has been done, false otherwise | + +### create_signature: example + +```lua + +local result = test_oauth:create_signature() +--> result is true or false +--> signature is stored in test_oauth.signature if result is true +``` + +## get_access_token method + +The **get_access_token** method gets an access token from the google api using a jwt token. It will use an existing one if it finds one. Access token life span is one hour. This method will generate a new one if the access token is at least 59 minutes old. To generate a new access token, this method will need to create a new jwt token. + +head over the following chapters for more information + +- [create_jwt_token](#create_jwt_token-method) + +### get_access_token: returns + +| return | type | always | condition | +| ------------ | ------- | ------ | ------------------------------- | +| false | boolean | no | if it can't get an access token | +| access_token | string | no | if it can get an access token | + +### get_access_token: example + +```lua + +local result = test_oauth:get_access_token() +--> result is "dzadz93213daznc321OGRK" or false if access token is not retrieved +``` + +## curl_google method + +The **curl_google** method sends data to the google api for authentication. + +### curl_google: parameters + +| parameter | type | optional | default value | +| ---------------------------- | ------ | -------- | ------------- | +| the url of the google api | string | no | | +| the curl headers | table | no | | +| data that needs to be posted | string | yes | | + +### curl_google: returns + +| return | type | always | condition | +| --------------- | ------- | ------ | --------------------------------------------------------------------- | +| false | boolean | no | if the google api can't be reached or doesn't return an http code 200 | +| result from api | string | no | if the query went well | + +### curl_google: example + +```lua +-- set up headers +local headers = { + 'Content-Type: application/x-www-form-urlencoded' +} + +-- set up data +local data = { + grant_type = "urn:ietf:params:oauth:grant-type:jwt-bearer", + assertion = test_oauth.jwt_token +} + +-- set up url +local url = test_oauth.key_table.uri + +-- convert data so it can be sent as url parameters +local url_encoded_data = test_common:generate_postfield_param_string(data) + +local result = test_oauth:curl_google(url, headers, url_encoded_data) +--> result is false or data (should be json most of the time) +```
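Putting the previous chapters together, here is a short and purely illustrative flow that reuses the test_oauth and test_logger instances from the constructor example to retrieve a token and build an authorization header with it: + +```lua +-- ask the module for an access token (it creates or reuses a jwt token internally) +local access_token = test_oauth:get_access_token() + +if not access_token then + test_logger:error("[example]: could not retrieve a google access token") +else + -- the token can then be used in the headers of your api calls + local headers = { + "Authorization: Bearer " .. access_token + } +end +```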
diff --git a/stream-connectors/modules/docs/google/bigquery/bigquery.md b/stream-connectors/modules/docs/google/bigquery/bigquery.md new file mode 100644 index 00000000000..c1d1f956cdb --- /dev/null +++ b/stream-connectors/modules/docs/google/bigquery/bigquery.md @@ -0,0 +1,334 @@ +# Documentation of the google bigquery module + +- [Documentation of the google bigquery module](#documentation-of-the-google-bigquery-module) + - [Introduction](#introduction) + - [Module initialization](#module-initialization) + - [Module constructor](#module-constructor) + - [constructor: Example](#constructor-example) + - [get_tables_schema method](#get_tables_schema-method) + - [get_tables_schema: returns](#get_tables_schema-returns) + - [get_tables_schema: example](#get_tables_schema-example) + - [default_host_table_schema method](#default_host_table_schema-method) + - [default_host_table_schema: returns](#default_host_table_schema-returns) + - [default_host_table_schema: example](#default_host_table_schema-example) + - [default_service_table_schema method](#default_service_table_schema-method) + - [default_service_table_schema: returns](#default_service_table_schema-returns) + - [default_service_table_schema: example](#default_service_table_schema-example) + - [default_ack_table_schema method](#default_ack_table_schema-method) + - [default_ack_table_schema: returns](#default_ack_table_schema-returns) + - [default_ack_table_schema: example](#default_ack_table_schema-example) + - [default_dt_table_schema method](#default_dt_table_schema-method) + - [default_dt_table_schema: returns](#default_dt_table_schema-returns) + - [default_dt_table_schema: example](#default_dt_table_schema-example) + - [default_ba_table_schema method](#default_ba_table_schema-method) + - [default_ba_table_schema: returns](#default_ba_table_schema-returns) + - [default_ba_table_schema: example](#default_ba_table_schema-example) + - [load_tables_schema_file method](#load_tables_schema_file-method) + - [load_tables_schema_file: returns](#load_tables_schema_file-returns) + - [load_tables_schema_file: example](#load_tables_schema_file-example) + - [build_table_schema method](#build_table_schema-method) + - [build_table_schema: parameters](#build_table_schema-parameters) + - [build_table_schema: example](#build_table_schema-example) + +## Introduction + +The google bigquery module provides methods to handle table schemas. It has been made in OOP (object oriented programming) + +## Module initialization + +Since this is OOP, it is required to initiate your module + +### Module constructor + +Constructor can be initialized with two parameters; if the second one is not provided it will use a default value + +- params. This is a table of all the stream connector parameters +- sc_logger. This is an instance of the sc_logger module + +If you don't provide the sc_logger parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization)) + +### constructor: Example + +```lua +-- load modules +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_bq = require("centreon-stream-connectors-lib.google.bigquery.bigquery") + +-- initiate "mandatory" information for the logger module +local logfile = "/var/log/test_logger.log" +local severity = 1 + +-- create a new instance of the sc_logger module +local test_logger = sc_logger.new(logfile, severity) + +-- some stream connector params +local params = { + my_param = "my_value" +} + +-- create a new instance of the google bigquery module +local test_bq = sc_bq.new(params, test_logger) +``` + +## get_tables_schema method + +The **get_tables_schema** method retrieves the schemas for host_status, service_status, downtime, acknowledgement and BA events.
Depending on the configuration, it creates them from a default configuration, a JSON configuration file or straight from the stream connector parameters + +head over the following chapters for more information + +For the default tables schema that are provided: + +- [default_host_table_schema](#default_host_table_schema-method) +- [default_service_table_schema](#default_service_table_schema-method) +- [default_ack_table_schema](#default_ack_table_schema-method) +- [default_dt_table_schema](#default_dt_table_schema-method) +- [default_ba_table_schema](#default_ba_table_schema-method) + +For the other methods: + +- [load_tables_schema_file](#load_tables_schema_file-method) +- [build_table_schema](#build_table_schema-method) + +### get_tables_schema: returns + +| return | type | always | condition | +| ------ | ------- | ------ | --------- | +| true | boolean | yes | | + +### get_tables_schema: example + +```lua +local result = test_bq:get_tables_schema() +--> result is true +--> schemas are stored in test_bq.schemas[][] +``` + +## default_host_table_schema method + +The **default_host_table_schema** method retrieves the schemas for host_status events + +### default_host_table_schema: returns + +| return | type | always | condition | +| ----------------- | ----- | ------ | --------- | +| host schema table | table | yes | | + +### default_host_table_schema: example + +```lua +local result = test_bq:default_host_table_schema() +--> result is : +--[[ + { + host_id = "{host_id}", + host_name = "{cache.host.name}", + status = "{state}", + last_check = "{last_check}", + output = "{output}", + instance_id = "{cache.host.instance_id}" + } +]]-- +``` + +## default_service_table_schema method + +The **default_service_table_schema** method retrieves the schemas for service_status events + +### default_service_table_schema: returns + +| return | type | always | condition | +| -------------------- | ----- | ------ | --------- | +| service schema table | table | yes | | + +### default_service_table_schema: example + +```lua +local result = test_bq:default_service_table_schema() +--> result is : +--[[ + { + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + last_check = "{last_check}", + output = "{output}", + instance_id = "{cache.host.instance_id}" + } +]]-- +``` + +## default_ack_table_schema method + +The **default_ack_table_schema** method retrieves the schemas for acknowledgement events + +### default_ack_table_schema: returns + +| return | type | always | condition | +| ---------------- | ----- | ------ | --------- | +| ack schema table | table | yes | | + +### default_ack_table_schema: example + +```lua +local result = test_bq:default_ack_table_schema() +--> result is : +--[[ + { + author = "{author}", + host_id = "{host_id}", + host_name = "{cache.host.name}", + service_id = "{service_id}", + service_description = "{cache.service.description}", + status = "{state}", + output = "{output}", + instance_id = "{cache.host.instance_id}", + entry_time = "{entry_time}" + } +]]-- +``` + +## default_dt_table_schema method + +The **default_dt_table_schema** method retrieves the schemas for downtime events + +### default_dt_table_schema: returns + +| return | type | always | condition | +| --------------------- | ----- | ------ | --------- | +| downtime schema table | table | yes | | + +### default_dt_table_schema: example + +```lua +local result = test_bq:default_dt_table_schema() +--> result 
is :
+--[[
+  {
+    author = "{author}",
+    host_id = "{host_id}",
+    host_name = "{cache.host.name}",
+    service_id = "{service_id}",
+    service_description = "{cache.service.description}",
+    status = "{state}",
+    output = "{output}",
+    instance_id = "{cache.host.instance_id}",
+    actual_start_time = "{actual_start_time}",
+    actual_end_time = "{deletion_time}"
+  }
+]]--
+```
+
+## default_ba_table_schema method
+
+The **default_ba_table_schema** method retrieves the schemas for ba_status events
+
+### default_ba_table_schema: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| BA schema table | table | yes | |
+
+### default_ba_table_schema: example
+
+```lua
+local result = test_bq:default_ba_table_schema()
+--> result is :
+--[[
+  {
+    ba_id = "{ba_id}",
+    ba_name = "{cache.ba.ba_name}",
+    status = "{state}"
+  }
+]]--
+```
+
+## load_tables_schema_file method
+
+The **load_tables_schema_file** method retrieves the schemas from a json file. The json file must have the following structure:
+
+```json
+{
+  "host": {
+    "column_1": "value_1",
+    "column_2": "value_2"
+  },
+  "service": {
+    "column_1": "value_1",
+    "column_2": "value_2"
+  },
+  "ack": {
+    "column_1": "value_1",
+    "column_2": "value_2"
+  },
+  "dt": {
+    "column_1": "value_1",
+    "column_2": "value_2"
+  },
+  "ba": {
+    "column_1": "value_1",
+    "column_2": "value_2"
+  }
+}
+```
+
+If you only want to send service_status events, you can just put the service part of the json.
+
+### load_tables_schema_file: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| true or false | boolean | yes | false if we can't open the configuration file or it is not a valid json file, true otherwise |
+
+### load_tables_schema_file: example
+
+```lua
+local result = test_bq:load_tables_schema_file()
+--> result is true or false
+--> if true, schemas are stored in test_bq.schemas[][]
+```
+
+## build_table_schema method
+
+The **build_table_schema** method creates table schemas using stream connector parameters.
+Parameters must have the following syntax to be interpreted:
+
+- For host_status events: `_sc_gbq_host_column_`
+- For service_status events: `_sc_gbq_service_column_`
+- For acknowledgement events: `_sc_gbq_ack_column_`
+- For downtime events: `_sc_gbq_dt_column_`
+- For ba_status events: `_sc_gbq_ba_column_`
+
+### build_table_schema: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| regex, the regex to identify the stream connector parameter that is about a column | string | no | |
+| substract, the prefix that must be excluded from the parameter name to only get the column name | string | no | |
+| structure, the table in which the retrieved column and value are going to be stored | table | yes | |
+
+### build_table_schema: example
+
+```lua
+self.params._sc_gbq_host_column_MYNAME = "MYVALUE"
+self.params._sc_gbq_host_column_OTHERNAME = "OTHERVALUE"
+self.params.something = "hello"
+
+-- any parameter starting with _sc_gbq_host_column is going to be computed
+-- any matching parameter is going to have _sc_gbq_host_column removed from its name
+-- the created table schema will be stored in self.schemas[1][14] (1 because host_status is a neb event, and 14 because host_status is the element 14 of the neb events table)
+test_bq:build_table_schema("^_sc_gbq_host_column", "_sc_gbq_host_column", self.schemas[1][14])
+--> self.schemas[1][14] is
+--[[
+  {
+    MYNAME = "MYVALUE",
+    OTHERNAME = "OTHERVALUE"
+  }
+]]--
+```
diff --git a/stream-connectors/modules/docs/mappings.md b/stream-connectors/modules/docs/mappings.md
new file mode 100644
index 00000000000..a820c1a07dd
--- /dev/null
+++ b/stream-connectors/modules/docs/mappings.md
@@ -0,0 +1,164 @@
+# mappings documentation
+
+- [mappings documentation](#mappings-documentation)
+  - [Introduction](#introduction)
+  - [Categories](#categories)
+    - [get category ID from name](#get-category-id-from-name)
+    - [get category name from ID](#get-category-name-from-id)
+  - [Elements](#elements)
+    - [get element ID from name](#get-element-id-from-name)
+    - [get element name from ID](#get-element-name-from-id)
+    - [get the category ID from an element name](#get-the-category-id-from-an-element-name)
+    - [get the category name from an element name](#get-the-category-name-from-an-element-name)
+    - [get the element ID from a category ID and an element name](#get-the-element-id-from-a-category-id-and-an-element-name)
+  - [States](#states)
+    - [get state type name from state type ID](#get-state-type-name-from-state-type-id)
+    - [get state name from category ID, element ID and state ID](#get-state-name-from-category-id-element-id-and-state-id)
+  - [Tips and tricks](#tips-and-tricks)
+
+## Introduction
+
+Every mapping is made available through a params table that is only available if you have created an instance of the sc_params module. To create such an instance, head over [**the sc_params documentation**](sc_param.md#Module-initialization)
+
+## Categories
+
+Each event is linked to a category.
To help you work with that, there are a bunch of mappings available
+
+### get category ID from name
+
+To get the ID of a category based on its name, you can use the following mapping table
+
+```lua
+-- get the id of the neb category
+local category_id = params_object.params.bbdo.categories["neb"].id
+--> category_id is 1
+
+-- you can also get its name from this table but it shouldn't be very useful
+local category_name = params_object.params.bbdo.categories["neb"].name
+```
+
+### get category name from ID
+
+To get the name of a category based on its ID, you can use the following mapping table
+
+```lua
+-- get the name of the category 6
+local category_name = params_object.params.reverse_category_mapping[6]
+--> category_name is "bam"
+```
+
+## Elements
+
+Each event is linked to an element. To help you work with that, there are a bunch of mappings available
+
+### get element ID from name
+
+```lua
+-- get the ID of the element host_status
+local element_id = params_object.params.bbdo.elements["host_status"].id
+--> element_id is 14
+
+-- you can also get its name from this table but it shouldn't be very useful
+local element_name = params_object.params.bbdo.elements["host_status"].name
+--> element_name is "host_status"
+```
+
+### get element name from ID
+
+You can't get an element name from its ID alone; you must also have its category. Because each category has its own elements and their IDs start at 1, many elements share the same ID.
+For instance, the **acknowledgement** element and the **ba_status** element both have 1 as an element ID. The first element is part of the **neb category**, the second one is part of the **bam category**
+
+```lua
+-- category is neb
+local category_id = params_object.params.bbdo.categories["neb"].id -- it is better to use the mapping instead of hard coding the ID if you know it.
+-- element is service_status
+local element_id = 24
+
+local element_name = params_object.params.reverse_element_mapping[category_id][element_id]
+--> element_name is "service_status"
+```
+
+### get the category ID from an element name
+
+```lua
+local category_id = params_object.params.bbdo.elements["host_status"].category_id
+--> category_id is 1
+```
+
+### get the category name from an element name
+
+```lua
+local category_name = params_object.params.bbdo.elements["host_status"].category_name
+--> category_name is neb
+```
+
+### get the element ID from a category ID and an element name
+
+This one is a bit redundant with the [**get element ID from name**](#get-element-id-from-name) mapping. It should be deprecated, but in a world where two elements from different categories could share the same name, it is better to keep this possibility
+
+```lua
+local category_id = params_object.params.bbdo.categories["neb"].id -- it is better to use the mapping instead of hard coding the ID if you know it.
+local element_name = "host_status"
+
+local element_id = params_object.params.element_mapping[category_id][element_name]
+--> element_id is 14
+```
+
+## States
+
+### get state type name from state type ID
+
+```lua
+local state_type_name = params_object.params.state_type_mapping[0]
+--> state_type_name is "SOFT"
+
+state_type_name = params_object.params.state_type_mapping[1]
+--> state_type_name is "HARD"
+```
+
+### get state name from category ID, element ID and state ID
+
+```lua
+local category_id = params_object.params.bbdo.categories["neb"].id -- it is better to use the mapping instead of hard coding the ID if you know it.
+local element_id = params_object.params.bbdo.elements["host_status"].id -- it is better to use the mapping instead of hard coding the ID if you know it.
+
+local state_name = params_object.params.status_mapping[category_id][element_id][1]
+--> state_name is "DOWN"
+```
+
+## Tips and tricks
+
+- When you want to use the ID of the neb category for example
+
+```lua
+-- ✘ bad
+local neb_category_id = 1
+
+-- ✓ good
+local neb_category_id = params_object.params.bbdo.categories.neb.id
+```
+
+- When you want to use the ID of the host_status element for example
+
+```lua
+-- ✘ bad
+local host_status_element_id = 14
+
+-- ✓ good
+local host_status_element_id = params_object.params.bbdo.elements.host_status.id
+```
+
+- When working on a downtime event, you can get the human readable state using a hidden mapping table. Because this event is shared between services and hosts, you don't know if the ID 1 means DOWN or WARNING
+
+```lua
+local categories = params_object.params.bbdo.categories
+local elements = params_object.params.bbdo.elements
+
+-- 2 = host, 1 is the ID code of the state
+local host_state_downtime = params_object.params.status_mapping[categories.neb.id][elements.downtime.id][2][1]
+--> host_state_downtime is "DOWN"
+
+-- 1 = service, 2 is the ID code of the state
+local service_state_downtime = params_object.params.status_mapping[categories.neb.id][elements.downtime.id][1][2]
+--> service_state_downtime is "CRITICAL"
+```
diff --git a/stream-connectors/modules/docs/sc_broker.md b/stream-connectors/modules/docs/sc_broker.md
new file mode 100644
index 00000000000..d28faabe95c
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_broker.md
@@ -0,0 +1,616 @@
+# Documentation of the sc_broker module
+
+- [Documentation of the sc_broker module](#documentation-of-the-sc_broker-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [Module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [get_host_all_infos method](#get_host_all_infos-method)
+    - [get_host_all_infos: parameters](#get_host_all_infos-parameters)
+    - [get_host_all_infos: returns](#get_host_all_infos-returns)
+    - [get_host_all_infos: example](#get_host_all_infos-example)
+  - [get_service_all_infos method](#get_service_all_infos-method)
+    - [get_service_all_infos: parameters](#get_service_all_infos-parameters)
+    - [get_service_all_infos: returns](#get_service_all_infos-returns)
+    - [get_service_all_infos: example](#get_service_all_infos-example)
+  - [get_host_infos method](#get_host_infos-method)
+    - [get_host_infos: parameters](#get_host_infos-parameters)
+    - [get_host_infos: returns](#get_host_infos-returns)
+    - [get_host_infos: example](#get_host_infos-example)
+  - [get_service_infos method](#get_service_infos-method)
+    - [get_service_infos: parameters](#get_service_infos-parameters)
+    - [get_service_infos: 
returns](#get_service_infos-returns) + - [get_service_infos: example](#get_service_infos-example) + - [get_hostgroups method](#get_hostgroups-method) + - [get_hostgroups: parameters](#get_hostgroups-parameters) + - [get_hostgroups: returns](#get_hostgroups-returns) + - [get_hostgroups: example](#get_hostgroups-example) + - [get_servicegroups method](#get_servicegroups-method) + - [get_servicegroups: parameters](#get_servicegroups-parameters) + - [get_servicegroups: returns](#get_servicegroups-returns) + - [get_servicegroups: example](#get_servicegroups-example) + - [get_severity method](#get_severity-method) + - [get_severity: parameters](#get_severity-parameters) + - [get_severity: returns](#get_severity-returns) + - [get_severity: example](#get_severity-example) + - [get_instance method](#get_instance-method) + - [get_instance: parameters](#get_instance-parameters) + - [get_instance: returns](#get_instance-returns) + - [get_instance: example](#get_instance-example) + - [get_ba_infos method](#get_ba_infos-method) + - [get_ba_infos: parameters](#get_ba_infos-parameters) + - [get_ba_infos: returns](#get_ba_infos-returns) + - [get_ba_infos: example](#get_ba_infos-example) + - [get_bvs_infos method](#get_bvs_infos-method) + - [get_bvs_infos: parameters](#get_bvs_infos-parameters) + - [get_bvs_infos: returns](#get_bvs_infos-returns) + - [get_bvs_infos: example](#get_bvs_infos-example) + +## Introduction + +The sc_broker module provides wrapper methods for broker cache. It has been made in OOP (object oriented programming) + +## Module initialization + +Since this is OOP, it is required to initiate your module + +### Module constructor + +Constructor can be initialized with one parameter or it will use a default value. + +- sc_logger. This is an instance of the sc_logger module + +If you don't provide this parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization)) + +### constructor: Example + +```lua +-- load modules +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") + +-- initiate "mandatory" informations for the logger module +local logfile = "/var/log/test_broker.log" +local severity = 1 + +-- create a new instance of the sc_logger module +local test_logger = sc_logger.new(logfile, severity) + +-- create a new instance of the sc_common module +local test_broker = sc_broker.new(test_logger) +``` + +## get_host_all_infos method + +The **get_host_all_infos** method returns all the broker cache information about a host using its ID + +### get_host_all_infos: parameters + +| parameter | type | optional | default value | +| ------------------ | ------ | -------- | ------------- | +| the ID of the host | number | no | | + +### get_host_all_infos: returns + +| return | type | always | condition | +| -------------------------------------------- | ------- | ------ | ------------------------------------------- | +| a table with all cache information from host | table | no | host id must be found in broker cache | +| false | boolean | no | if host id wasn't found in the broker cache | + +### get_host_all_infos: example + +```lua +local host_id = 2712 + +local result = test_broker:get_host_all_infos(host_id) + +--[[ + --> result structure is: + { + _type = 65548, + acknowledged = false, + acknowledgement_type = 0, + action_url = "", + active_checks = true, + address = "10.30.2.85", + alias = "NRPE", + category = 1, + 
check_attempt = 1, + check_command = "base_host_alive", + check_freshness = false, + check_interval = 5, + check_period = "24x7", + check_type = 0, + checked = true, + default_active_checks = true, + default_event_handler_enabled = true, + default_flap_detection = true, + default_notify = true, + default_passive_checks = false, + display_name = "NRPE", + element = 12, + enabled = true, + event_handler = "", + event_handler_enabled = true, + execution_time = 0.002, + first_notification_delay = 0, + flap_detection = true, + flap_detection_on_down = true, + flap_detection_on_unreachable = true, + flap_detection_on_up = true, + flapping = false, + freshness_threshold = 0, + high_flap_threshold = 0, + host_id = 2712, + icon_image = "ppm/operatingsystems-linux-snmp-linux-128.png", + icon_image_alt = "", + instance_id = 1, + last_check_value = 1619727378, + last_hard_state = 0, + last_hard_state_change = 1616086363, + last_time_down = 1617692579, + last_time_up = 1619727370, + last_update = 1619727378, + last_state_change = 1617692639, + latency = 0.903, + low_flap_threshold = 0, + max_check_attempts = 3, + name = "NRPE", + next_check = 1619727378, + no_more_notification = false, + notes = "", + notes_url = "", + notification_interval = 0, + notification_number = 0, + notification_period = "24x7", + notify = true, + notify_on_down = true, + notify_on_downtime = true, + notify_on_flapping = true, + notify_on_recovery = true, + notify_on_unreachable = true, + passive_checks = false, + percent_state_change = 0, + perfdata = "rta=0,263ms;3000,000;5000,000;0; rtmax=0,263ms;;;; rtmin=0,263ms;;;; pl=0%;80;100;0;100", + obsess_over_host = true, + output = "OK - 10.30.2.85 rta 0,263ms lost 0%", + retain_nonstatus_information = true, + retain_status_information = true, + retry_interval = 1, + scheduled_downtime_depth = 0, + should_be_scheduled = true, + stalk_on_down = false, + stalk_on_unreachable = false, + stalk_on_up = false, + state = 0, + state_type = 1, + statusmap_image = "", + timezone = "" + } + + --> result.output is "OK - 10.30.2.85 rta 0,263ms lost 0%" +--]] +``` + +## get_service_all_infos method + +The **get_service_all_infos** method returns all the broker cache information about a service using its host and service ID + +### get_service_all_infos: parameters + +| parameter | type | optional | default value | +| --------------------- | ------ | -------- | ------------- | +| the ID of the host | number | no | | +| the ID of the service | number | no | | + +### get_service_all_infos: returns + +| return | type | always | condition | +| ----------------------------------------------- | ------- | ------ | ------------------------------------------------------ | +| a table with all cache information from service | table | no | host id and service id must be found in broker cache | +| false | boolean | no | if host or service ID wasn't found in the broker cache | + +### get_service_all_infos: example + +```lua +local host_id = 2712 +local service_id = 1991 + +local result = test_broker:get_service_all_infos(host_id, service_id) + +--[[ + --> result structure is: + { + _type = 65559, + action_url = "", + acknowledged = false, + acknowledgement_type = 0, + active_checks = true, + category = 1, + check_attempt = 1, + check_command = base_centreon_ping, + check_freshness = false, + check_interval = 5, + check_period = 24x7, + check_type = 0, + checked = true, + default_active_checks = true, + default_event_handler_enabled = true, + default_flap_detection = true, + default_passive_checks = false, + 
description = Ping, + display_name = Ping, + default_notify = true, + element = 23, + enabled = true, + event_handler = "", + event_handler_enabled = true, + execution_time = 0.004, + first_notification_delay = 0, + flap_detection = true, + flap_detection_on_critical = true, + flap_detection_on_ok = true, + flap_detection_on_unknown = true, + flap_detection_on_warning = true, + flapping = false, + freshness_threshold = 0, + high_flap_threshold = 0, + host_id = 2712, + icon_image = "", + icon_image_alt = "", + last_check = 1619730350, + last_hard_state = 0, + last_hard_state_change = 1609343081, + last_state_change = 1609343081, + last_time_critical = 1609342781, + last_time_ok = 1619730350, + last_update = 1619730437, + latency = 0.76, + low_flap_threshold = 0, + max_check_attempts = 3, + next_check = 1619730910, + no_more_notifications = false, + notes = "", + notes_url = "", + notification_interval = 0, + notification_number = 0, + notification_period = 24x7, + notify = true, + notify_on_critical = true, + notify_on_downtime = true, + notify_on_flapping = true, + notify_on_recovery = true, + notify_on_unknown = true, + notify_on_warning = true, + obsess_over_service = true, + output = OK - 10.30.2.15 rta 0,110ms lost 0% + passive_checks = false, + percent_state_change = 0, + perfdata = rta=0,110ms;200,000;400,000;0; rtmax=0,217ms;;;; rtmin=0,079ms;;;; pl=0%;20;50;0;100, + retain_nonstatus_information = true, + retain_status_information = true, + retry_interval = 1, + scheduled_downtime_depth = 0, + service_id = 1991, + should_be_scheduled = true, + state_type = 1, + stalk_on_critical = false, + stalk_on_ok = false, + stalk_on_unknown = false, + stalk_on_warning = false, + state = 0, + volatile = false + } + + --> result.output is: "OK - 10.30.2.15 rta 0,110ms lost 0%" +--]] +``` + +## get_host_infos method + +The **get_host_infos** method returns asked information about a host from the broker cache using the host ID. + +### get_host_infos: parameters + +| parameter | type | optional | default value | +| ----------------------- | --------------- | -------- | ------------- | +| the ID of the host | number | no | | +| the desired information | string or table | no | | + +### get_host_infos: returns + +| return | type | always | condition | +| ------------------------------------- | ------- | ------ | -------------------------- | +| a table with the desired informations | table | no | it must be a valid host id | +| false | boolean | no | if host ID is nil or empty | + +### get_host_infos: example + +```lua +local host_id = 2712 +local desired_infos = {"retain_nonstatus_information", "obsess_over_host"} +-- if you want a single information you can also use = "retain_nonstatus_information" or {"retain_nonstatus_information"} + +local result = test_broker:get_host_infos(host_id, desired_infos) + +--[[ + --> result structure is: + { + host_id = 2712, + retain_nonstatus_information = false, + obsess_over_host = true + } + + --> result.obsess_over_host is: true +]] +``` + +## get_service_infos method + +The **get_service_infos** method returns asked information about a service from the broker cache using the host and service ID. 
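+
+As a quick illustration (a minimal sketch assuming the test_broker instance from the constructor example; the host and service IDs are illustrative), the third parameter accepts either a single string or a table of strings:
+
+```lua
+-- both calls ask for the same single piece of information
+local from_string = test_broker:get_service_infos(2712, 1991, "description")
+local from_table = test_broker:get_service_infos(2712, 1991, {"description"})
+--> both return a table such as { host_id = 2712, service_id = 1991, description = "Ping" }
+```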
+
+### get_service_infos: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the ID of the host | number | no | |
+| the ID of the service | number | no | |
+| the desired information | string or table | no | |
+
+### get_service_infos: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| a table with the desired information | table | no | it must be a valid host or service id |
+| false | boolean | no | if host or service ID is nil or empty |
+
+### get_service_infos: example
+
+```lua
+local host_id = 2712
+local service_id = 1991
+local desired_infos = {"description", "obsess_over_service"}
+-- if you want a single information you can also use = "description" or {"description"}
+
+local result = test_broker:get_service_infos(host_id, service_id, desired_infos)
+
+--[[
+  --> result structure is:
+  {
+    host_id = 2712,
+    service_id = 1991,
+    description = "Ping",
+    obsess_over_service = true
+  }
+
+  --> result.obsess_over_service is: true
+]]
+```
+
+## get_hostgroups method
+
+The **get_hostgroups** method retrieves hostgroups linked to a host from the broker cache using the host ID.
+
+### get_hostgroups: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the ID of the host | number | no | |
+
+### get_hostgroups: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| a table with all hostgroups information linked to the host | table | no | host id must have linked hostgroups found in broker cache |
+| false | boolean | no | if host ID is invalid (empty or nil) or no hostgroups were found |
+
+### get_hostgroups: example
+
+***notice: to better understand the result, you need to know that, by convention, a table starts at index 1 in lua and not 0 like it is in most languages***
+
+```lua
+local host_id = 2712
+
+local result = test_broker:get_hostgroups(host_id)
+
+--[[
+  --> result structure is:
+  {
+    [1] = {
+      group_id = 2,
+      group_name = "NetworkSecurity"
+    },
+    [2] = {
+      group_id = 9,
+      group_name = "Archimede_Sydney"
+    }
+  }
+
+  --> result[2].group_name is: "Archimede_Sydney"
+--]]
+```
+
+## get_servicegroups method
+
+The **get_servicegroups** method retrieves servicegroups linked to a service from the broker cache using the host and service ID.
+
+### get_servicegroups: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the ID of the host | number | no | |
+| the ID of the service | number | no | |
+
+### get_servicegroups: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| a table with all servicegroups information linked to the service | table | no | service must have linked servicegroups found in broker cache |
+| false | boolean | no | if host or service ID is invalid (empty or nil) or no servicegroups were found |
+
+### get_servicegroups: example
+
+***notice: to better understand the result, you need to know that, by convention, a table starts at index 1 in lua and not 0 like it is in most 
languages*** + +```lua +local host_id = 2712 +local service_id = 1991 + +local result = test_broker:get_servicegroups(host_id, service_id) + +--[[ + --> result structure is: + { + [1] = { + group_id = 2, + group_name = "Net_Services" + }, + [2] = { + group_id = 5, + group_name = "Another_SG" + } + } + + --> result[2].group_name is: "Another_SG" +--]] +``` + +## get_severity method + +The **get_severity** method retrieves the severity of a host or service from the broker cache using the ID of the service or host. + +### get_severity: parameters + +| parameter | type | optional | default value | +| --------------------- | ------ | -------- | ------------- | +| the ID of the host | number | no | | +| the ID of the service | number | yes | | + +### get_severity: returns + +| return | type | always | condition | +| ----------------------------------------- | ------- | ------ | ------------------------------------------------------------------------ | +| the severity level of the host or service | number | no | service or host must have a severity found in broker cache | +| false | boolean | no | if host or service ID is invalid (empty or nil) or no severity was found | + +### get_severity: example + +```lua +-- severity for a host +local host_id = 2712 + +local result = test_broker:get_severity(host_id) +--> result is: 2 + +-- severity for a service +local service_id = 1991 + +result = test_broker:get_severity(host_id, service_id) +--> result is: 42 +``` + +## get_instance method + +The **get_instance** method returns the poller name using the instance ID. + +### get_instance: parameters + +| parameter | type | optional | default value | +| ---------------------- | ------ | -------- | ------------- | +| the ID of the instance | number | no | | + +### get_instance: returns + +| return | type | always | condition | +| --------------- | ------- | ------ | -------------------------------------------------------------------------------- | +| the poller name | string | no | instance ID must be found in broker cache | +| false | boolean | no | if instance ID is invalid (empty or nil) or ID was not found in the broker cache | + +### get_instance: example + +```lua +local instance_id = 2712 + +local result = test_broker:get_instance(instance_id) +--> result is: "awesome-poller" +``` + +## get_ba_infos method + +The **get_ba_infos** method retrieves the name and description of a BA from the broker cache using its ID. + +### get_ba_infos: parameters + +| parameter | type | optional | default value | +| ---------------- | ------ | -------- | ------------- | +| the ID of the BA | number | no | | + +### get_ba_infos: returns + +| return | type | always | condition | +| ----------------------------------------------- | ------- | ------ | -------------------------------------------------------------------------- | +| a table with the name and description of the BA | table | no | BA ID must be found in the broker cache | +| false | boolean | no | if BA ID is invalid (empty or nil) or ID was not found in the broker cache | + +### get_ba_infos: example + +```lua +local ba_id = 2712 + +local result = test_broker:get_ba_infos(ba_id) +--[[ + --> result structure is: + { + ba_id = 2712, + ba_name = "awesome-BA", + ba_description = "awesome-BA-description" + } + + --> result.ba_name is: "awesome-BA" + +--]] +``` + +## get_bvs_infos method + +The **get_bvs_infos** method retrieves the name and description of all BVs linked to a BA from the broker cache. 
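+
+For instance (a minimal sketch assuming the test_broker instance from the constructor example; the BA ID is illustrative), the returned table can be iterated like any Lua array:
+
+```lua
+local ba_id = 2712
+
+local bvs = test_broker:get_bvs_infos(ba_id)
+
+-- the method returns false when nothing was found, so guard before looping
+if bvs then
+  for i, bv in ipairs(bvs) do
+    print("BV #" .. i .. ": " .. bv.bv_name .. " (" .. bv.bv_description .. ")")
+  end
+end
+```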
+
+### get_bvs_infos: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the ID of the BA | number | no | |
+
+### get_bvs_infos: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| a table with the name and description of all the BVs | table | no | BVs must be found in the broker cache |
+| false | boolean | no | if BA ID is invalid (empty or nil) or no BVs were found in the broker cache |
+
+### get_bvs_infos: example
+
+***notice: to better understand the result, you need to know that, by convention, a table starts at index 1 in lua and not 0 like it is in most languages***
+
+```lua
+local ba_id = 2712
+
+local result = test_broker:get_bvs_infos(ba_id)
+--[[
+  --> result structure is:
+  {
+    [1] = {
+      bv_id = 9,
+      bv_name = "awesome-BV",
+      bv_description = "awesome-BV-description"
+    },
+    [2] = {
+      bv_id = 33,
+      bv_name = "another-BV",
+      bv_description = "another-BV-description"
+    }
+  }
+
+  --> result[2].bv_name is: "another-BV"
+--]]
+```
diff --git a/stream-connectors/modules/docs/sc_common.md b/stream-connectors/modules/docs/sc_common.md
new file mode 100644
index 00000000000..547724a2de0
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_common.md
@@ -0,0 +1,595 @@
+# Documentation of the sc_common module
+
+- [Documentation of the sc\_common module](#documentation-of-the-sc_common-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [Module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [ifnil\_or\_empty method](#ifnil_or_empty-method)
+    - [ifnil\_or\_empty: parameters](#ifnil_or_empty-parameters)
+    - [ifnil\_or\_empty: returns](#ifnil_or_empty-returns)
+    - [ifnil\_empty: example](#ifnil_empty-example)
+  - [if\_wrong\_type method](#if_wrong_type-method)
+    - [if\_wrong\_type: parameters](#if_wrong_type-parameters)
+    - [if\_wrong\_type: returns](#if_wrong_type-returns)
+    - [if\_wrong\_type: example](#if_wrong_type-example)
+  - [boolean\_to\_number method](#boolean_to_number-method)
+    - [boolean\_to\_number: parameters](#boolean_to_number-parameters)
+    - [boolean\_to\_number: returns](#boolean_to_number-returns)
+    - [boolean\_to\_number: example](#boolean_to_number-example)
+  - [number\_to\_boolean method](#number_to_boolean-method)
+    - [number\_to\_boolean: parameters](#number_to_boolean-parameters)
+    - [number\_to\_boolean: returns](#number_to_boolean-returns)
+    - [number\_to\_boolean: example](#number_to_boolean-example)
+  - [check\_boolean\_number\_option\_syntax method](#check_boolean_number_option_syntax-method)
+    - [check\_boolean\_number\_option\_syntax: parameters](#check_boolean_number_option_syntax-parameters)
+    - [check\_boolean\_number\_option\_syntax: returns](#check_boolean_number_option_syntax-returns)
+    - [check\_boolean\_number\_option\_syntax: example](#check_boolean_number_option_syntax-example)
+  - [split method](#split-method)
+    - [split: parameters](#split-parameters)
+    - [split: returns](#split-returns)
+    - [split: example](#split-example)
+  - [compare\_numbers method](#compare_numbers-method)
+    - [compare\_numbers: parameters](#compare_numbers-parameters)
+    - [compare\_numbers: returns](#compare_numbers-returns)
+    - [compare\_numbers: example](#compare_numbers-example)
+  - [generate\_postfield\_param\_string 
method](#generate_postfield_param_string-method) + - [generate\_postfield\_param\_string: parameters](#generate_postfield_param_string-parameters) + - [generate\_postfield\_param\_string: returns](#generate_postfield_param_string-returns) + - [generate\_postfield\_param\_string: example](#generate_postfield_param_string-example) + - [load\_json\_file method](#load_json_file-method) + - [load\_json\_file: parameters](#load_json_file-parameters) + - [load\_json\_file: returns](#load_json_file-returns) + - [load\_json\_file: example](#load_json_file-example) + - [json\_escape method](#json_escape-method) + - [json\_escape: parameters](#json_escape-parameters) + - [json\_escape: returns](#json_escape-returns) + - [json\_escape: example](#json_escape-example) + - [xml\_escape method](#xml_escape-method) + - [xml\_escape: parameters](#xml_escape-parameters) + - [xml\_escape: returns](#xml_escape-returns) + - [xml\_escape: example](#xml_escape-example) + - [lua\_regex\_escape method](#lua_regex_escape-method) + - [lua\_regex\_escape: parameters](#lua_regex_escape-parameters) + - [lua\_regex\_escape: returns](#lua_regex_escape-returns) + - [lua\_regex\_escape: example](#lua_regex_escape-example) + - [dumper method](#dumper-method) + - [dumper: parameters](#dumper-parameters) + - [dumper: returns](#dumper-returns) + - [dumper: example](#dumper-example) + - [trim method](#trim-method) + - [trim: parameters](#trim-parameters) + - [trim: returns](#trim-returns) + - [trim: example](#trim-example) + - [get\_bbdo\_version method](#get_bbdo_version-method) + - [get\_bbdo\_version: returns](#get_bbdo_version-returns) + - [get\_bbdo\_version: example](#get_bbdo_version-example) + - [is\_valid\_pattern method](#is_valid_pattern-method) + - [is\_valid\_pattern: parameters](#is_valid_pattern-parameters) + - [is\_valid\_pattern: returns](#is_valid_pattern-returns) + - [is\_valid\_pattern: example](#is_valid_pattern-example) + +## Introduction + +The sc_common module provides methods to help with common needs when writing stream connectors. It has been made in OOP (object oriented programming) + +## Module initialization + +Since this is OOP, it is required to initiate your module + +### Module constructor + +Constructor can be initialized with one parameter or it will use a default value. + +- sc_logger. This is an instance of the sc_logger module + +If you don't provide this parameter it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization)) + +### constructor: Example + +```lua +-- load modules +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_common = require("centreon-stream-connectors-lib.sc_common") + +-- initiate "mandatory" informations for the logger module +local logfile = "/var/log/test_logger.log" +local severity = 1 + +-- create a new instance of the sc_logger module +local test_logger = sc_logger.new(logfile, severity) + +-- create a new instance of the sc_common module +local test_common = sc_common.new(test_logger) +``` + +## ifnil_or_empty method + +The **ifnil_or_empty** method checks if the first parameter is empty or nil and returns the second parameter if that is the case. Otherwise, it will return the first parameter. 
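+
+A typical use (a minimal sketch assuming the test_common instance from the constructor example; the parameter table and default path are illustrative) is to fall back to a default value for a missing or empty parameter:
+
+```lua
+local params = { logfile = "" }
+
+-- logfile is empty, so the default value is returned instead
+local logfile = test_common:ifnil_or_empty(params.logfile, "/var/log/centreon-broker/stream-connector.log")
+--> logfile is "/var/log/centreon-broker/stream-connector.log"
+```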
+
+### ifnil_or_empty: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the variable you want to check | string, number | no | |
+| the default value to return | any | no | |
+
+### ifnil_or_empty: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| the first parameter | first parameter type | no | if first parameter is not empty or nil |
+| the second parameter | second parameter type | no | if first parameter is empty or nil |
+
+### ifnil_empty: example
+
+```lua
+local first_param = "hello"
+local second_param = "goodbye"
+
+local result = test_common:ifnil_or_empty(first_param, second_param)
+--> result is "hello"
+
+first_param = ""
+result = test_common:ifnil_or_empty(first_param, second_param)
+--> result is "goodbye"
+```
+
+## if_wrong_type method
+
+The **if_wrong_type** method checks if the first parameter type is equal to the given type in the second parameter. If that is not the case, it returns the third parameter as a default value
+
+### if_wrong_type: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the variable you want its type to be checked | any | no | |
+| the type that you want your variable to match | string | no | |
+| the default value you want to return if the type of the first parameter doesn't match your second parameter | any | no | |
+
+### if_wrong_type: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| the first parameter | any | no | if the type of the first parameter is equal to your second parameter |
+| the third parameter | any | no | if the type of the first parameter is not equal to your second parameter |
+
+### if_wrong_type: example
+
+```lua
+local first_param = "i am a string"
+local second_param = "string"
+local third_param = "my default value"
+
+local result = test_common:if_wrong_type(first_param, second_param, third_param)
+--> result is "i am a string"
+
+first_param = 3
+result = test_common:if_wrong_type(first_param, second_param, third_param)
+--> result is "my default value"
+```
+
+## boolean_to_number method
+
+The **boolean_to_number** method converts a boolean to its number equivalent.
+
+### boolean_to_number: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| a boolean variable | boolean | no | |
+
+### boolean_to_number: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| a number (0 or 1) | number | yes | |
+
+### boolean_to_number: example
+
+```lua
+local my_boolean = true
+
+local result = test_common:boolean_to_number(my_boolean)
+--> result is 1
+```
+
+## number_to_boolean method
+
+The **number_to_boolean** method converts a number to its boolean equivalent.
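+
+Note that only 0 and 1 are converted; as a quick sketch (assuming the test_common instance from the constructor example), any other value yields nil:
+
+```lua
+local result = test_common:number_to_boolean(0)
+--> result is false
+
+-- 42 is neither 0 nor 1, so nothing is converted
+result = test_common:number_to_boolean(42)
+--> result is nil
+```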
+
+### number_to_boolean: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| a number (0 or 1) | number | no | |
+
+### number_to_boolean: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| a boolean (true or false) | boolean | no | if parameter is 0 or 1 |
+| nil | nil | no | if parameter is not 0 or 1 |
+
+### number_to_boolean: example
+
+```lua
+local my_number = 1
+
+local result = test_common:number_to_boolean(my_number)
+--> result is true
+```
+
+## check_boolean_number_option_syntax method
+
+The **check_boolean_number_option_syntax** method checks if the first parameter is a boolean number (0 or 1) and, if that is not the case, returns the second parameter
+
+### check_boolean_number_option_syntax: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the variable you want to check | any | no | |
+| a default value to return if the first parameter is wrong | any | no | |
+
+### check_boolean_number_option_syntax: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| the first parameter | number | no | the first parameter must be a boolean number |
+| the second parameter | any | no | the first parameter is not a boolean number |
+
+### check_boolean_number_option_syntax: example
+
+```lua
+local first_parameter = 1
+local second_parameter = "a default return value"
+
+local result = test_common:check_boolean_number_option_syntax(first_parameter, second_parameter)
+--> result is 1
+
+first_parameter = "not a boolean number"
+result = test_common:check_boolean_number_option_syntax(first_parameter, second_parameter)
+--> result is "a default return value"
+```
+
+## split method
+
+The **split** method splits a string using a separator and returns a table of all the split parts
+
+### split: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the string you need to split | string | no | |
+| the separator you want to use | string | yes | "," |
+
+### split: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| a table with all split parts | table | no | the string to split mustn't be empty or nil |
+| false | boolean | no | if the string to split is empty or nil |
+
+### split: example
+
+***notice: to better understand the result, you need to know that, by convention, a table starts at index 1 in lua and not 0 like it is in most languages***
+
+```lua
+local my_string = "split;using;semicolon"
+local separator = ";"
+
+local result = test_common:split(my_string, separator)
+--[[
+
+  --> result structure is:
+  {
+    [1] = "split",
+    [2] = "using",
+    [3] = "semicolon"
+  }
+
+  --> result[2] is "using"
+
+--]]
+
+my_string = ""
+result = test_common:split(my_string, separator)
+--> result is false
+```
+
+## compare_numbers method
+
+The **compare_numbers** method compares a first number with a second one using the provided mathematical operator
+
+### compare_numbers: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the first number you need to compare | number | no | |
+| the second number you need to compare | number | no | |
+| the mathematical operator you want to use | string | no | |
+
+accepted operators: <, >, >=, <=, ==, ~=
+
+### compare_numbers: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| a boolean | boolean | no | both numbers must be numbers and the mathematical operator must be a valid operator |
+| nil | nil | no | if one of the numbers is not a number or the mathematical operator is not valid |
+
+### compare_numbers: example
+
+```lua
+local first_number = 4
+local second_number = 12
+local operator = "=="
+
+local result = test_common:compare_numbers(first_number, second_number, operator)
+--> result is false (4 is not equal to 12)
+
+operator = "~="
+result = test_common:compare_numbers(first_number, second_number, operator)
+--> result is true
+
+first_number = "hello my friend"
+result = test_common:compare_numbers(first_number, second_number, operator)
+--> result is nil ("hello my friend" is not a valid number)
+```
+
+## generate_postfield_param_string method
+
+The **generate_postfield_param_string** method generates an url encoded param string based on a table with said params.
+
+### generate_postfield_param_string: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the table with all the parameters to convert into a parameters string | table | no | |
+
+### generate_postfield_param_string: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| false | boolean | no | if the method parameter is not a table |
+| string_params | string | no | if the method parameter is a table, it will return an url encoded string of params |
+
+### generate_postfield_param_string: example
+
+```lua
+local param_table = {
+  key = "321Xzd",
+  option = "full",
+  name = "John Doe"
+}
+
+local result = test_common:generate_postfield_param_string(param_table)
+--> result is "key=321Xzd&option=full&name=John%20Doe"
+```
+
+## load_json_file method
+
+The **load_json_file** method loads a json file and parses it.
+
+### load_json_file: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the path to the json file (must be readable by centreon-broker) | string | no | |
+
+### load_json_file: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| true or false | boolean | yes | false if the json file couldn't be loaded or parsed, true otherwise |
+| the parsed content of the json | table | no | only when true is returned |
+
+### load_json_file: example
+
+```lua
+local json_file = "/etc/centreon-broker/sc_config.json"
+
+local result, content = test_common:load_json_file(json_file)
+--> result is true, content is a table
+
+json_file = 3
+result, content = test_common:load_json_file(json_file)
+--> result is false, content is nil
+```
+
+## json_escape method
+
+The **json_escape** method escapes json special characters.
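+
+This is handy when handcrafting a json payload (a minimal sketch assuming the test_common instance from the constructor example; the event table is illustrative):
+
+```lua
+local event = { output = 'CRITICAL: disk "/var" at 95%' }
+
+-- escape the double quotes before embedding the output in a json string
+local payload = '{"message": "' .. test_common:json_escape(event.output) .. '"}'
+--> payload is '{"message": "CRITICAL: disk \"/var\" at 95%"}'
+```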
+ +### json_escape: parameters + +| parameter | type | optional | default value | +| ----------------------------- | ------ | -------- | ------------- | +| a string that must be escaped | string | no | | + +### json_escape: returns + +| return | type | always | condition | +| ---------------------------------------------------------------------- | -------------------------------- | ------ | --------- | +| an escaped string (or the raw parameter if it was nil or not a string) | string (or input parameter type) | yes | | + +### json_escape: example + +```lua +local string = 'string with " and backslashes \\ and tab:\tend tab' +--> string is 'string with " and backslashes \ and tab: end tab' + +local result = test_common:json_escape(string) +--> result is 'string with \" and backslashes \\ and tab:\tend tab' +``` + +## xml_escape method + +The **xml_escape** method escape xml special characters. + +### xml_escape: parameters + +| parameter | type | optional | default value | +| ----------------------------- | ------ | -------- | ------------- | +| a string that must be escaped | string | no | | + +### xml_escape: returns + +| return | type | always | condition | +| ---------------------------------------------------------------------- | -------------------------------- | ------ | --------- | +| an escaped string (or the raw parameter if it was nil or not a string) | string (or input parameter type) | yes | | + +### xml_escape: example + +```lua +local string = 'string with " and < and >' +--> string is 'string with " and < and >' + +local result = test_common:xml_escape(string) +--> result is 'string with " and < and >' +``` + +## lua_regex_escape method + +The **lua_regex_escape** method escape lua regex special characters. + +### lua_regex_escape: parameters + +| parameter | type | optional | default value | +| ----------------------------- | ------ | -------- | ------------- | +| a string that must be escaped | string | no | | + +### lua_regex_escape: returns + +| return | type | always | condition | +| ---------------------------------------------------------------------- | -------------------------------- | ------ | --------- | +| an escaped string (or the raw parameter if it was nil or not a string) | string (or input parameter type) | yes | | + +### lua_regex_escape: example + +```lua +local string = 'string with % and . and *' +--> string is 'string with % and . and *' + +local result = test_common:lua_regex_escape(string) +--> result is 'string with %% and %. and %*' +``` + +## dumper method + +The **dumper** method dumps variables for debug purpose + +### dumper: parameters + +| parameter | type | optional | default value | +| --------------------------------------------------------------------------------------------------- | ------ | -------- | ------------- | +| the variable that must be dumped | any | no | | +| the string that contains the dumped variable. ONLY USED INTERNALLY FOR RECURSIVE PURPOSE | string | yes | | +| the string that contains the tab character. ONLY USED INTERNALLY FOR RECURSIVE PURPOSE (and design) | string | yes | | + +### dumper: returns + +| return | type | always | condition | +| ------------------- | ------ | ------ | --------- | +| the dumped variable | string | yes | | + +### dumper: example + +```lua +local best_city = { + name = "mont-de-marsan", + geocoord = { + lat = 43.89446, + lon = -0.4964242 + } +} + +local result = "best city info: " .. 
test_common:dumper(best_city)
+--> result is
+--[[
+  best city info:
+  [table]
+    [string] name: mont-de-marsan
+    [table] geocoord:
+      [number] lon: -0.4964242
+      [number] lat: 43.89446
+]]--
+```
+
+## trim method
+
+The **trim** method removes spaces (or the specified character) at the beginning and the end of a string
+
+### trim: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the string that must be trimmed | string | no | |
+| the character that must be removed (if not provided, will remove space characters) | string | yes | |
+
+### trim: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| the trimmed variable | string | yes | |
+
+### trim: example
+
+```lua
+local string = "   I'm a space maaaaan    "
+
+local result = test_common:trim(string)
+--> result is: "I'm a space maaaaan"
+
+local string = ";;;;;;I'm no longer a space maaaaan;;;;;;;;;;;;;;"
+
+local result = test_common:trim(string, ";")
+--> result is: "I'm no longer a space maaaaan"
+```
+
+## get_bbdo_version method
+
+The **get_bbdo_version** method returns the first digit of the bbdo protocol version.
+
+### get_bbdo_version: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| the bbdo version | number | yes | |
+
+### get_bbdo_version: example
+
+```lua
+local result = test_common:get_bbdo_version()
+--> result is: 3
+```
+
+## is_valid_pattern method
+
+The **is_valid_pattern** method checks if a Lua pattern is valid or not.
+
+### is_valid_pattern: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| the pattern that must be checked | string | no | |
+
+### is_valid_pattern: returns
+
+| return | type | always | condition |
+| ------ | ---- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_pattern: example
+
+```lua
+local good_pattern = "a random pattern .*"
+
+local result = test_common:is_valid_pattern(good_pattern)
+--> result is: true
+
+local wrong_pattern = "a random pattern %2"
+
+local result = test_common:is_valid_pattern(wrong_pattern)
+--> result is: false
+```
diff --git a/stream-connectors/modules/docs/sc_event.md b/stream-connectors/modules/docs/sc_event.md
new file mode 100644
index 00000000000..2d44affbf62
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_event.md
@@ -0,0 +1,1169 @@
+# Documentation of the sc_event module
+
+- [Documentation of the sc\_event module](#documentation-of-the-sc_event-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [is\_valid\_category method](#is_valid_category-method)
+    - [is\_valid\_category: returns](#is_valid_category-returns)
+    - [is\_valid\_category: example](#is_valid_category-example)
+  - [is\_valid\_element method](#is_valid_element-method)
+    - [is\_valid\_element: returns](#is_valid_element-returns)
+    - [is\_valid\_element: example](#is_valid_element-example)
+  - [is\_valid\_event method](#is_valid_event-method)
+    - [is\_valid\_event: returns](#is_valid_event-returns)
+    - [is\_valid\_event: example](#is_valid_event-example)
+  - [is\_valid\_neb\_event method](#is_valid_neb_event-method)
+    - 
[is\_valid\_neb\_event: returns](#is_valid_neb_event-returns) + - [is\_valid\_neb\_event: example](#is_valid_neb_event-example) + - [is\_valid\_host\_status\_event method](#is_valid_host_status_event-method) + - [is\_valid\_host\_status\_event: returns](#is_valid_host_status_event-returns) + - [is\_valid\_host\_status\_event: example](#is_valid_host_status_event-example) + - [is\_valid\_service\_status\_event method](#is_valid_service_status_event-method) + - [is\_valid\_service\_status\_event: returns](#is_valid_service_status_event-returns) + - [is\_valid\_service\_status\_event: example](#is_valid_service_status_event-example) + - [is\_valid\_host method](#is_valid_host-method) + - [is\_valid\_host: returns](#is_valid_host-returns) + - [is\_valid\_host: example](#is_valid_host-example) + - [is\_valid\_service method](#is_valid_service-method) + - [is\_valid\_service: returns](#is_valid_service-returns) + - [is\_valid\_service: example](#is_valid_service-example) + - [is\_valid\_event\_states method](#is_valid_event_states-method) + - [is\_valid\_event\_states: returns](#is_valid_event_states-returns) + - [is\_valid\_event\_states: example](#is_valid_event_states-example) + - [is\_valid\_event\_status method](#is_valid_event_status-method) + - [is\_valid\_event\_status: parameters](#is_valid_event_status-parameters) + - [is\_valid\_event\_status: returns](#is_valid_event_status-returns) + - [is\_valid\_event\_status: example](#is_valid_event_status-example) + - [is\_valid\_event\_state\_type method](#is_valid_event_state_type-method) + - [is\_valid\_event\_state\_type: returns](#is_valid_event_state_type-returns) + - [is\_valid\_event\_state\_type: example](#is_valid_event_state_type-example) + - [is\_valid\_event\_acknowledge\_state method](#is_valid_event_acknowledge_state-method) + - [is\_valid\_event\_acknowledge\_state: returns](#is_valid_event_acknowledge_state-returns) + - [is\_valid\_event\_acknowledge\_state: example](#is_valid_event_acknowledge_state-example) + - [is\_valid\_event\_downtime\_state method](#is_valid_event_downtime_state-method) + - [is\_valid\_event\_downtime\_state: returns](#is_valid_event_downtime_state-returns) + - [is\_valid\_event\_downtime\_state: example](#is_valid_event_downtime_state-example) + - [is\_valid\_event\_flapping\_state method](#is_valid_event_flapping_state-method) + - [is\_valid\_event\_flapping\_state: returns](#is_valid_event_flapping_state-returns) + - [is\_valid\_event\_flapping\_state: example](#is_valid_event_flapping_state-example) + - [is\_valid\_hostgroup method](#is_valid_hostgroup-method) + - [is\_valid\_hostgroup: returns](#is_valid_hostgroup-returns) + - [is\_valid\_hostgroup: example](#is_valid_hostgroup-example) + - [is\_valid\_servicegroup method](#is_valid_servicegroup-method) + - [is\_valid\_servicegroup: returns](#is_valid_servicegroup-returns) + - [is\_valid\_servicegroup: example](#is_valid_servicegroup-example) + - [is\_valid\_bam\_event method](#is_valid_bam_event-method) + - [is\_valid\_bam\_event: returns](#is_valid_bam_event-returns) + - [is\_valid\_bam\_event: example](#is_valid_bam_event-example) + - [is\_valid\_ba method](#is_valid_ba-method) + - [is\_valid\_ba: returns](#is_valid_ba-returns) + - [is\_valid\_ba: example](#is_valid_ba-example) + - [is\_valid\_ba\_status\_event method](#is_valid_ba_status_event-method) + - [is\_valid\_ba\_status\_event: returns](#is_valid_ba_status_event-returns) + - [is\_valid\_ba\_status\_event: example](#is_valid_ba_status_event-example) + - [is\_valid\_ba\_downtime\_state 
method](#is_valid_ba_downtime_state-method) + - [is\_valid\_ba\_downtime\_state: returns](#is_valid_ba_downtime_state-returns) + - [is\_valid\_ba\_downtime\_state: example](#is_valid_ba_downtime_state-example) + - [is\_valid\_ba\_acknowledge\_state method](#is_valid_ba_acknowledge_state-method) + - [is\_valid\_ba\_acknowledge\_state: returns](#is_valid_ba_acknowledge_state-returns) + - [is\_valid\_ba\_acknowledge\_state: example](#is_valid_ba_acknowledge_state-example) + - [is\_valid\_bv method](#is_valid_bv-method) + - [is\_valid\_bv: returns](#is_valid_bv-returns) + - [is\_valid\_bv: example](#is_valid_bv-example) + - [find\_hostgroup\_in\_list method](#find_hostgroup_in_list-method) + - [find\_hostgroup\_in\_list: parameters](#find_hostgroup_in_list-parameters) + - [find\_hostgroup\_in\_list: returns](#find_hostgroup_in_list-returns) + - [find\_hostgroup\_in\_list: example](#find_hostgroup_in_list-example) + - [find\_servicegroup\_in\_list method](#find_servicegroup_in_list-method) + - [find\_servicegroup\_in\_list: parameters](#find_servicegroup_in_list-parameters) + - [find\_servicegroup\_in\_list: returns](#find_servicegroup_in_list-returns) + - [find\_servicegroup\_in\_list: example](#find_servicegroup_in_list-example) + - [find\_bv\_in\_list method](#find_bv_in_list-method) + - [find\_bv\_in\_list: returns](#find_bv_in_list-returns) + - [find\_bv\_in\_list: example](#find_bv_in_list-example) + - [is\_valid\_poller method](#is_valid_poller-method) + - [is\_valid\_poller: returns](#is_valid_poller-returns) + - [is\_valid\_poller: example](#is_valid_poller-example) + - [find\_poller\_in\_list method](#find_poller_in_list-method) + - [find\_poller\_in\_list: returns](#find_poller_in_list-returns) + - [find\_poller\_in\_list: example](#find_poller_in_list-example) + - [is\_valid\_host\_severity method](#is_valid_host_severity-method) + - [is\_valid\_host\_severity: returns](#is_valid_host_severity-returns) + - [is\_valid\_host\_severity: example](#is_valid_host_severity-example) + - [is\_valid\_service\_severity method](#is_valid_service_severity-method) + - [is\_valid\_service\_severity: returns](#is_valid_service_severity-returns) + - [is\_valid\_service\_severity: example](#is_valid_service_severity-example) + - [is\_valid\_acknowledgement\_event method](#is_valid_acknowledgement_event-method) + - [is\_valid\_acknowledgement\_event: returns](#is_valid_acknowledgement_event-returns) + - [is\_valid\_acknowledgement\_event: example](#is_valid_acknowledgement_event-example) + - [is\_host\_status\_event\_duplicated method](#is_host_status_event_duplicated-method) + - [is\_host\_status\_event\_duplicated: returns](#is_host_status_event_duplicated-returns) + - [is\_host\_status\_event\_duplicated: example](#is_host_status_event_duplicated-example) + - [is\_service\_status\_event\_duplicated method](#is_service_status_event_duplicated-method) + - [is\_service\_status\_event\_duplicated: returns](#is_service_status_event_duplicated-returns) + - [is\_service\_status\_event\_duplicated: example](#is_service_status_event_duplicated-example) + - [is\_valid\_downtime\_event method](#is_valid_downtime_event-method) + - [is\_valid\_downtime\_event: returns](#is_valid_downtime_event-returns) + - [is\_valid\_downtime\_event: example](#is_valid_downtime_event-example) + - [get\_downtime\_host\_status method](#get_downtime_host_status-method) + - [get\_downtime\_host\_status: returns](#get_downtime_host_status-returns) + - [get\_downtime\_host\_status: example](#get_downtime_host_status-example) + - 
[get\_downtime\_service\_status method](#get_downtime_service_status-method)
+    - [get\_downtime\_service\_status: returns](#get_downtime_service_status-returns)
+    - [get\_downtime\_service\_status: example](#get_downtime_service_status-example)
+  - [is\_valid\_author method](#is_valid_author-method)
+    - [is\_valid\_author: returns](#is_valid_author-returns)
+    - [is\_valid\_author: example](#is_valid_author-example)
+  - [find\_author\_in\_list method](#find_author_in_list-method)
+    - [find\_author\_in\_list: parameters](#find_author_in_list-parameters)
+    - [find\_author\_in\_list: returns](#find_author_in_list-returns)
+    - [find\_author\_in\_list: example](#find_author_in_list-example)
+  - [is\_downtime\_event\_useless method](#is_downtime_event_useless-method)
+    - [is\_downtime\_event\_useless: returns](#is_downtime_event_useless-returns)
+    - [is\_downtime\_event\_useless: example](#is_downtime_event_useless-example)
+  - [is\_valid\_downtime\_event\_start method](#is_valid_downtime_event_start-method)
+    - [is\_valid\_downtime\_event\_start: returns](#is_valid_downtime_event_start-returns)
+    - [is\_valid\_downtime\_event\_start: example](#is_valid_downtime_event_start-example)
+  - [is\_valid\_downtime\_event\_end method](#is_valid_downtime_event_end-method)
+    - [is\_valid\_downtime\_event\_end: returns](#is_valid_downtime_event_end-returns)
+    - [is\_valid\_downtime\_event\_end: example](#is_valid_downtime_event_end-example)
+  - [build\_outputs method](#build_outputs-method)
+    - [build\_outputs: example](#build_outputs-example)
+  - [is\_valid\_storage\_event method](#is_valid_storage_event-method)
+
+## Introduction
+
+The sc_event module provides methods to help you handle events for your stream connectors. It is written in OOP (object-oriented programming).
+
+## Module initialization
+
+Since this is OOP, you must instantiate the module.
+
+### module constructor
+
+The constructor takes 5 parameters:
+
+- an event table
+- a params table
+- a sc_common instance
+- a sc_logger instance (a new one with default parameters is created if not provided)
+- a sc_broker instance
+
+### constructor: Example
+
+```lua
+local event = {
+  --- event data ---
+}
+
+-- load modules
+local sc_param = require("centreon-stream-connectors-lib.sc_param")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_param.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- create a new instance of the sc_common module
+local test_common = sc_common.new(test_logger)
+
+-- create a new instance of the sc_param module
+local test_param = sc_param.new(test_common, test_logger)
+
+-- create a new instance of the sc_broker module
+local test_broker = sc_broker.new(test_logger)
+
+-- create a new instance of the sc_event module
+local test_event = sc_event.new(event, test_param.params, test_common, test_logger, test_broker)
+```
+
+## is_valid_category method
+
+The **is_valid_category** method checks if the event category is part of [**accepted_categories**](sc_param.md#default-parameters)
+
+### is_valid_category: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_category: example
+
+```lua
+local result = test_event:is_valid_category()
+--> result is true or false
+```
+
+## is_valid_element method
+
+The **is_valid_element** method checks if the event element is part of [**accepted_elements**](sc_param.md#default-parameters)
+
+### 
is_valid_element: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_element: example + +```lua +local result = test_event:is_valid_element() +--> result is true or false +``` + +## is_valid_event method + +The **is_valid_event** method checks if the event is valid based on [**default parameters**](sc_param.md#default-parameters) + +head over the following chapters for more information + +- [is_valid_neb_event](#is_valid_neb_event-method) +- [is_valid_bam_event](#is_valid_bam_event-method) +- [is_valid_storage_event](#is_valid_storage_event-method) + +### is_valid_event: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_event: example + +```lua +local result = test_event:is_valid_event() +--> result is true or false +``` + +## is_valid_neb_event method + +The **is_valid_neb_event** method checks if the event is a valid **neb** event based on [**default parameters**](sc_param.md#default-parameters) in the **neb** scope + +head over the following chapters for more information + +- [is_valid_host_status_event](#is_valid_host_status_event-method) +- [is_valid_service_status_event](#is_valid_service_status_event-method) +- [is_valid_acknowledgement_event](#is_valid_acknowledgement_event-method) + +### is_valid_neb_event: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_neb_event: example + +```lua +local result = test_event:is_valid_neb_event() +--> result is true or false +``` + +## is_valid_host_status_event method + +The **is_valid_host_status_event** method checks if the host status event is valid based on [**default parameters**](sc_param.md#default-parameters) in the **host_status** scope + +head over the following chapters for more information + +- [is_valid_host](#is_valid_host-method) +- [is_valid_event_status](#is_valid_event_status-method) +- [is_valid_event_states](#is_valid_event_states-method) +- [is_valid_poller](#is_valid_poller-method) +- [is_valid_host_severity](#is_valid_host_severity-method) +- [is_valid_hostgroup](#is_valid_hostgroup-method) +- [is_host_status_event_duplicated](#is_host_status_event_duplicated-method) + +### is_valid_host_status_event: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_host_status_event: example + +```lua +local result = test_event:is_valid_host_status_event() +--> result is true or false +``` + +## is_valid_service_status_event method + +The **is_valid_service_status_event** method checks if the service status event is valid based on [**default parameters**](sc_param.md#default-parameters) in the **service_status** scope + +head over the following chapters for more information + +- [is_valid_host](#is_valid_host-method) +- [is_valid_service](#is_valid_service-method) +- [is_valid_event_status](#is_valid_event_status-method) +- [is_valid_event_states](#is_valid_event_states-method) +- [is_valid_poller](#is_valid_poller-method) +- [is_valid_host_severity](#is_valid_host_severity-method) +- [is_valid_service_severity](#is_valid_service_severity-method) +- [is_valid_hostgroup](#is_valid_hostgroup-method) +- [is_valid_servicegroup](#is_valid_servicegroup-method) + +### is_valid_service_status_event: returns + +| return | type | always | 
condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_service_status_event: example
+
+```lua
+local result = test_event:is_valid_service_status_event()
+--> result is true or false
+```
+
+## is_valid_host method
+
+The **is_valid_host** method checks if the host is valid based on [**skip_nil_id and skip_anon_events**](sc_param.md#default-parameters)
+
+If the host is valid, all broker cache information regarding this host will be added to the event in a cache.host table. More details about this cache table [**here**](sc_broker.md#get_host_all_infos-example)
+
+### is_valid_host: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_host: example
+
+```lua
+local result = test_event:is_valid_host()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      host = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## is_valid_service method
+
+The **is_valid_service** method checks if the service is valid based on [**skip_nil_id and skip_anon_events**](sc_param.md#default-parameters) in the **service_status** scope
+
+If the service is valid, all broker cache information regarding this service will be added to the event in a cache.service table. More details about this cache table [**here**](sc_broker.md#get_service_all_infos-example)
+
+### is_valid_service: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_service: example
+
+```lua
+local result = test_event:is_valid_service()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      service = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## is_valid_event_states method
+
+The **is_valid_event_states** method checks if the event states (downtime, hard/soft, acknowledgement, flapping) are valid based on [**hard_only, in_downtime, acknowledged and flapping parameters**](sc_param.md#default-parameters) in the **host_status or service_status** scope
+
+head over the following chapters for more information
+
+- [is_valid_event_state_type](#is_valid_event_state_type-method)
+- [is_valid_event_acknowledge_state](#is_valid_event_acknowledge_state-method)
+- [is_valid_event_downtime_state](#is_valid_event_downtime_state-method)
+- [is_valid_event_flapping_state](#is_valid_event_flapping_state-method)
+
+### is_valid_event_states: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_event_states: example
+
+```lua
+local result = test_event:is_valid_event_states(test_param.params.host_status)
+--> result is true or false
+```
+
+## is_valid_event_status method
+
+The **is_valid_event_status** method checks if the event status is valid based on [**host_status, service_status or ba_status parameters**](sc_param.md#default-parameters) in the **host_status, service_status or ba_status** scope
+
+### is_valid_event_status: parameters
+
+| parameter | type | optional | default value |
+| ------------------------------------------------- | ------ | -------- | ------------- |
+| the list of accepted status codes from parameters | string | no | |
+
+### is_valid_event_status: returns
+
+| return | type | always | condition |
+| 
------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_event_status: example + +```lua +local result = test_event:is_valid_event_status() +--> result is true or false +``` + +## is_valid_event_state_type method + +The **is_valid_event_state_type** method checks if the event state (HARD/SOFT) is valid based on the [**hard_only parameter**](sc_param.md#default-parameters) in the **host_status, service_status** scope + +### is_valid_event_state_type: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_event_state_type: example + +```lua +local result = test_event:is_valid_event_state_type() +--> result is true or false +``` + +## is_valid_event_acknowledge_state method + +The **is_valid_event_acknowledge_state** method checks if the event is in valid acknowledgement state based on the [**acknowledged parameter**](sc_param.md#default-parameters) in the **host_status, service_status** scope + +### is_valid_event_acknowledge_state: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_event_acknowledge_state: example + +```lua +local result = test_event:is_valid_event_acknowledge_state() +--> result is true or false +``` + +## is_valid_event_downtime_state method + +The **is_valid_event_downtime_state** method checks if the event is in a valid downtime state based on the [**in_downtime parameter**](sc_param.md#default-parameters) in the **host_status, service_status** scope + +### is_valid_event_downtime_state: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_event_downtime_state: example + +```lua +local result = test_event:is_valid_event_downtime_state() +--> result is true or false +``` + +## is_valid_event_flapping_state method + +The **is_valid_event_flapping_state** method checks if the event is in valid flapping state based on the [**flapping parameter**](sc_param.md#default-parameters) in the **host_status, service_status** scope + +### is_valid_event_flapping_state: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_event_flapping_state: example + +```lua +local result = test_event:is_valid_event_flapping_state() +--> result is true or false +``` + +## is_valid_hostgroup method + +The **is_valid_hostgroup** method checks if the event is in a valid hostgroup based on [**accepted_hostgroups or rejected_hostgroups**](sc_param.md#default-parameters) in the **host_status or service_status** scope + +If the **accepted_hostgroups or rejected_hostgroups** is configured, all broker cache information regarding the hostgroups linked to a host will be added to the event in a cache.hostgroups table. 
More details about this cache table [**here**](sc_broker.md#get_hostgroups-example)
+
+### is_valid_hostgroup: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_hostgroup: example
+
+```lua
+local result = test_event:is_valid_hostgroup()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      hostgroups = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## is_valid_servicegroup method
+
+The **is_valid_servicegroup** method checks if the event is in a valid servicegroup based on [**accepted_servicegroups or rejected_servicegroups**](sc_param.md#default-parameters) in the **service_status** scope
+
+If **accepted_servicegroups or rejected_servicegroups** is configured, all broker cache information regarding the servicegroups linked to a service will be added to the event in a cache.servicegroups table. More details about this cache table [**here**](sc_broker.md#get_servicegroups-example)
+
+### is_valid_servicegroup: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_servicegroup: example
+
+```lua
+local result = test_event:is_valid_servicegroup()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      servicegroups = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## is_valid_bam_event method
+
+The **is_valid_bam_event** method checks if the bam status event is valid based on [**default parameters**](sc_param.md#default-parameters) in the **bam** scope
+
+head over the following chapters for more information
+
+- [is_valid_ba](#is_valid_ba-method)
+- [is_valid_ba_status_event](#is_valid_ba_status_event-method)
+- [is_valid_ba_downtime_state](#is_valid_ba_downtime_state-method)
+- [is_valid_ba_acknowledge_state](#is_valid_ba_acknowledge_state-method)
+- [is_valid_bv](#is_valid_bv-method)
+
+### is_valid_bam_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_bam_event: example
+
+```lua
+local result = test_event:is_valid_bam_event()
+--> result is true or false
+```
+
+## is_valid_ba method
+
+The **is_valid_ba** method checks if the BA is valid based on [**skip_nil_id and skip_anon_events**](sc_param.md#default-parameters)
+
+If the BA is valid, all broker cache information regarding this BA will be added to the event in a cache.ba table.
More details about this cache table [**here**](sc_broker.md#get_ba_infos-example)
+
+### is_valid_ba: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_ba: example
+
+```lua
+local result = test_event:is_valid_ba()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      ba = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## is_valid_ba_status_event method
+
+The **is_valid_ba_status_event** method checks if the BA status is valid based on [**ba_status**](sc_param.md#default-parameters) in the **ba_status** scope
+
+head over the following chapters for more information
+
+- [is_valid_event_status](#is_valid_event_status-method)
+
+### is_valid_ba_status_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_ba_status_event: example
+
+```lua
+local result = test_event:is_valid_ba_status_event()
+--> result is true or false
+```
+
+## is_valid_ba_downtime_state method
+
+The **is_valid_ba_downtime_state** method checks if the BA is in a valid downtime state based on [**in_downtime**](sc_param.md#default-parameters) in the **ba_status** scope
+
+### is_valid_ba_downtime_state: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_ba_downtime_state: example
+
+```lua
+local result = test_event:is_valid_ba_downtime_state()
+--> result is true or false
+```
+
+## is_valid_ba_acknowledge_state method
+
+**DOES NOTHING** The **is_valid_ba_acknowledge_state** method checks if the event is in a valid acknowledgement state based on [**acknowledged**](sc_param.md#default-parameters) in the **ba_status** scope. It currently always returns true.
+
+### is_valid_ba_acknowledge_state: returns
+
+| return | type | always | condition |
+| ------ | ------- | ------ | --------- |
+| true | boolean | yes | |
+
+### is_valid_ba_acknowledge_state: example
+
+```lua
+local result = test_event:is_valid_ba_acknowledge_state()
+--> result is true
+```
+
+## is_valid_bv method
+
+The **is_valid_bv** method checks if the event is linked to a valid BV based on [**accepted_bvs or rejected_bvs**](sc_param.md#default-parameters) in the **ba_status** scope
+
+If **accepted_bvs or rejected_bvs** is configured, all broker cache information regarding the BVs linked to the BA will be added to the event in a cache.bvs table. More details about this cache table [**here**](sc_broker.md#get_bvs_infos-example)
+
+### is_valid_bv: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_bv: example
+
+```lua
+local result = test_event:is_valid_bv()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      bvs = {
+        --- cache data ---
+      }
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## find_hostgroup_in_list method
+
+The **find_hostgroup_in_list** method checks if one of the hostgroups in the hostgroups list parameter ([**accepted_hostgroups or rejected_hostgroups parameters**](sc_param.md#default-parameters)) is linked to the host.
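+
+For illustration, a minimal hedged sketch of how this method is typically driven (the hostgroup names are invented, and `test_event`/`test_logger` are the instances built in the constructor example above):
+
+```lua
+-- hypothetical value of the accepted_hostgroups parameter
+local accepted_hostgroups = "my_hostgroup_1,my_hostgroup_2"
+
+-- returns the name of the first matching hostgroup, or false
+local found_group = test_event:find_hostgroup_in_list(accepted_hostgroups)
+
+if found_group then
+  test_logger:debug("[example]: host is linked to hostgroup: " .. found_group)
+end
+```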
+
+### find_hostgroup_in_list: parameters
+
+| parameter | type | optional | default value |
+| ------------------------------------ | ------ | -------- | ------------- |
+| a comma-separated list of hostgroups | string | no | |
+
+### find_hostgroup_in_list: returns
+
+| return | type | always | condition |
+| --------------------------------------------- | ------- | ------ | ----------------------- |
+| the name of the first hostgroup that is found | string | no | a hostgroup must match |
+| false | boolean | no | if no hostgroup matched |
+
+### find_hostgroup_in_list: example
+
+```lua
+-- accepted_hostgroups are my_hostgroup_1 and my_hostgroup_2
+-- host from event is linked to my_hostgroup_2
+
+local result = test_event:find_hostgroup_in_list(accepted_hostgroups)
+--> result is: "my_hostgroup_2"
+
+-- accepted_hostgroups are my_hostgroup_1 and my_hostgroup_2
+-- host from event is linked to my_hostgroup_2712
+
+result = test_event:find_hostgroup_in_list(accepted_hostgroups)
+--> result is: false
+```
+
+## find_servicegroup_in_list method
+
+The **find_servicegroup_in_list** method checks if one of the servicegroups in the servicegroups list parameter ([**accepted_servicegroups or rejected_servicegroups**](sc_param.md#default-parameters)) is linked to the service.
+
+### find_servicegroup_in_list: parameters
+
+| parameter | type | optional | default value |
+| --------------------------------------- | ------ | -------- | ------------- |
+| a comma-separated list of servicegroups | string | no | |
+
+### find_servicegroup_in_list: returns
+
+| return | type | always | condition |
+| ------------------------------------------------ | ------- | ------ | -------------------------- |
+| the name of the first servicegroup that is found | string | no | a servicegroup must match |
+| false | boolean | no | if no servicegroup matched |
+
+### find_servicegroup_in_list: example
+
+```lua
+-- accepted_servicegroups are my_servicegroup_1 and my_servicegroup_2
+-- service from event is linked to my_servicegroup_2
+
+local result = test_event:find_servicegroup_in_list(accepted_servicegroups)
+--> result is: "my_servicegroup_2"
+
+-- accepted_servicegroups are my_servicegroup_1 and my_servicegroup_2
+-- service from event is linked to my_servicegroup_2712
+
+result = test_event:find_servicegroup_in_list(accepted_servicegroups)
+--> result is: false
+```
+
+## find_bv_in_list method
+
+The **find_bv_in_list** method checks if one of the BVs in the bvs list parameter ([**accepted_bvs or rejected_bvs**](sc_param.md#default-parameters)) is linked to the BA.
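+
+A similar hedged sketch for BVs (the BV names are invented; `test_event` comes from the constructor example above):
+
+```lua
+-- hypothetical value of the accepted_bvs parameter
+local accepted_bvs = "my_bv_1,my_bv_2"
+
+-- returns the name of the first matching BV, or false when none match
+local found_bv = test_event:find_bv_in_list(accepted_bvs)
+```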
+
+### find_bv_in_list: parameters
+
+| parameter | type | optional | default value |
+| ----------------------------- | ------ | -------- | ------------- |
+| a comma-separated list of BVs | string | no | |
+
+### find_bv_in_list: returns
+
+| return | type | always | condition |
+| -------------------------------------- | ------- | ------ | ---------------- |
+| the name of the first BV that is found | string | no | a BV must match |
+| false | boolean | no | if no BV matched |
+
+### find_bv_in_list: example
+
+```lua
+-- accepted_bvs are my_bv_1 and my_bv_2
+-- BA from event is linked to my_bv_2
+
+local result = test_event:find_bv_in_list(accepted_bvs)
+--> result is: "my_bv_2"
+
+-- accepted_bvs are my_bv_1 and my_bv_2
+-- BA from event is linked to my_bv_2712
+
+result = test_event:find_bv_in_list(accepted_bvs)
+--> result is: false
+```
+
+## is_valid_poller method
+
+The **is_valid_poller** method checks if the event is monitored from an accepted poller based on [**accepted_pollers or rejected_pollers**](sc_param.md#default-parameters) in the **host_status or service_status** scope
+
+If **accepted_pollers or rejected_pollers** is configured, all broker cache information regarding the poller linked to a host will be added to the event in a cache.poller index. More details about this cache index [**here**](sc_broker.md#get_instance-example)
+
+### is_valid_poller: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_poller: example
+
+```lua
+local result = test_event:is_valid_poller()
+--> result is true or false
+--[[
+  --> test_event.event structure is:
+  {
+    --- event data ---
+    cache = {
+      poller = "my_poller_name"
+      --- other cache data type ---
+    }
+  }
+]]
+```
+
+## find_poller_in_list method
+
+The **find_poller_in_list** method checks if one of the pollers in the pollers list parameter ([**accepted_pollers or rejected_pollers**](sc_param.md#default-parameters)) is monitoring the host.
+
+### find_poller_in_list: parameters
+
+| parameter | type | optional | default value |
+| --------------------------------- | ------ | -------- | ------------- |
+| a comma-separated list of pollers | string | no | |
+
+### find_poller_in_list: returns
+
+| return | type | always | condition |
+| ------------------------------------------ | ------- | ------ | -------------------- |
+| the name of the first poller that is found | string | no | a poller must match |
+| false | boolean | no | if no poller matched |
+
+### find_poller_in_list: example
+
+```lua
+-- accepted_pollers are my_poller_1 and my_poller_2
+-- host from event is monitored from my_poller_2
+
+local result = test_event:find_poller_in_list(accepted_pollers)
+--> result is: "my_poller_2"
+
+-- accepted_pollers are my_poller_1 and my_poller_2
+-- host from event is monitored from my_poller_2712
+
+result = test_event:find_poller_in_list(accepted_pollers)
+--> result is: false
+```
+
+## is_valid_host_severity method
+
+The **is_valid_host_severity** method checks if the event has an accepted host severity based on [**host_severity_threshold and host_severity_operator**](sc_param.md#default-parameters) in the **host_status or service_status** scope
+
+If the **host_severity_threshold** is configured, all broker cache information regarding the severity linked to a host will be added to the event in a cache.host_severity index.
More details about this cache index [**here**](sc_broker.md#get_severity-example) + +### is_valid_host_severity: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_host_severity: example + +```lua +local result = test_event:is_valid_host_severity() +--> result is true or false +--[[ + --> test_event.event structure is: + { + --- event data --- + cache = { + severity = { + host = 2712 + } + --- other cache data type --- + } + } +]] +``` + +## is_valid_service_severity method + +The **is_valid_service_severity** method checks if the event has an accepted service severity based on [**service_severity_threshold and service_severity_operator**](sc_param.md#default-parameters) in the **service_status** scope + +If the **service_severity_threshold** is configured, all broker cache information regarding the severity linked to a service will be added to the event in a cache.service_severity index. More details about this cache index [**here**](sc_broker.md#get_severity-example) + +### is_valid_service_severity: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_service_severity: example + +```lua +local result = test_event:is_valid_service_severity() +--> result is true or false +--[[ + --> test_event.event structure is: + { + --- event data --- + cache = { + severity = { + service = 2712 + } + --- other cache data type --- + } + } +]] +``` + +## is_valid_acknowledgement_event method + +The **is_valid_acknowledgement_event** method checks if the acknowledgement event is accepted based on [**default_parameters**](sc_param.md#default-parameters) in the **acknowledgement** scope + +head over the following chapters for more information + +- [is_valid_host](#is_valid_host-method) +- [is_valid_author](#is_valid_author-method) +- [is_valid_poller](#is_valid_poller-method) +- [is_valid_host_severity](#is_valid_host_severity-method) +- [is_valid_event_status](#is_valid_event_status-method) +- [is_valid_service](#is_valid_service-method) +- [is_valid_service_severity](#is_valid_service_severity-method) +- [is_valid_servicegroup](#is_valid_servicegroup-method) +- [is_valid_hostgroup](#is_valid_hostgroup-method) + +### is_valid_acknowledgement_event: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_valid_acknowledgement_event: example + +```lua +local result = test_event:is_valid_acknowledgement_event() +--> result is true or false +``` + +## is_host_status_event_duplicated method + +The **is_host_status_event_duplicated** method checks if the event is a duplicated one. for example, if host down event has already been received, it will consider the next down host event as a duplicated one. To enable this feature you must set the [**enable_host_status_dedup option to 1**](sc_param.md#default-parameters) + +### is_host_status_event_duplicated: returns + +| return | type | always | condition | +| ------------- | ------- | ------ | --------- | +| true or false | boolean | yes | | + +### is_host_status_event_duplicated: example + +```lua +local result = test_event:is_host_status_event_duplicated() +--> result is true or false +``` + +## is_service_status_event_duplicated method + +The **is_service_status_event_duplicated** method checks if the event is a duplicated one. 
For example, if a critical service event has already been received, the next critical service event will be considered a duplicate. To enable this feature you must set the [**enable_service_status_dedup option to 1**](sc_param.md#default-parameters)
+
+### is_service_status_event_duplicated: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_service_status_event_duplicated: example
+
+```lua
+local result = test_event:is_service_status_event_duplicated()
+--> result is true or false
+```
+
+## is_valid_downtime_event method
+
+The **is_valid_downtime_event** method checks if the downtime event is valid based on [**default_parameters**](sc_param.md#default-parameters) in the **downtime** scope
+
+head over the following chapters for more information
+
+- [is_valid_host](#is_valid_host-method)
+- [is_valid_author](#is_valid_author-method)
+- [is_valid_poller](#is_valid_poller-method)
+- [is_valid_host_severity](#is_valid_host_severity-method)
+- [is_valid_event_status](#is_valid_event_status-method)
+- [is_valid_service](#is_valid_service-method)
+- [is_valid_service_severity](#is_valid_service_severity-method)
+- [is_valid_servicegroup](#is_valid_servicegroup-method)
+- [is_valid_hostgroup](#is_valid_hostgroup-method)
+- [get_downtime_host_status](#get_downtime_host_status-method)
+- [get_downtime_service_status](#get_downtime_service_status-method)
+
+### is_valid_downtime_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_downtime_event: example
+
+```lua
+local result = test_event:is_valid_downtime_event()
+--> result is true or false
+```
+
+## get_downtime_host_status method
+
+The **get_downtime_host_status** method retrieves the status of the host in a host downtime event
+
+### get_downtime_host_status: returns
+
+| return | type | always | condition |
+| ----------------- | ------ | ------ | --------- |
+| event status code | number | yes | |
+
+### get_downtime_host_status: example
+
+```lua
+local result = test_event:get_downtime_host_status()
+--> result is 0 or 1 (UP or DOWN)
+```
+
+## get_downtime_service_status method
+
+The **get_downtime_service_status** method retrieves the status of the service in a service downtime event
+
+### get_downtime_service_status: returns
+
+| return | type | always | condition |
+| ----------------- | ------ | ------ | --------- |
+| event status code | number | yes | |
+
+### get_downtime_service_status: example
+
+```lua
+local result = test_event:get_downtime_service_status()
+--> result is 0 or 1 or 2 or 3 (OK, WARNING, CRITICAL, UNKNOWN)
+```
+
+## is_valid_author method
+
+The **is_valid_author** method checks if the author of a comment is valid according to the [**accepted_authors or rejected_authors parameter**](sc_param.md#default-parameters).
+
+### is_valid_author: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_author: example
+
+```lua
+local result = test_event:is_valid_author()
+--> result is true or false
+```
+
+## find_author_in_list method
+
+The **find_author_in_list** method checks if one of the authors in the authors list parameter ([**accepted_authors or rejected_authors**](sc_param.md#default-parameters)) is the author of a comment.
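+
+A minimal hedged sketch of the usual filtering flow (the author names are invented; `test_event` is the instance built in the constructor example):
+
+```lua
+-- hypothetical value of the accepted_authors parameter
+local accepted_authors = "author_1,author_2"
+
+-- with an accepted list, no match means the comment event should be skipped
+if test_event:find_author_in_list(accepted_authors) == false then
+  -- the caller would typically drop the event here
+end
+```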
+
+### find_author_in_list: parameters
+
+| parameter | type | optional | default value |
+| --------------------------------- | ------ | -------- | ------------- |
+| a comma-separated list of authors | string | no | |
+
+### find_author_in_list: returns
+
+| return | type | always | condition |
+| ------------------------------------------ | ------- | ------ | -------------------- |
+| the name of the first author that is found | string | no | an author must match |
+| false | boolean | no | if no author matched |
+
+### find_author_in_list: example
+
+```lua
+-- accepted_authors are author_1 and author_2
+-- author_1 is the author of the comment
+
+local result = test_event:find_author_in_list(accepted_authors)
+--> result is: "author_1"
+
+-- accepted_authors are author_1 and author_2
+-- author_3 is the author of the comment
+
+result = test_event:find_author_in_list(accepted_authors)
+--> result is: false
+```
+
+## is_downtime_event_useless method
+
+The **is_downtime_event_useless** method checks if the downtime event is a true start or end of a downtime.
+
+head over the following chapters for more information
+
+- [is_valid_downtime_event_start](#is_valid_downtime_event_start-method)
+- [is_valid_downtime_event_end](#is_valid_downtime_event_end-method)
+
+### is_downtime_event_useless: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_downtime_event_useless: example
+
+```lua
+local result = test_event:is_downtime_event_useless()
+--> result is true or false
+```
+
+## is_valid_downtime_event_start method
+
+The **is_valid_downtime_event_start** method checks if the downtime event is a true start of downtime event. It checks that there is no `actual_end_time` information in the downtime and that the `actual_start_time` is set. Otherwise it is not a true start of downtime event.
+
+### is_valid_downtime_event_start: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_downtime_event_start: example
+
+```lua
+local result = test_event:is_valid_downtime_event_start()
+--> result is true or false
+```
+
+## is_valid_downtime_event_end method
+
+The **is_valid_downtime_event_end** method checks if the downtime event is a true end of downtime event. It checks that the `deletion_time` is set. Otherwise it is not a true end of downtime event.
+
+### is_valid_downtime_event_end: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_downtime_event_end: example
+
+```lua
+local result = test_event:is_valid_downtime_event_end()
+--> result is true or false
+```
+
+## build_outputs method
+
+The **build_outputs** method adds short_output and long_output entries to the event table. The output entry will be set to one or the other depending on the [**use_long_output parameter**](sc_param.md#default-parameters).
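+
+As a hedged illustration of the result (the output text is invented, and the exact content of short_output depends on the module implementation):
+
+```lua
+test_event:build_outputs()
+--[[
+  assuming the event output was "CRITICAL: disk full\nmount point: /var",
+  the event table now carries entries such as:
+  short_output = "CRITICAL: disk full"
+  long_output  = "CRITICAL: disk full\nmount point: /var"
+  output       = one of the two, depending on the use_long_output parameter
+]]--
+```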
+
+### build_outputs: example
+
+```lua
+local result = test_event:build_outputs()
+```
+
+## is_valid_storage_event method
+
+**DEPRECATED** does nothing
diff --git a/stream-connectors/modules/docs/sc_flush.md b/stream-connectors/modules/docs/sc_flush.md
new file mode 100644
index 00000000000..e18a3a302f3
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_flush.md
@@ -0,0 +1,339 @@
+# Documentation of the sc_flush module
+
+- [Documentation of the sc\_flush module](#documentation-of-the-sc_flush-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [Module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [add\_queue\_metadata method](#add_queue_metadata-method)
+    - [add\_queue\_metadata: parameters](#add_queue_metadata-parameters)
+    - [add\_queue\_metadata: example](#add_queue_metadata-example)
+  - [flush\_all\_queues method](#flush_all_queues-method)
+    - [flush\_all\_queues: parameters](#flush_all_queues-parameters)
+    - [flush\_all\_queues: returns](#flush_all_queues-returns)
+    - [flush\_all\_queues: example](#flush_all_queues-example)
+  - [reset\_all\_queues method](#reset_all_queues-method)
+    - [reset\_all\_queues: example](#reset_all_queues-example)
+  - [get\_queues\_size method](#get_queues_size-method)
+    - [get\_queues\_size: returns](#get_queues_size-returns)
+    - [get\_queues\_size: example](#get_queues_size-example)
+  - [flush\_mixed\_payload method](#flush_mixed_payload-method)
+    - [flush\_mixed\_payload: parameters](#flush_mixed_payload-parameters)
+    - [flush\_mixed\_payload: returns](#flush_mixed_payload-returns)
+    - [flush\_mixed\_payload: example](#flush_mixed_payload-example)
+  - [flush\_homogeneous\_payload method](#flush_homogeneous_payload-method)
+    - [flush\_homogeneous\_payload: parameters](#flush_homogeneous_payload-parameters)
+    - [flush\_homogeneous\_payload: returns](#flush_homogeneous_payload-returns)
+    - [flush\_homogeneous\_payload: example](#flush_homogeneous_payload-example)
+  - [flush\_payload method](#flush_payload-method)
+    - [flush\_payload: parameters](#flush_payload-parameters)
+    - [flush\_payload: returns](#flush_payload-returns)
+    - [flush\_payload: example](#flush_payload-example)
+
+## Introduction
+
+The sc_flush module provides methods to help handle queues of events in stream connectors. It is written in OOP (object-oriented programming).
+
+## Module initialization
+
+Since this is OOP, you must instantiate the module.
+
+### Module constructor
+
+The constructor can be initialized with two parameters; if the second one is not provided, a default value is used.
+
+- params. This is the table of all stream connector parameters
+- sc_logger.
This is an instance of the sc_logger module
+
+If you don't provide this parameter, it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
+
+### constructor: Example
+
+```lua
+-- load modules
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_flush = require("centreon-stream-connectors-lib.sc_flush")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_logger.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+local params = {
+  param_A = "value A",
+  param_B = "value B"
+}
+
+-- create a new instance of the sc_flush module
+local test_flush = sc_flush.new(params, test_logger)
+```
+
+## add_queue_metadata method
+
+The **add_queue_metadata** method adds a list of metadata to a given queue.
+
+### add_queue_metadata: parameters
+
+| parameter | type | optional | default value |
+| ------------------------------------------------------------------------------------------------ | ------ | -------- | ------------- |
+| the category id of the queue | number | no | |
+| the element id of the queue | number | no | |
+| a table containing metadata, where each key is the name of the metadata and the value its value | table | no | |
+
+### add_queue_metadata: example
+
+```lua
+-- if accepted_elements is set to "host_status,service_status"
+
+local host_metadata = {
+  endpoint = "/host",
+  method = "POST"
+}
+
+local category = 1
+local element = 14
+
+test_flush:add_queue_metadata(category, element, host_metadata)
+--> the host queue (category: 1, element: 14) now has metadata
+--[[
+  test_flush.queues = {
+    [1] = {
+      [14] = {
+        events = {},
+        queue_metadata = {
+          category_id = 1,
+          element_id = 14,
+          endpoint = "/host",
+          method = "POST"
+        }
+      }
+    }
+  }
+]]--
+```
+
+## flush_all_queues method
+
+The **flush_all_queues** method tries to flush all the possible queues that can be created. It flushes queues according to the [**accepted_elements, max_buffer_size, max_buffer_age and send_mixed_events parameters**](sc_param.md#default-parameters)
+
+head over the following chapters for more information
+
+- [flush_mixed_payload](#flush_mixed_payload-method)
+- [flush_homogeneous_payload](#flush_homogeneous_payload-method)
+- [reset_all_queues](#reset_all_queues-method)
+
+### flush_all_queues: parameters
+
+| parameter | type | optional | default value |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -------- | ------------- |
+| the function that must be used to build the data payload. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.build_payload` but not `self:build_payload` (do not put parentheses, otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | |
+| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax.
Meaning it can be `self.send_data` but not `self:send_data` (do not put parentheses, otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | |
+
+### flush_all_queues: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### flush_all_queues: example
+
+```lua
+-- if accepted_elements is set to "host_status,service_status"
+
+local function build_payload()
+  -- build data payload
+end
+
+local function send_data()
+  -- send data somewhere
+end
+
+local result = test_flush:flush_all_queues(build_payload, send_data)
+--> result is true or false
+--> host_status and service_status are flushed if it is possible
+```
+
+## reset_all_queues method
+
+The **reset_all_queues** method removes all the entries from all the queue tables.
+
+### reset_all_queues: example
+
+```lua
+test_flush.queues[1] = {
+  [14] = {
+    flush_date = os.time() - 30, -- simulate an old queue by setting its last flush date 30 seconds in the past
+    events = {
+      [1] = "first event",
+      [2] = "second event"
+    }
+  },
+  [24] = {
+    flush_date = os.time() - 30, -- simulate an old queue by setting its last flush date 30 seconds in the past
+    events = {
+      [1] = "first event",
+      [2] = "second event"
+    }
+  }
+}
+
+test_flush:reset_all_queues()
+--> test_flush.queues are now reset
+--[[
+  test_flush.queues[1] = {
+    [14] = {
+      flush_date = os.time(), -- the time at which the reset happened
+      events = {}
+    },
+    [24] = {
+      flush_date = os.time(), -- the time at which the reset happened
+      events = {}
+    }
+  }
+]]--
+```
+
+## get_queues_size method
+
+The **get_queues_size** method gets the number of events stored in all the queues.
+
+### get_queues_size: returns
+
+| return | type | always | condition |
+| -------- | ------ | ------ | --------- |
+| a number | number | yes | |
+
+### get_queues_size: example
+
+```lua
+test_flush.queues[1] = {
+  [14] = {
+    flush_date = os.time(),
+    events = {
+      [1] = "first event",
+      [2] = "second event"
+    }
+  },
+  [24] = {
+    flush_date = os.time(),
+    events = {
+      [1] = "first event",
+      [2] = "second event"
+    }
+  }
+}
+
+local result = test_flush:get_queues_size()
+--> result is 4
+```
+
+## flush_mixed_payload method
+
+The **flush_mixed_payload** method flushes a payload that contains various types of events (services mixed with hosts, for example) according to [**max_buffer_size and max_buffer_age parameters**](sc_param.md#default-parameters)
+
+### flush_mixed_payload: parameters
+
+| parameter | type | optional | default value |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -------- | ------------- |
+| the function that must be used to build the data payload. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.build_payload` but not `self:build_payload` (do not put parentheses, otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | |
+| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax.
Meaning it can be `self.send_data` but not `self:send_data` (do not put parentheses, otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | |
+
+### flush_mixed_payload: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### flush_mixed_payload: example
+
+```lua
+-- if accepted_elements is set to "host_status,service_status"
+
+local function build_payload()
+  -- build data payload
+end
+
+local function send_data()
+  -- send data somewhere
+end
+
+local result = test_flush:flush_mixed_payload(build_payload, send_data)
+--> result is true or false
+--> host_status and service_status are flushed if it is possible
+```
+
+## flush_homogeneous_payload method
+
+The **flush_homogeneous_payload** method flushes a payload that contains a single type of events (services with services only and hosts with hosts only, for example) according to [**max_buffer_size and max_buffer_age parameters**](sc_param.md#default-parameters)
+
+### flush_homogeneous_payload: parameters
+
+| parameter | type | optional | default value |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -------- | ------------- |
+| the function that must be used to build the data payload. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.build_payload` but not `self:build_payload` (do not put parentheses, otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | |
+| the function that must be used to send data. If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parentheses, otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | |
+
+### flush_homogeneous_payload: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### flush_homogeneous_payload: example
+
+```lua
+-- if accepted_elements is set to "host_status,service_status"
+
+local function build_payload()
+  -- build data payload
+end
+
+local function send_data()
+  -- send data somewhere
+end
+
+local result = test_flush:flush_homogeneous_payload(build_payload, send_data)
+--> result is true or false
+--> host_status and service_status are flushed if it is possible
+```
+
+## flush_payload method
+
+The **flush_payload** method sends a payload using the given method.
+
+### flush_payload: parameters
+
+| parameter | type | optional | default value |
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -------- | ------------- |
+| the function that must be used to send the data.
If the method is part of a lua module, you must use the dot syntax and not the colon syntax. Meaning it can be `self.send_data` but not `self:send_data` (do not put parentheses, otherwise it will pass the result of the function as a parameter instead of the function itself) | function | no | |
+| a table containing the payload that must be sent | table | no | |
+| a table containing metadata for the payload | table | no | `{}` |
+
+### flush_payload: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### flush_payload: example
+
+```lua
+local payload = {
+  host = "mont",
+  state = "2",
+  service = "marsan"
+}
+
+local metadata = {
+  endpoint = "/api/event"
+}
+
+local function send_data()
+  -- send data somewhere
+end
+
+result = test_flush:flush_payload(send_data, payload, metadata)
+--> result is true or false
+```
diff --git a/stream-connectors/modules/docs/sc_logger.md b/stream-connectors/modules/docs/sc_logger.md
new file mode 100644
index 00000000000..620bf721b1d
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_logger.md
@@ -0,0 +1,204 @@
+# Documentation of the sc_logger module
+
+- [Documentation of the sc\_logger module](#documentation-of-the-sc_logger-module)
+  - [Introduction](#introduction)
+  - [Best practices](#best-practices)
+  - [Module initialization](#module-initialization)
+    - [module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [error method](#error-method)
+    - [error: parameters](#error-parameters)
+    - [error: example](#error-example)
+  - [warning method](#warning-method)
+    - [warning: parameters](#warning-parameters)
+    - [warning: example](#warning-example)
+  - [debug method](#debug-method)
+    - [debug: parameters](#debug-parameters)
+    - [debug: example](#debug-example)
+  - [info method](#info-method)
+    - [info: parameters](#info-parameters)
+    - [info: example](#info-example)
+  - [notice method](#notice-method)
+    - [notice: parameters](#notice-parameters)
+    - [notice: example](#notice-example)
+  - [log\_curl\_command method](#log_curl_command-method)
+    - [log\_curl\_command: parameters](#log_curl_command-parameters)
+    - [log\_curl\_command: example](#log_curl_command-example)
+
+## Introduction
+
+The sc_logger module provides methods to help you handle logging in your stream connectors. It is written in OOP (object-oriented programming).
+
+Logs can be configured with two parameters:
+
+- logfile
+- log_level
+
+There are three different **log_level** values, going from 1 to 3. Below is the list of log message types you can expect with their corresponding **log_level**.
+
+| log_level | message type |
+| --------- | ----------------------------------- |
+| 1 | notice, error |
+| 2 | info, warning, notice, error |
+| 3 | debug, info, warning, notice, error |
+
+## Best practices
+
+All the stream-connectors-lib modules use the following syntax when logging:
+
+"[module_name:method_name]: your error message"
+
+For example
+
+```lua
+function EventQueue:do_things()
+  -- do things --
+
+  test_logger:debug("[EventQueue:do_things]: this is a debug message that is using the best practices")
+end
+```
+
+This is important for more efficient troubleshooting. Log messages can come from various places, and using this convention drastically improves the readability of the situation
+
+## Module initialization
+
+Since this is OOP, you must instantiate the module.
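+
+As a quick hedged sketch of how the severity threshold filters messages (the path and level are examples; the constructor itself is detailed just below):
+
+```lua
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+
+-- log_level 2 keeps error, warning, notice and info messages
+local quick_logger = sc_logger.new("/var/log/test_logger.log", 2)
+
+quick_logger:error("[example]: written, error messages start at log_level 1")
+quick_logger:info("[example]: written, info messages start at log_level 2")
+quick_logger:debug("[example]: not written, debug messages need log_level 3")
+```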
+
+### module constructor
+
+The constructor can be initialized with two parameters; if they are not provided, default values are used:
+
+- the log file. **Default value: /var/log/centreon-broker/stream-connector.log**
+- the maximum accepted severity level, going from 1 (only error and notice messages) to 3 (all messages, including debug). **Default value: 1**
+
+### constructor: Example
+
+```lua
+-- load module
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_logger.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+```
+
+If the logfile and severity are not provided, default values are going to be used.
+
+## error method
+
+The **error** method will print an error message in the logfile if **severity is greater than or equal to 1**
+
+### error: parameters
+
+- message. A string that is the error message you want to display in your logfile
+
+### error: example
+
+```lua
+-- call error method
+test_logger:error("[module_name:method_name]: This is an error message.")
+```
+
+## warning method
+
+The **warning** method will print a warning message in the logfile if **severity is greater than or equal to 2**
+
+### warning: parameters
+
+- message. A string that is the warning message you want to display in your logfile
+
+### warning: example
+
+```lua
+-- call warning method
+test_logger:warning("[module_name:method_name]: This is a warning message.")
+```
+
+## debug method
+
+The **debug** method will print a debug message in the logfile if **severity is greater than or equal to 3**
+
+### debug: parameters
+
+- message. A string that is the debug message you want to display in your logfile
+
+### debug: example
+
+```lua
+-- call debug method
+test_logger:debug("[module_name:method_name]: This is a debug message.")
+```
+
+## info method
+
+The **info** method will print an info message in the logfile if **severity is greater than or equal to 2**.
+
+### info: parameters
+
+- message. A string that is the info message you want to display in your logfile
+
+### info: example
+
+```lua
+-- call info method
+test_logger:info("[module_name:method_name]: This is an info message.")
+```
+
+## notice method
+
+The **notice** method will print a notice message in the logfile if **severity is greater than or equal to 1**.
+
+### notice: parameters
+
+- message. A string that is the notice message you want to display in your logfile
+
+### notice: example
+
+```lua
+-- call notice method
+test_logger:notice("[module_name:method_name]: This is a notice message.")
+```
+
+## log_curl_command method
+
+The **log_curl_command** method will print a notice message containing a ready-to-use shell curl command in the logfile.
+
+See [notice method](#notice-method) for more information about notice logs.
+
+### log_curl_command: parameters
+
+| parameter | type | optional | default value |
+| --------------------------------------------------------------------- | ------ | -------- | ------------- |
+| the url for the curl command | string | no | |
+| metadata containing headers information and the HTTP method for curl | table | no | |
+| stream connector parameters | table | no | |
+| data that must be sent | string | yes | |
+
+### log_curl_command: example
+
+```lua
+local url = "https://127.0.0.1/my_endpoint"
+local metadata = {
+  method = "POST",
+  headers = {
+    "content-type: application/json",
+    "token: mont-de-marsan"
+  }
+}
+
+local params = {
+  allow_insecure_connection = 1
+}
+
+local data = '{"host":"test-host","state":"down"}'
+
+-- call log_curl_command method
+test_logger:log_curl_command(url, metadata, params, data)
+--> this will print the following log
+--[[
+  Thu Mar 17 10:44:53 2022: INFO: [sc_logger:log_curl_command]: curl -k -X POST -H "content-type: application/json" -H "token: mont-de-marsan" "https://127.0.0.1/my_endpoint" -d '{"host":"test-host","state":"down"}'
+]]--
+```
diff --git a/stream-connectors/modules/docs/sc_macros.md b/stream-connectors/modules/docs/sc_macros.md
new file mode 100644
index 00000000000..6bb246d3bc8
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_macros.md
@@ -0,0 +1,1075 @@
+# Documentation of the sc_macros module
+
+- [Documentation of the sc_macros module](#documentation-of-the-sc_macros-module)
+  - [Introduction](#introduction)
+  - [Stream connectors macro explanation](#stream-connectors-macro-explanation)
+    - [Event macros](#event-macros)
+    - [Cache macros](#cache-macros)
+    - [Group macros](#group-macros)
+      - [group type](#group-type)
+      - [output format](#output-format)
+      - [regex filter](#regex-filter)
+      - [examples](#examples)
+    - [Transformation flags](#transformation-flags)
+  - [Module initialization](#module-initialization)
+    - [Module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [replace_sc_macro method](#replace_sc_macro-method)
+    - [replace_sc_macro: parameters](#replace_sc_macro-parameters)
+    - [replace_sc_macro: returns](#replace_sc_macro-returns)
+    - [replace_sc_macro: example](#replace_sc_macro-example)
+  - [get_cache_macro method](#get_cache_macro-method)
+    - [get_cache_macro: parameters](#get_cache_macro-parameters)
+    - [get_cache_macro: returns](#get_cache_macro-returns)
+    - [get_cache_macro: example](#get_cache_macro-example)
+  - [get_event_macro method](#get_event_macro-method)
+    - [get_event_macro: parameters](#get_event_macro-parameters)
+    - [get_event_macro: returns](#get_event_macro-returns)
+    - [get_event_macro: example](#get_event_macro-example)
+  - [get_group_macro method](#get_group_macro-method)
+    - [get_group_macro: parameters](#get_group_macro-parameters)
+    - [get_group_macro: returns](#get_group_macro-returns)
+    - [get_group_macro: example](#get_group_macro-example)
+  - [convert_centreon_macro method](#convert_centreon_macro-method)
+    - [convert_centreon_macro: parameters](#convert_centreon_macro-parameters)
+    - [convert_centreon_macro: returns](#convert_centreon_macro-returns)
+    - [convert_centreon_macro: example](#convert_centreon_macro-example)
+  - [get_centreon_macro method](#get_centreon_macro-method)
+    - [get_centreon_macro: parameters](#get_centreon_macro-parameters)
+    - [get_centreon_macro: returns](#get_centreon_macro-returns)
+    - [get_centreon_macro: example](#get_centreon_macro-example)
+  - [get_transform_flag method](#get_transform_flag-method)
+    - [get_transform_flag: parameters](#get_transform_flag-parameters)
+    - [get_transform_flag: returns](#get_transform_flag-returns)
+    - [get_transform_flag: example](#get_transform_flag-example)
+  - [transform_date method](#transform_date-method)
+    - [transform_date: parameters](#transform_date-parameters)
+    - [transform_date: returns](#transform_date-returns)
+    - [transform_date: example](#transform_date-example)
+  - [transform_short method](#transform_short-method)
+    - [transform_short: parameters](#transform_short-parameters)
+    - [transform_short: returns](#transform_short-returns)
+    - [transform_short: example](#transform_short-example)
+  - [transform_type method](#transform_type-method)
+    - [transform_type: parameters](#transform_type-parameters)
+    - [transform_type: returns](#transform_type-returns)
+    - [transform_type: example](#transform_type-example)
+  - [transform_state method](#transform_state-method)
+    - [transform_state: parameters](#transform_state-parameters)
+    - [transform_state: returns](#transform_state-returns)
+    - [transform_state: example](#transform_state-example)
+  - [transform_number method](#transform_number-method)
+    - [transform_number: parameters](#transform_number-parameters)
+    - [transform_number: returns](#transform_number-returns)
+    - [transform_number: example](#transform_number-example)
+  - [transform_string method](#transform_string-method)
+    - [transform_string: parameters](#transform_string-parameters)
+    - [transform_string: returns](#transform_string-returns)
+    - [transform_string: example](#transform_string-example)
+  - [get_hg_macro method](#get_hg_macro-method)
+    - [get_hg_macro: parameters](#get_hg_macro-parameters)
+    - [get_hg_macro: returns](#get_hg_macro-returns)
+    - [get_hg_macro: example](#get_hg_macro-example)
+  - [get_sg_macro method](#get_sg_macro-method)
+    - [get_sg_macro: parameters](#get_sg_macro-parameters)
+    - [get_sg_macro: returns](#get_sg_macro-returns)
+    - [get_sg_macro: example](#get_sg_macro-example)
+  - [get_bv_macro method](#get_bv_macro-method)
+    - [get_bv_macro: parameters](#get_bv_macro-parameters)
+    - [get_bv_macro: returns](#get_bv_macro-returns)
+    - [get_bv_macro: example](#get_bv_macro-example)
+  - [build_group_macro_value method](#build_group_macro_value-method)
+    - [build_group_macro_value: parameters](#build_group_macro_value-parameters)
+    - [build_group_macro_value: returns](#build_group_macro_value-returns)
+    - [build_group_macro_value: example](#build_group_macro_value-example)
+  - [group_macro_format_table method](#group_macro_format_table-method)
+    - [group_macro_format_table: parameters](#group_macro_format_table-parameters)
+    - [group_macro_format_table: returns](#group_macro_format_table-returns)
+    - [group_macro_format_table: example](#group_macro_format_table-example)
+  - [group_macro_format_inline method](#group_macro_format_inline-method)
+    - [group_macro_format_inline: parameters](#group_macro_format_inline-parameters)
+    - [group_macro_format_inline: returns](#group_macro_format_inline-returns)
+    - [group_macro_format_inline: example](#group_macro_format_inline-example)
+  - [build_converted_string_for_cache_and_event_macro method](#build_converted_string_for_cache_and_event_macro-method)
+    - [build_converted_string_for_cache_and_event_macro: parameters](#build_converted_string_for_cache_and_event_macro-parameters)
+    - [build_converted_string_for_cache_and_event_macro: returns](#build_converted_string_for_cache_and_event_macro-returns)
+    - [build_converted_string_for_cache_and_event_macro: example](#build_converted_string_for_cache_and_event_macro-example)
+
+## Introduction
+
+The sc_macros module provides methods to handle a stream connector oriented macro system such as {cache.host.name} and Centreon standard macros such as $HOSTALIAS$. It has been made in OOP (object oriented programming).
+
+## Stream connectors macro explanation
+
+There are three kinds of stream connector macros:
+
+- **event macros**
+- **cache macros**
+- **group macros**
+
+The first type refers to data that is accessible right from the event. The second type refers to data that needs to be retrieved from the broker cache. And the last type refers to three kinds of group objects in Centreon (hostgroups, servicegroups and Business views)
+
+### Event macros
+
+This one is quite easy to understand. The macro syntax is `{macro_name}` where *macro_name* is a property of an event. For example, for a service_status neb event all macro names are available [there](broker_data_structure.md#Service_status).
+
+This means that it is possible to use the following macros
+
+```lua
+"{service_id}" -- will be replaced by the service_id
+"{output}" -- will be replaced by the service output
+"{last_check}" -- will be replaced by the last_check timestamp
+"{state_type}" -- will be replaced by the state type value (0 or 1 for SOFT or HARD)
+"{state}" -- will be replaced by the state of the service (0, 1, 2, 3 for OK, WARNING, CRITICAL, UNKNOWN)
+```
+
+### Cache macros
+
+This one is a bit more complicated. The purpose is to retrieve information from the event cache using a macro. If you rely on the centreon-stream-connectors-lib to fill the cache, here is what you need to know.
+
+There are 5 kinds of cache
+
+- host cache (for any event that is linked to a host, which means any event but BA events)
+- service cache (for any event that is linked to a service)
+- poller cache (for any event that is linked to a poller, which means any event but BA events)
+- severity cache (for any event that is linked to a host, which means any event but BA events)
+- ba cache (only for a ba_status event)
+
+For example, if we want to retrieve the description of a service from the cache (because the description is not provided in the event data), we will use `{cache.service.description}`.
+
+For example, for a service_status neb event, all cache macros are available [there](sc_broker.md#get_service_all_infos-example)
+
+This means that it is possible to use the following macros
+
+```lua
+"{cache.service.description}" -- will be replaced by the service description
+"{cache.service.notes}" -- will be replaced by the service notes
+"{cache.service.last_time_critical}" -- will be replaced by the service last_time_critical timestamp
+```
+
+cache values for hosts: [list](sc_broker.md#get_host_all_infos-example)
+cache values for services: [list](sc_broker.md#get_services_all_infos-example)
+cache values for BAs: [list](sc_broker.md#get_ba_infos-example)
+cache values for pollers:
+
+- {cache.instance.name}
+- {cache.instance.id}
+
+cache values for severities:
+
+- {cache.severity.service}
+- {cache.severity.host}
+
+### Group macros
+
+Group macros are a very special kind of macro that allows you to retrieve the hostgroups, servicegroups or BVs that are linked to your host/service/BA.
The syntax goes as follows: `{groups(group_type,output_format,regex_filter)}`
+
+It means that when using a group macro, you need to specify which kind of group you want, its output format and the filter you are going to use.
+
+#### group type
+
+When using a group macro, you need to set a group type. You have three possibilities
+
+- hg (to retrieve hostgroups)
+- sg (to retrieve servicegroups)
+- bv (to retrieve business views)
+
+#### output format
+
+When using a group macro, you need to set an output format. You have two possibilities
+
+- table (each found group is going to be stored in a table structure)
+- inline (each found group is going to be stored in a string and each value will be separated using a `,`)
+
+#### regex filter
+
+When using a group macro, you need to set a regex filter. You can accept everything using `.*`, or accept only groups whose name contains nothing but alphanumeric characters with `^%w+$`.
+
+[More information about regex in lua](https://www.lua.org/pil/20.2.html)
+
+#### examples
+
+For a service linked to:
+
+| hostgroups | servicegroups |
+| ---------- | -------------- |
+| HG_1 | FOO_the-first |
+| HG_2 | FOO_the-second |
+| HG_3 | another_sg |
+
+get all hostgroups in a table format:
+
+| macro | result |
+| ----------------------- | -------------------------- |
+| `{groups(hg,table,.*)}` | `["HG_1", "HG_2", "HG_3"]` |
+
+get all servicegroups that start with "FOO" in an inline format:
+
+| macro | result |
+| ---------------------------- | -------------------------------- |
+| `{groups(sg,inline,^FOO.*)}` | `"FOO_the-first,FOO_the-second"` |
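+
+To make the regex filter more concrete, here is a minimal standalone Lua sketch (this is a simplified illustration, not the module's actual code) of what a macro such as `{groups(sg,inline,^FOO.*)}` does: keep the group names matching the Lua pattern, then join the survivors for the inline format.
+
+```lua
+-- simplified illustration of group macro filtering (not the module code)
+local servicegroups = { "FOO_the-first", "FOO_the-second", "another_sg" }
+local regex_filter = "^FOO.*"
+local accepted = {}
+
+for _, group_name in ipairs(servicegroups) do
+  -- keep only the groups whose name matches the Lua pattern
+  if string.match(group_name, regex_filter) then
+    table.insert(accepted, group_name)
+  end
+end
+
+-- inline format: one string, values separated by commas
+print(table.concat(accepted, ","))
+--> FOO_the-first,FOO_the-second
+```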
+
+### Transformation flags
+
+You can use transformation flags on stream connectors macros. The purpose of those flags is to convert the given value into something more appropriate. For example, you can convert a timestamp to a human readable date.
+
+Here is the list of all available flags
+
+| flag name | purpose | without flag | with flag |
+| --------- | ------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------- | ----------------------- |
+| _scdate | convert a timestamp to a date | 1623691758 | 2021-06-14 19:29:18 |
+| _sctype | convert a state type number to its human value | 0 | SOFT |
+| _scstate | convert a state to its human value | 2 | WARNING (for a service) |
+| _scshort | only retrieve the first line of a string (mostly used to get the output instead of the long output of a service, for example) | "my output\n this is part of the long output" | "my output" |
+| _scnumber | convert a string to a number | "1" | 1 |
+| _scstring | convert anything to a string | false | "false" |
+
+The **_scdate** flag is a bit specific because you can change the date format using the [**timestamp_conversion_format parameter**](sc_param.md#default-parameters)
+
+With all that information in mind, we can use the following macros
+
+```lua
+"{cache.service.last_time_critical}" -- will be replaced by the service last_time_critical timestamp
+"{cache.service.last_time_critical_scdate}" -- will be replaced by the service last_time_critical converted to a human readable date format
+"{state_type_sctype}" -- will be replaced by the service state_type in a human readable format (SOFT or HARD)
+"{state_scstate}" -- will be replaced by the service state in a human readable format (OK, WARNING, CRITICAL or UNKNOWN)
+"{output_scshort}" -- will be replaced by the first line of the service output
+"{cache.severity.service_scnumber}" -- will be replaced by 1 instead of "1"
+"{acknowledged_scstring}" -- will be replaced by "false" instead of false
+```
+
+## Module initialization
+
+Since this is OOP, it is required to initiate your module
+
+### Module constructor
+
+Constructor can be initialized with two parameters; if the second one is not provided, it will use a default value
+
+- params. This is a table of all the stream connectors parameters
+- sc_logger. This is an instance of the sc_logger module
+
+If you don't provide the sc_logger parameter, it will create a default sc_logger instance with default parameters ([sc_logger default params](./sc_logger.md#module-initialization))
+
+### constructor: Example
+
+```lua
+-- load modules
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_macros = require("centreon-stream-connectors-lib.sc_macros")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_logger.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- some stream connector params
+local params = {
+  my_param = "my_value"
+}
+
+-- create a new instance of the sc_macros module
+local test_macros = sc_macros.new(params, test_logger)
+```
+
+## replace_sc_macro method
+
+The **replace_sc_macro** method replaces all stream connector macros in a string with their values.
+
+head over the following chapters for more information
+
+- [Stream connectors macro explanation](#stream-connectors-macro-explanation)
+- [get_cache_macro](#get_cache_macro-method)
+- [get_event_macro](#get_event_macro-method)
+
+### replace_sc_macro: parameters
+
+| parameter | type | optional | default value |
+| ---------------------- | ------ | -------- | ------------- |
+| the string with macros | string | no | |
+| the event | table | no | |
+
+### replace_sc_macro: returns
+
+| return | type | always | condition |
+| ---------------- | ------ | ------ | --------- |
+| converted_string | string | yes | |
+
+### replace_sc_macro: example
+
+```lua
+local string = "my host id is {host_id}, name is {cache.host.name}, its status is {state_scstate} and its state type is {state_type_sctype}"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    host = {
+      name = "Tatooine"
+    }
+  }
+}
+
+local result = test_macros:replace_sc_macro(string, event)
+--> result is "my host id is 2712, name is Tatooine, its status is UP and its state type is HARD"
+```
+
+## get_cache_macro method
+
+The **get_cache_macro** method replaces a stream connector cache macro by its value.
+
+head over the following chapters for more information
+
+- [Transformation flags](#transformation-flags)
+- [Cache macros](#cache-macros)
+- [get_transform_flag](#get_transform_flag-method)
+
+### get_cache_macro: parameters
+
+| parameter | type | optional | default value |
+| -------------- | ------ | -------- | ------------- |
+| the macro name | string | no | |
+| the event | table | no | |
+
+### get_cache_macro: returns
+
+| return | type | always | condition |
+| ------------------ | --------------------------- | ------ | --------------------------------------------------------------------------- |
+| false | boolean | no | if the macro is not a cache macro or the value can't be found in the cache |
+| value of the macro | boolean or string or number | no | the value that has been found in the cache |
+
+### get_cache_macro: example
+
+```lua
+local macro = "{cache.host.name}"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    host = {
+      name = "Tatooine"
+    }
+  }
+}
+
+local result = test_macros:get_cache_macro(macro, event)
+--> result is "Tatooine"
+
+macro = "{host_id}"
+result = test_macros:get_cache_macro(macro, event)
+--> result is false, host_id is in the event table, not in a table inside the cache table of the event
+```
+
+## get_event_macro method
+
+The **get_event_macro** method replaces a stream connector event macro by its value.
+
+head over the following chapters for more information
+
+- [Transformation flags](#transformation-flags)
+- [Event macros](#event-macros)
+- [get_transform_flag](#get_transform_flag-method)
+
+### get_event_macro: parameters
+
+| parameter | type | optional | default value |
+| -------------- | ------ | -------- | ------------- |
+| the macro name | string | no | |
+| the event | table | no | |
+
+### get_event_macro: returns
+
+| return | type | always | condition |
+| ------------------ | --------------------------- | ------ | ------------------------------------------ |
+| false | boolean | no | if the macro is not an event macro |
+| value of the macro | boolean or string or number | no | the value that has been found in the event |
+
+### get_event_macro: example
+
+```lua
+local macro = "{host_id}"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    host = {
+      name = "Tatooine"
+    }
+  }
+}
+
+local result = test_macros:get_event_macro(macro, event)
+--> result is "2712"
+
+macro = "{cache.host.name}"
+result = test_macros:get_event_macro(macro, event)
+--> result is false, cache.host.name is in the cache table, not directly in the event table
+```
+
+## get_group_macro method
+
+The **get_group_macro** method replaces a stream connector group macro by its value.
+
+head over the following chapters for more information
+
+- [Group macros](#group-macros)
+
+### get_group_macro: parameters
+
+| parameter | type | optional | default value |
+| -------------- | ------ | -------- | ------------- |
+| the macro name | string | no | |
+| the event | table | no | |
+
+### get_group_macro: returns
+
+| return | type | always | condition |
+| ------------------ | --------------------------- | ------ | ------------------------------------------ |
+| false | boolean | no | if the macro is not a group macro |
+| value of the macro | boolean or string or number | no | the value that has been found in the event |
+
+### get_group_macro: example
+
+```lua
+local macro = "{groups(hg,table,^%w+$)}"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    hostgroups = {
+      [1] = {
+        group_id = 27,
+        group_name = "hg_1"
+      },
+      [2] = {
+        group_id = 12,
+        group_name = "hg2"
+      },
+      [3] = {
+        group_id = 1991,
+        group_name = "hg3"
+      }
+    }
+  }
+}
+
+local result = test_macros:get_group_macro(macro, event)
+--> result is
+--[[
+  {
+    [1] = "hg2",
+    [2] = "hg3"
+  }
+]]
+
+macro = "{groups(foo,bar,.*)}"
+result = test_macros:get_group_macro(macro, event)
+--> result is false, foo is not a valid group type and bar is not a valid format type
+```
+
+## convert_centreon_macro method
+
+The **convert_centreon_macro** method replaces all Centreon macros in a string (such as $HOSTALIAS$) with their values. It will first convert each Centreon macro to its stream connector macro counterpart and then convert the stream connector macro to its value.
+
+### convert_centreon_macro: parameters
+
+| parameter | type | optional | default value |
+| ---------------------- | ------ | -------- | ------------- |
+| the string with macros | string | no | |
+| the event | table | no | |
+
+### convert_centreon_macro: returns
+
+| return | type | always | condition |
+| ---------------- | ------ | ------ | ------------------------------------------ |
+| converted string | string | yes | the value that has been found in the event |
+
+### convert_centreon_macro: example
+
+```lua
+local string = "We should go to $HOSTNAME$ but address $HOSTADDRESS$ is not on open street map and by the way there is $HOSTALIAS$"
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    host = {
+      name = "Tatooine",
+      address = "27.12.19.91",
+      alias = "Too much sand"
+    }
+  }
+}
+
+local result = test_macros:convert_centreon_macro(string, event)
+--> result is "We should go to Tatooine but address 27.12.19.91 is not on open street map and by the way there is Too much sand"
+```
+
+## get_centreon_macro method
+
+The **get_centreon_macro** method retrieves the given macro in a Centreon macro list set up in the sc_macros module constructor and returns its associated stream connector macro.
+
+### get_centreon_macro: parameters
+
+| parameter | type | optional | default value |
+| --------------------- | ------ | -------- | ------------- |
+| the name of the macro | string | no | |
+
+### get_centreon_macro: returns
+
+| return | type | always | condition |
+| -------------------------------------- | ------- | ------ | ------------------------------------------------------ |
+| false | boolean | no | if the macro is not found in the predefined macro list |
+| the appropriate stream connector macro | string | no | the value that has been found in the event |
+
+### get_centreon_macro: example
+
+```lua
+local macro = "$HOSTALIAS$"
+
+local result = test_macros:get_centreon_macro(macro)
+--> result is "{cache.host.alias}"
+
+macro = "$ENDOR$"
+
+result = test_macros:get_centreon_macro(macro)
+--> result is false
+```
+
+## get_transform_flag method
+
+The **get_transform_flag** method gets the flag from a macro if there is one
+
+head over the following chapters for more information
+
+- [Transformation flags](#transformation-flags)
+
+### get_transform_flag: parameters
+
+| parameter | type | optional | default value |
+| --------------------- | ------ | -------- | ------------- |
+| the name of the macro | string | no | |
+
+### get_transform_flag: returns
+
+| return | type | always | condition |
+| ------ | ------------- | ------ | --------------------------------------------- |
+| macro | string | yes | the name of the macro |
+| flag | string or nil | yes | the macro transformation flag if there is one |
+
+### get_transform_flag: example
+
+```lua
+local macro = "{state_scstate}"
+
+local result, flag = test_macros:get_transform_flag(macro)
+--> result is "state", flag is "state" (the _sc prefix is removed)
+
+macro = "{last_check}"
+
+result, flag = test_macros:get_transform_flag(macro)
+--> result is "last_check", flag is nil
+```
+
+## transform_date method
+
+The **transform_date** method converts a timestamp into a human readable date. It is possible to choose the date format using the [**timestamp_conversion_format parameter**](sc_param.md#default-parameters) and get help from the [**lua documentation**](https://www.lua.org/pil/22.1.html) for the option syntax.
+
+### transform_date: parameters
+
+| parameter | type | optional | default value |
+| ----------------- | ------ | -------- | ------------- |
+| a timestamp value | number | no | |
+
+### transform_date: returns
+
+| return | type | always | condition |
+| ------ | ------ | ------ | --------------------------- |
+| date | string | yes | timestamp converted to date |
+
+### transform_date: example
+
+```lua
+local timestamp = 1623691758
+
+local result = test_macros:transform_date(timestamp)
+--> result is "2021-06-14 19:29:18"
+```
+
+## transform_short method
+
+The **transform_short** method keeps the first line of a string.
+
+### transform_short: parameters
+
+| parameter | type | optional | default value |
+| --------- | ------ | -------- | ------------- |
+| a string | string | no | |
+
+### transform_short: returns
+
+| return | type | always | condition |
+| -------------------------- | ------ | ------ | --------- |
+| the first line of a string | string | yes | |
+
+### transform_short: example
+
+```lua
+local string = "Paris is a nice city\n Mont de Marsan is way better"
+
+local result = test_macros:transform_short(string)
+--> result is "Paris is a nice city"
+```
+
+## transform_type method
+
+The **transform_type** method transforms a 0 or 1 value into SOFT or HARD
+
+### transform_type: parameters
+
+| parameter | type | optional | default value |
+| --------- | ------ | -------- | ------------- |
+| 0 or 1 | number | no | |
+
+### transform_type: returns
+
+| return | type | always | condition |
+| ------------ | ------ | ------ | --------- |
+| SOFT or HARD | string | yes | |
+
+### transform_type: example
+
+```lua
+local state_type = 0
+
+local result = test_macros:transform_type(state_type)
+--> result is "SOFT"
+```
+
+## transform_state method
+
+The **transform_state** method transforms a status code into its human readable status (e.g. UP, DOWN, WARNING, CRITICAL...)
+
+### transform_state: parameters
+
+| parameter | type | optional | default value |
+| ------------ | ------ | -------- | ------------- |
+| 0, 1, 2 or 3 | number | no | |
+| the event | table | no | |
+
+### transform_state: returns
+
+| return | type | always | condition |
+| ----------------- | ------ | ------ | --------- |
+| the status string | string | yes | |
+
+### transform_state: example
+
+```lua
+local event = {
+  service_id = 2712,
+  element = 24,
+  category = 1,
+  host_id = 1991
+}
+
+local state = 1
+
+local result = test_macros:transform_state(state, event)
+--> result is "WARNING" because it is a service (category 1 = neb, element 24 = service_status event)
+
+
+event = {
+  element = 14,
+  category = 1,
+  host_id = 1991
+}
+
+result = test_macros:transform_state(state, event)
+--> result is "DOWN" because it is a host (category 1 = neb, element 14 = host_status event)
+```
+
+## transform_number method
+
+The **transform_number** method transforms a string value into a number
+
+### transform_number: parameters
+
+| parameter | type | optional | default value |
+| --------- | ------ | -------- | ------------- |
+| a string | string | no | |
+
+### transform_number: returns
+
+| return | type | always | condition |
+| -------- | ------ | ------ | --------- |
+| a number | number | yes | |
+
+### transform_number: example
+
+```lua
+local string_number = "0"
+
+local result = test_macros:transform_number(string_number)
+--> result is 0
+```
+
+## transform_string method
+
+The **transform_string** method transforms any value into a string
+
+### transform_string: parameters
+
+| parameter | type | optional | default value |
+| --------- | ---- | -------- | ------------- |
+| anything | any | no | |
+
+### transform_string: returns
+
+| return | type | always | condition |
+| -------- | ------ | ------ | --------- |
+| a string | string | yes | |
+
+### transform_string: example
+
+```lua
+local boolean = false
+
+local result = test_macros:transform_string(boolean)
+--> result is "false"
+```
+
+## get_hg_macro method
+
+The **get_hg_macro** method retrieves hostgroup information and makes it available as a macro
+
+### get_hg_macro: parameters
+
+| parameter | type | optional | default value |
+| --------- | ----- | -------- | ------------- |
+| the event | table | no | |
+
+### get_hg_macro: returns
+
+| return | type | always | condition |
+| ----------------------------------------------------------- | ------ | ------ | --------- |
+| all hostgroups | table | yes | |
+| the name of the index that is linked to the hostgroups name | string | yes | |
+
+### get_hg_macro: example
+
+```lua
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    hostgroups = {
+      [1] = {
+        group_id = 27,
+        group_name = "hg_1"
+      },
+      [2] = {
+        group_id = 12,
+        group_name = "hg2"
+      },
+      [3] = {
+        group_id = 1991,
+        group_name = "hg3"
+      }
+    }
+  }
+}
+
+local hostgroups, index_name = test_macros:get_hg_macro(event)
+--> hostgroups is:
+--[[
+  hostgroups = {
+    [1] = {
+      group_id = 27,
+      group_name = "hg_1"
+    },
+    [2] = {
+      group_id = 12,
+      group_name = "hg2"
+    },
+    [3] = {
+      group_id = 1991,
+      group_name = "hg3"
+    }
+  }
+]]
+--> index_name is: group_name
+```
+
+## get_sg_macro method
+
+The **get_sg_macro** method retrieves servicegroup information and makes it available as a macro
+
+### get_sg_macro: parameters
+
+| parameter | type | optional | default value |
+| --------- | ----- | -------- | ------------- |
+| the event | table | no | |
+
+### get_sg_macro: returns
+
+| return | type | always | condition |
+| -------------------------------------------------------------- | ------ | ------ | --------- |
+| all servicegroups | table | yes | |
+| the name of the index that is linked to the servicegroups name | string | yes | |
+
+### get_sg_macro: example
+
+```lua
+local event = {
+  host_id = 2712,
+  state_type = 1,
+  state = 0,
+  cache = {
+    servicegroups = {
+      [1] = {
+        group_id = 27,
+        group_name = "sg_1"
+      },
+      [2] = {
+        group_id = 12,
+        group_name = "sg2"
+      },
+      [3] = {
+        group_id = 1991,
+        group_name = "sg3"
+      }
+    }
+  }
+}
+
+local servicegroups, index_name = test_macros:get_sg_macro(event)
+--> servicegroups is:
+--[[
+  servicegroups = {
+    [1] = {
+      group_id = 27,
+      group_name = "sg_1"
+    },
+    [2] = {
+      group_id = 12,
+      group_name = "sg2"
+    },
+    [3] = {
+      group_id = 1991,
+      group_name = "sg3"
+    }
+  }
+]]
+--> index_name is: group_name
+```
+
+## get_bv_macro method
+
+The **get_bv_macro** method retrieves business views information and makes it available as a macro
+
+### get_bv_macro: parameters
+
+| parameter | type | optional | default value |
+| --------- | ----- | -------- | ------------- |
+| the event | table | no | |
+
+### get_bv_macro: returns
+
+| return | type | always | condition |
+| ---------------------------------------------------------------- | ------ | ------ | --------- |
+| all business views | table | yes | |
+| the name of the index that is linked to the business views name | string | yes | |
+
+### get_bv_macro: example
+
+```lua
+local event = {
+  ba_id = 2712,
+  state = 0,
+  cache = {
+    bvs = {
+      [1] = {
+        bv_id = 27,
+        bv_name = "bv_1"
+      },
+      [2] = {
+        bv_id = 12,
+        bv_name = "bv2"
+      },
+      [3] = {
+        bv_id = 1991,
+        bv_name = "bv3"
+      }
+    }
+  }
+}
+
+local bvs, index_name = test_macros:get_bv_macro(event)
+--> bvs is:
+--[[
+  bvs = {
+    [1] = {
+      bv_id = 27,
+      bv_name = "bv_1"
+    },
+    [2] = {
+      bv_id = 12,
+      bv_name = "bv2"
+    },
+    [3] = {
+      bv_id = 1991,
+      bv_name = "bv3"
+    }
+  }
+]]
+--> index_name is: bv_name
+```
+
+## build_group_macro_value method
+
+The **build_group_macro_value** method builds the value that must replace the macro (it will also put it in the desired format)
+
+### build_group_macro_value: parameters
+
+| parameter | type | optional | default value |
+| -------------------------------------------------------- | ------ | -------- | ------------- |
+| the group data | table | no | |
+| the name of the index where the group name will be found | string | no | |
+| the format in which the result will be built | string | no | |
+| the regex that will filter found groups | string | no | |
+
+### build_group_macro_value: returns
+
+| return | type | always | condition |
+| ----------------------------------- | --------------- | ------ | ----------------------------------- |
+| boolean | boolean | yes | |
+| the macro value in the right format | string or table | no | only if the desired format is valid |
+
+### build_group_macro_value: example
+
+```lua
+local group_data = {
+  [1] = {
+    bv_id = 27,
+    bv_name = "bv_1"
+  },
+  [2] = {
+    bv_id = 12,
+    bv_name = "bv2"
+  },
+  [3] = {
+    bv_id = 1991,
+    bv_name = "bv3"
+  }
+}
+local index_name = "bv_name"
+local format = "inline"
+local regex = "^%w+$"
+
+local code, result = test_macros:build_group_macro_value(group_data, index_name, format, regex)
+--> code is: true
+--> result is: "bv2,bv3"
+
+format = "bad_format"
+code, result = test_macros:build_group_macro_value(group_data, index_name, format, regex)
+--> code is: false
+--> result is: nil
+```
+
+## group_macro_format_table method
+
+The **group_macro_format_table** method transforms the given macro value into a table (the value is already a table, so it is returned as is)
+
+### group_macro_format_table: parameters
+
+| parameter | type | optional | default value |
+| --------------- | ----- | -------- | ------------- |
+| the macro value | table | no | |
+
+### group_macro_format_table: returns
+
+| return | type | always | condition |
+| -------------------------- | ----- | ------ | --------- |
+| the macro value as a table | table | yes | |
+
+### group_macro_format_table: example
+
+```lua
+local macro_value = {
+  [1] = "bv2",
+  [2] = "bv3"
+}
+
+local result = test_macros:group_macro_format_table(macro_value)
+--> result is:
+--[[
+  result = {
+    [1] = "bv2",
+    [2] = "bv3"
+  }
+]]--
+```
+
+## group_macro_format_inline method
+
+The **group_macro_format_inline** method transforms the given macro value into a string in which values are separated by commas
+
+### group_macro_format_inline: parameters
+
+| parameter | type | optional | default value |
+| --------------- | ----- | -------- | ------------- |
+| the macro value | table | no | |
+
+### group_macro_format_inline: returns
+
+| return | type | always | condition |
+| --------------------------- | ------ | ------ | --------- |
+| the macro value as a string | string | yes | |
+
+### group_macro_format_inline: example
+
+```lua
+local macro_value = {
+  [1] = "bv2",
+  [2] = "bv3"
+}
+
+local result = test_macros:group_macro_format_inline(macro_value)
+--> result is: "bv2,bv3"
+```
+
+## build_converted_string_for_cache_and_event_macro method
+
+The **build_converted_string_for_cache_and_event_macro** method replaces a cache or event macro in a string that may contain such macros
+
+### build_converted_string_for_cache_and_event_macro: parameters
+
+| parameter | type | optional | default value |
+| ---------------------------------- | ------ | -------- | ------------- |
+| the macro value | any | no | |
+| the macro name | string | no | |
+| the string that may contain macros | string | no | |
+
+### build_converted_string_for_cache_and_event_macro: returns
+
+| return | type | always | condition |
+| ----------------------------------- | ------ | ------ | --------- |
+| the string with the macro converted | string | yes | |
+
+### build_converted_string_for_cache_and_event_macro: example
+
+```lua
+local string_with_macros = "my cache macro {cache.host.name}"
+local macro_name = "{cache.host.name}"
+local macro_value = "Arcadia"
+
+local result = test_macros:build_converted_string_for_cache_and_event_macro(macro_value, macro_name, string_with_macros)
+--> result is: "my cache macro Arcadia"
+```
diff --git a/stream-connectors/modules/docs/sc_metrics.md b/stream-connectors/modules/docs/sc_metrics.md
new file mode 100644
index 00000000000..cac7e0ad81c
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_metrics.md
@@ -0,0 +1,262 @@
+# Documentation of the sc_metrics module
+
+- [Documentation of the sc\_metrics module](#documentation-of-the-sc_metrics-module)
+  - [Introduction](#introduction)
+  - [Module initialization](#module-initialization)
+    - [module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [is\_valid\_bbdo\_element method](#is_valid_bbdo_element-method)
+    - [is\_valid\_bbdo\_element: returns](#is_valid_bbdo_element-returns)
+    - [is\_valid\_bbdo\_element: example](#is_valid_bbdo_element-example)
+  - [is\_valid\_metric\_event method](#is_valid_metric_event-method)
+    - [is\_valid\_metric\_event: returns](#is_valid_metric_event-returns)
+    - [is\_valid\_metric\_event: example](#is_valid_metric_event-example)
+  - [is\_valid\_host\_metric\_event method](#is_valid_host_metric_event-method)
+    - [is\_valid\_host\_metric\_event: returns](#is_valid_host_metric_event-returns)
+    - [is\_valid\_host\_metric\_event: example](#is_valid_host_metric_event-example)
+  - [is\_valid\_service\_metric\_event method](#is_valid_service_metric_event-method)
+    - [is\_valid\_service\_metric\_event: returns](#is_valid_service_metric_event-returns)
+    - [is\_valid\_service\_metric\_event: example](#is_valid_service_metric_event-example)
+  - [is\_valid\_kpi\_metric\_event method](#is_valid_kpi_metric_event-method)
+    - [is\_valid\_kpi\_metric\_event: returns](#is_valid_kpi_metric_event-returns)
+    - [is\_valid\_kpi\_metric\_event: example](#is_valid_kpi_metric_event-example)
+  - [is\_valid\_perfdata method](#is_valid_perfdata-method)
+    - [is\_valid\_perfdata parameters](#is_valid_perfdata-parameters)
+    - [is\_valid\_perfdata: returns](#is_valid_perfdata-returns)
+    - [is\_valid\_perfdata: example](#is_valid_perfdata-example)
+  - [build\_metric method](#build_metric-method)
+    - [build\_metric parameters](#build_metric-parameters)
+    - [build\_metric: example](#build_metric-example)
+
+## Introduction
+
+The sc_metrics module provides methods to help you handle metrics for your stream connectors. It has been made in OOP (object oriented programming).
+
+## Module initialization
+
+Since this is OOP, it is required to initiate your module.
+
+### module constructor
+
+Constructor must be initialized with 5 parameters
+
+- an event table
+- a params table
+- a sc_common instance
+- a sc_broker instance
+- a sc_logger instance (will create a new one with default parameters if not provided)
+
+### constructor: Example
+
+```lua
+local event = {
+  --- event data ---
+}
+
+-- load modules
+local sc_param = require("centreon-stream-connectors-lib.sc_param")
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_metrics = require("centreon-stream-connectors-lib.sc_metrics")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_param.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- create a new instance of the sc_common module
+local test_common = sc_common.new(test_logger)
+
+-- create a new instance of the sc_param module
+local test_param = sc_param.new(test_common, test_logger)
+
+-- create a new instance of the sc_broker module
+local test_broker = sc_broker.new(test_logger)
+
+-- create a new instance of the sc_metrics module
+local test_metrics = sc_metrics.new(event, test_param.params, test_common, test_broker, test_logger)
+```
+
+## is_valid_bbdo_element method
+
+The **is_valid_bbdo_element** method checks if the event is in an accepted category and is an appropriate element. It uses the [**accepted_elements and accepted_categories parameters**](sc_param.md#default-parameters) to validate an event.
It also checks if the element is one that provides performance data (the current list is: *host, service, host_status, service_status, kpi_event*)
+
+### is_valid_bbdo_element: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_bbdo_element: example
+
+```lua
+local result = test_metrics:is_valid_bbdo_element()
+--> result is true or false
+```
+
+## is_valid_metric_event method
+
+The **is_valid_metric_event** method makes sure that the metric event is valid if it is a **host, service, service_status or kpi_event** event.
+
+head over the following chapters for more information
+
+- [is_valid_host_metric_event](#is_valid_host_metric_event-method)
+- [is_valid_service_metric_event](#is_valid_service_metric_event-method)
+- [is_valid_kpi_metric_event](#is_valid_kpi_metric_event-method)
+
+### is_valid_metric_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_metric_event: example
+
+```lua
+local result = test_metrics:is_valid_metric_event()
+--> result is true or false
+```
+
+## is_valid_host_metric_event method
+
+The **is_valid_host_metric_event** method makes sure that the metric event is a valid host metric event.
+
+head over the following chapters for more information
+
+- [is_valid_host](sc_event.md#is_valid_host-method)
+- [is_valid_poller](sc_event.md#is_valid_poller-method)
+- [is_valid_host_severity](sc_event.md#is_valid_host_severity-method)
+- [is_valid_hostgroup](sc_event.md#is_valid_hostgroup-method)
+- [is_valid_perfdata](#is_valid_perfdata-method)
+
+### is_valid_host_metric_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_host_metric_event: example
+
+```lua
+local result = test_metrics:is_valid_host_metric_event()
+--> result is true or false
+```
+
+## is_valid_service_metric_event method
+
+The **is_valid_service_metric_event** method makes sure that the metric event is a valid service metric event.
+
+head over the following chapters for more information
+
+- [is_valid_host](sc_event.md#is_valid_host-method)
+- [is_valid_poller](sc_event.md#is_valid_poller-method)
+- [is_valid_host_severity](sc_event.md#is_valid_host_severity-method)
+- [is_valid_hostgroup](sc_event.md#is_valid_hostgroup-method)
+- [is_valid_service](sc_event.md#is_valid_service-method)
+- [is_valid_service_severity](sc_event.md#is_valid_service_severity-method)
+- [is_valid_servicegroup](sc_event.md#is_valid_servicegroup-method)
+- [is_valid_perfdata](#is_valid_perfdata-method)
+
+### is_valid_service_metric_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_service_metric_event: example
+
+```lua
+local result = test_metrics:is_valid_service_metric_event()
+--> result is true or false
+```
+
+## is_valid_kpi_metric_event method
+
+The **is_valid_kpi_metric_event** method makes sure that the metric event is a valid kpi metric event.
+
+head over the following chapters for more information
+
+- [is_valid_perfdata](#is_valid_perfdata-method)
+
+### is_valid_kpi_metric_event: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_kpi_metric_event: example
+
+```lua
+local result = test_metrics:is_valid_kpi_metric_event()
+--> result is true or false
+```
+
+## is_valid_perfdata method
+
+The **is_valid_perfdata** method makes sure that the performance data is valid, meaning that it is not empty and that it can be parsed. If the performance data is valid, its information is stored in a new table
+
+### is_valid_perfdata parameters
+
+| parameter | type | optional | default value |
+| --------------------------------------------- | ------ | -------- | ------------- |
+| the performance data that needs to be checked | string | no | |
+
+### is_valid_perfdata: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | --------- |
+| true or false | boolean | yes | |
+
+### is_valid_perfdata: example
+
+```lua
+local perfdata = "pl=45%;40;80;0;100"
+local result = test_metrics:is_valid_perfdata(perfdata)
+--> result is true or false
+--> test_metrics.metrics is now
+--[[
+  test_metrics.metrics = {
+    pl = {
+      value = 45,
+      uom = "%",
+      min = 0,
+      max = 100,
+      warning_low = 0,
+      warning_high = 40,
+      warning_mode = false,
+      critical_low = 0,
+      critical_high = 80,
+      critical_mode = false,
+      name = "pl"
+    }
+  }
+]]--
+```
+
+## build_metric method
+
+The **build_metric** method uses the provided stream connector format method to parse every metric in the event. It also filters out metrics based on their name and the parameter **[accepted_metrics](sc_param.md#default-parameters)**
+
+### build_metric parameters
+
+| parameter | type | optional | default value |
+| --------------------------------------------- | -------- | -------- | ------------- |
+| the format method from the stream connector | function | no | |
+
+### build_metric: example
+
+```lua
+local function my_format_method(metric_data)
+  -- your code here
+end
+
+local stored_method = function(metric_data) return my_format_method(metric_data) end
+test_metrics:build_metric(stored_method)
+```
diff --git a/stream-connectors/modules/docs/sc_param.md b/stream-connectors/modules/docs/sc_param.md
new file mode 100644
index 00000000000..f5b3877eb90
--- /dev/null
+++ b/stream-connectors/modules/docs/sc_param.md
@@ -0,0 +1,410 @@
+# Documentation of the sc_param module
+
+- [Documentation of the sc\_param module](#documentation-of-the-sc_param-module)
+  - [Introduction](#introduction)
+    - [Default parameters](#default-parameters)
+  - [Module initialization](#module-initialization)
+    - [module constructor](#module-constructor)
+    - [constructor: Example](#constructor-example)
+  - [param\_override method](#param_override-method)
+    - [param\_override: parameters](#param_override-parameters)
+    - [param\_override: example](#param_override-example)
+  - [check\_params method](#check_params-method)
+    - [check\_params: example](#check_params-example)
+  - [get\_kafka\_parameters method](#get_kafka_parameters-method)
+    - [get\_kafka\_params: parameters](#get_kafka_params-parameters)
+    - [get\_kafka\_params: example](#get_kafka_params-example)
+  - [is\_mandatory\_config\_set method](#is_mandatory_config_set-method)
+    - [is\_mandatory\_config\_set: parameters](#is_mandatory_config_set-parameters)
+    - [is\_mandatory\_config\_set: returns](#is_mandatory_config_set-returns)
+    - [is\_mandatory\_config\_set: example](#is_mandatory_config_set-example)
+  - [load\_event\_format\_file method](#load_event_format_file-method)
+    - [load\_event\_format\_file: parameters](#load_event_format_file-parameters)
+    - [load\_event\_format\_file: returns](#load_event_format_file-returns)
+    - [load\_event\_format\_file: example](#load_event_format_file-example)
+  - [validate\_pattern\_param method](#validate_pattern_param-method)
+    - [validate\_pattern\_param: parameters](#validate_pattern_param-parameters)
+    - [validate\_pattern\_param: returns](#validate_pattern_param-returns)
+    - [validate\_pattern\_param: example](#validate_pattern_param-example)
+  - [build\_and\_validate\_filters\_pattern method](#build_and_validate_filters_pattern-method)
+    - [build\_and\_validate\_filters\_pattern: parameters](#build_and_validate_filters_pattern-parameters)
+    - [build\_and\_validate\_filters\_pattern: example](#build_and_validate_filters_pattern-example)
+
+## Introduction
+
+The sc_param module provides methods to help you handle parameters for your stream connectors. It also provides a list of default parameters that are available for every stream connector (the complete list is below) and a set of mappings to convert IDs to human readable text or the other way around. Head over [**the mappings documentation**](mappings.md) for more information. It has been made in OOP (object oriented programming).
+
+### Default parameters
+
+| Parameter name | type | default value | description | default scope | additional information |
+| -------------- | ---- | ------------- | ----------- | ------------- | ---------------------- |
+| accepted_categories | string | neb,bam | each event is linked to a broker category that we can use to filter events | | it is a comma-separated list, can use "neb", "bam", "storage". Storage is deprecated, use "neb" to get metrics data [more information](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#event-categories) |
+| accepted_elements | string | host_status,service_status,ba_status | each event is linked to a broker element that we can use to filter events | | it is a comma-separated list, can use any type in the "neb", "bam" and "storage" tables [described here](https://docs.centreon.com/current/en/developer/developer-broker-bbdo.html#neb) (you must use lower case and replace blank spaces with underscores. "Host status" becomes "host_status") |
"Host status" becomes "host_status") | +| host_status | string | 0,1,2 | comma-separated list of accepted host statuses (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | | | +| service_status | string | 0,1,2,3 | coma separated list of accepted services status (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | | | +| ba_status | string | 0,1,2 | coma separated list of accepted BA status (0 = OK, 1 = WARNING, 2 = CRITICAL) | | | +| hard_only | number | 1 | accept only events that are in a HARD state (use 0 to accept SOFT state too) | host_status(neb), service_status(neb) | | +| acknowledged | number | 0 | accept only events that aren't acknowledged (use 1 to accept acknowledged events too) | host_status(neb), service_status(neb) | | +| in_downtime | number | 0 | accept only events that aren't in downtime (use 1 to accept events that are in downtime too) | host_status(neb), service_status(neb), ba_status(bam) | | +| flapping | number | 0 | accept only events that aren't flapping (use 1 to accept flapping events too) | host_status(neb), service_status(neb) | | +| accepted_hostgroups | string | | coma separated list of hostgroups that are accepted (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| rejected_hostgroups | string | | comma-separated list of hostgroups that are rejected (for example: my_hostgroup_1,my_hostgroup_2) | host_status(neb), service_status(neb), acknowledgement(neb) | | +| accepted_servicegroups | string | | comma-separated list of servicegroups that are accepted (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | +| rejected_servicegroups | string | | comma-separated list of servicegroups that are rejected (for example: my_servicegroup_1,my_servicegroup_2) | service_status(neb), acknowledgement(neb) | | +| accepted_hosts | string | | accepted hosts (can use Lua patterns) that are accepted (for example: host_A%2) | host_status(neb), service_status(neb), acknowledgement(neb), downtime(neb) | [lua pattern documentation](https://www.lua.org/pil/20.2.html) | +| accepted_services | string | | accepted services (can use Lua patterns) that are accepted (for example: service_A%d+) | service_status(neb), acknowledgement(neb), downtime(neb) | [lua pattern documentation](https://www.lua.org/pil/20.2.html) | +| accepted_hosts_enable_split | number | 0 | allows you to use a comma-separated list of hosts in the accepted_hosts parameter (for example: host_A%d+,host_B%d+). (0 = disabled, 1 = enable) | host_status(neb), service_status(neb), acknowledgement(neb), downtime(neb) | | +| accepted_services_enable_split | number | 0 | allows you to use a comma-separated list of services in the accepted_services parameter (for example: service_A%d+,service_B%d+). 
+| accepted_services_enable_split | number | 0 | allows you to use a comma-separated list of services in the accepted_services parameter (for example: service_A%d+,service_B%d+). (0 = disabled, 1 = enabled) | service_status(neb), acknowledgement(neb), downtime(neb) | |
+| accepted_services_split_characters | string | , | the separator that is used when using the accepted_services_enable_split and accepted_services parameters | service_status(neb), acknowledgement(neb), downtime(neb) | |
+| accepted_hosts_split_characters | string | , | the separator that is used when using the accepted_hosts_enable_split and accepted_hosts parameters | host_status(neb), service_status(neb), acknowledgement(neb), downtime(neb) | |
+| accepted_bvs | string | | comma-separated list of BVs that are accepted (for example: my_bv_1,my_bv_2) | ba_status(bam) | |
+| rejected_bvs | string | | comma-separated list of BVs that are rejected (for example: my_bv_1,my_bv_2) | ba_status(bam) | |
+| accepted_pollers | string | | comma-separated list of pollers that are accepted (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| rejected_pollers | string | | comma-separated list of pollers that are rejected (for example: my_poller_1,my_poller_2) | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| accepted_authors | string | | comma-separated list of authors that are accepted (for example: author_1,author_2) | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| rejected_authors | string | | comma-separated list of authors that are rejected (for example: author_1,author_2) | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| accepted_metrics | string | `.*` | filter metrics based on their name. Use Lua patterns to filter | metrics stream connectors | [lua pattern documentation](https://www.lua.org/pil/20.2.html) |
+| skip_anon_events | number | 1 | filter out events if their name can't be found in the broker cache (use 0 to accept them) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | |
+| skip_nil_id | number | 1 | filter out events if their ID is nil (use 0 to accept them. YOU SHOULDN'T DO THAT) | host_status(neb), service_status(neb), ba_status(bam), acknowledgement(neb) | |
+| max_buffer_size | number | 1 | this is the number of events the stream connector is going to store before sending them (bulk send is made using a value above 1) | | |
+| max_buffer_age | number | 5 | if no new event has been stored in the buffer in the past 5 seconds, all stored events are going to be sent even if max_buffer_size hasn't been reached | | |
+| max_all_queues_age | number | 300 | if the last global flush was 300 seconds ago, a flush of each queue will be forced | | |
+| send_mixed_events | number | 1 | when sending data, it will mix all sorts of events in every payload. It means that you can have events about hosts mixed with events about services when set to 1. Performance wise, it is **better** to set it to **1**. **Only** set it to **0** if the tool that you are sending events to **doesn't handle a payload with mixed events**. | | |
+| service_severity_threshold | number | nil | the threshold that will be used to filter severity for services. It must be used with the service_severity_operator option | service_status(neb), acknowledgement(neb) | |
+| service_severity_operator | string | >= | the mathematical operator used to compare the accepted service severity threshold and the service severity (operation order is: threshold >= service severity) | service_status(neb), acknowledgement(neb) | |
+| host_severity_threshold | number | nil | the threshold that will be used to filter severity for hosts. It must be used with the host_severity_operator option | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| host_severity_operator | string | >= | the mathematical operator used to compare the accepted host severity threshold and the host severity (operation order is: threshold >= host severity) | host_status(neb), service_status(neb), acknowledgement(neb) | |
+| ack_host_status | string | | comma-separated list of accepted host statuses for an acknowledgement event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | acknowledgement(neb) | |
+| ack_service_status | string | | comma-separated list of accepted service statuses for an acknowledgement event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | acknowledgement(neb) | |
+| dt_host_status | string | | comma-separated list of accepted host statuses for a downtime event. It uses the host_status parameter by default (0 = UP, 1 = DOWN, 2 = UNREACHABLE) | downtime(neb) | |
+| dt_service_status | string | | comma-separated list of accepted service statuses for a downtime event. It uses the service_status parameter by default (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN) | downtime(neb) | |
+| enable_host_status_dedup | number | 1 | enable the deduplication of host status events when set to 1 | host_status(neb) | |
+| enable_service_status_dedup | number | 1 | enable the deduplication of service status events when set to 1 | service_status(neb) | |
+| accepted_authors | string | | comma-separated list of accepted authors for a comment. It uses the alias (login) of the Centreon contacts | downtime(neb), acknowledgement(neb) | |
+| local_time_diff_from_utc | number | default value is the time difference the centreon central server has from UTC | the time difference from UTC in seconds | all | |
+| timestamp_conversion_format | string | %Y-%m-%d %X | the date format used to convert timestamps. Default value will print dates like this: 2021-06-11 10:43:38 | all | [date format information](https://www.lua.org/pil/22.1.html) |
+| send_data_test | number | 0 | when set to 1, send data to the logfile of the stream connector instead of sending it where the stream connector was designed to | all | |
[**See documentation for more information**](templating.md) | |
+| custom_code_file | string | | path to a file that contains your custom Lua code | any | [Documentation](custom_code.md) |
+| proxy_address | string | | address of the proxy | | |
+| proxy_port | number | | port of the proxy | | |
+| proxy_username | string | | user for the proxy | | |
+| proxy_password | string | | password of the proxy user | | |
+| proxy_protocol | string | http | protocol to use with the proxy (can be http or https) | | |
+| connection_timeout | number | 60 | time to wait, in seconds, when opening a connection | | |
+| allow_insecure_connection | number | 0 | check the certificate validity of the peer host (0 = needs to be a valid certificate), use 1 if you are using self-signed certificates | | |
+| use_long_output | number | 1 | use the long output when sending an event (set to 0 to send the short output) | service_status(neb), host_status(neb) | |
+| remove_line_break_in_output | number | 1 | replace all line breaks (\n) in the output with the character set in the output_line_break_replacement_character parameter | service_status(neb), host_status(neb) | |
+| output_line_break_replacement_character | string | " " | the value that replaces line breaks in the output (default value is a blank space) | service_status(neb), host_status(neb) | |
+| output_size_limit | number | "" | limit the output to this number of characters (no limit by default) | service_status(neb), host_status(neb) | |
+| metric_name_regex | string | "" | the regex that will be used to transform the metric name into a name compatible with the software that will receive the data | service_status(neb), host_status(neb) | |
+| metric_replacement_character | string | "_" | the character that will be used to replace invalid characters in the metric name | service_status(neb), host_status(neb) | |
+| logfile | string | **check the stream connector documentation** | the logfile that will be used for the stream connector | any | |
+| log_level | number | 1 | the verbosity level for the logs. 1 = error + notice, 2 = error + warning + notice, 3 = error + warning + notice + debug (you should avoid using level 3) | any | |
+| log_curl_commands | number | 0 | log ready-to-use curl commands when enabled (0 = disabled, 1 = enabled) | any | |
+
+## Module initialization
+
+Since this module is object-oriented, you must create an instance of it before use.
+
+### module constructor
+
+The constructor must be initialized with two parameters:
+
+- a sc_common instance
+- a sc_logger instance (a new one with default parameters is created if not provided)
+
+### constructor: Example
+
+```lua
+-- load modules
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_param = require("centreon-stream-connectors-lib.sc_params")
+
+-- initiate "mandatory" information for the logger module
+local logfile = "/var/log/test_param.log"
+local severity = 1
+
+-- create a new instance of the sc_logger module
+local test_logger = sc_logger.new(logfile, severity)
+
+-- create a new instance of the sc_common module
+local test_common = sc_common.new(test_logger)
+
+-- create a new instance of the sc_param module
+local test_param = sc_param.new(test_common, test_logger)
+```
+
+## param_override method
+
+The **param_override** method checks if a standard parameter from [**Default parameters**](#default-parameters) has been overridden by the user.
If so, it replaces the default value with the one provided by the user.
+
+### param_override: parameters
+
+| parameter | type | optional | default value |
+| -------------------------------------- | ----- | -------- | ------------- |
+| the list of parameters and their value | table | no | |
+
+### param_override: example
+
+```lua
+--> test_param.param.accepted_elements is: "host_status,service_status,ba_status"
+--> test_param.param.in_downtime is: 0
+
+-- change the value of the default parameters called accepted_elements and in_downtime (normally they come from the web configuration, we just simulate this behavior in this example)
+local params = {
+  accepted_elements = "ba_status",
+  in_downtime = 1
+}
+
+-- use the param_override method to override the default values for in_downtime and accepted_elements
+test_param:param_override(params)
+--> test_param.param.accepted_elements is: "ba_status"
+--> test_param.param.in_downtime is: 1
+```
+
+## check_params method
+
+The **check_params** method applies various conformity checks on the default parameters. If a conformity check fails on a parameter, the parameter is reverted to its [**default value**](#default-parameters).
+
+### check_params: example
+
+```lua
+--> test_param.param.accepted_elements is: "host_status,service_status,ba_status"
+--> test_param.param.in_downtime is: 0
+
+-- change the value of the default parameters called accepted_elements and in_downtime (normally they come from the web configuration, we just simulate this behavior in this example)
+local params = {
+  accepted_elements = "ba_status",
+  in_downtime = 12 -- this must be 0 or 1
+}
+
+-- use the param_override method to override the default values for in_downtime and accepted_elements
+test_param:param_override(params)
+--> test_param.param.accepted_elements is: "ba_status"
+--> test_param.param.in_downtime is: 12
+
+-- check default param validity
+test_param:check_params()
+--> test_param.param.accepted_elements is: "ba_status"
+--> test_param.param.in_downtime is: 0 (12 is not a valid value, so it goes back to the default one)
+```
+
+## get_kafka_parameters method
+
+The **get_kafka_parameters** method finds the configuration parameters that are related to a stream connector that sends data to **Kafka**.
+To achieve this, parameters must match the regular expression `^_sc_kafka_`. The method then strips the `_sc_kafka_` prefix from the parameter name and adds the parameter to the kafka_config object.
+
+A list of Kafka parameters is available [**here**](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md). You must put **_sc_kafka_** as a prefix to use them.
+
+For example, the parameter `security.protocol` becomes `_sc_kafka_security.protocol`.
+
+### get_kafka_params: parameters
+
+| parameter | type | optional | default value |
+| ------------ | ------ | -------- | ------------- |
+| kafka_config | object | no | |
+| params | table | no | |
+
+### get_kafka_params: example
+
+```lua
+-- create the kafka_config object
+local test_kafka_config = kafka_config.create()
+
+-- set up a parameter list
+local params = {
+  broker = "localhost:9093",
+  ["_sc_kafka_sasl.username"] = "john",
+  topic = "pasta",
+  ["_sc_kafka_sasl.password"] = "doe"
+}
+
+test_param:get_kafka_params(test_kafka_config, params)
+
+--> test_kafka_config["sasl.username"] is "john"
+--> test_kafka_config["sasl.password"] is "doe"
+--> test_kafka_config["topic"] is nil
+```
+
+## is_mandatory_config_set method
+
+The **is_mandatory_config_set** method checks if all mandatory parameters for a stream connector are set up. If one is missing, it prints an error and returns false. Each mandatory parameter that is found is stored in the standard parameters list.
+
+### is_mandatory_config_set: parameters
+
+| parameter | type | optional | default value |
+| ---------------- | ----- | -------- | ------------- |
+| mandatory_params | table | no | |
+| params | table | no | |
+
+### is_mandatory_config_set: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | -------------------------------------------------------- |
+| true or false | boolean | yes | depending on whether a mandatory configuration parameter is missing |
+
+### is_mandatory_config_set: example
+
+```lua
+-- create a list of mandatory parameters
+local mandatory_params = {
+  [1] = "username",
+  [2] = "password"
+}
+
+-- list of parameters configured by the user
+local params = {
+  username = "John",
+  address = "localhost",
+}
+
+local result = test_param:is_mandatory_config_set(mandatory_params, params)
+
+--> result is false because the "password" parameter is not in the list of parameters
+--[[
+  since the username index (1) is lower than the password index (2), the username property is still available in the test_param.param table
+  --> test_param.param.username is "John"
+]]
+
+params.password = "hello"
+
+result = test_param:is_mandatory_config_set(mandatory_params, params)
+--> result is true because password and username are both in the params table
+--> test_param.param.username is "John"
+--> test_param.param.password is "hello"
+```
+
+## load_event_format_file method
+
+The **load_event_format_file** method loads a JSON file whose purpose is to serve as a template for formatting events. It uses the [**format_file parameter**](#default-parameters) to know which file to load. If a file has been successfully loaded, a template table is created in the self.params table. If the **json_string** parameter is set to true, the template format will be a JSON string instead of a table.
+
+### load_event_format_file: parameters
+
+| parameter | type | optional | default value |
+| ----------- | ------- | -------- | ------------------ |
+| json_string | boolean | yes | nil (acts as false) |
+
+### load_event_format_file: returns
+
+| return | type | always | condition |
+| ------------- | ------- | ------ | -------------------------------------------------------- |
+| true or false | boolean | yes | depending on whether the template file has been successfully loaded |
+
+### load_event_format_file: example
+
+```lua
+--[[
+  /etc/centreon-broker/sc_template.json content is:
+
+  {
+    "neb_service_status": {
+      "time_of_event": "{last_check_scdate}",
+      "index": "centreon",
+      "payload": {
+        "host_name": "{cache.host.name}",
+        "service_name": "{cache.service.description}",
+        "status": "{state}"
+      }
+    }
+  }
+]]--
+test_param.params.format_file = "/etc/centreon-broker/sc_template.json"
+
+-- using true as a parameter
+local result = test_param:load_event_format_file(true)
+
+--> result is true
+--[[
+test_param.params.format_template is now created and looks like
+
+test_param.params = {
+  format_template = {
+    [1] = {
+      [24] = '{"time_of_event":"{last_check_scdate}","index":"centreon","payload":{"host_name":"{cache.host.name}","service_name":"{cache.service.description}","status":"{state}"}}'
+    }
+  }
+}
+]]--
+
+-- using false as a parameter
+result = test_param:load_event_format_file(false)
+
+--> result is true
+--[[
+test_param.params.format_template is now created and looks like
+
+test_param.params = {
+  format_template = {
+    [1] = {
+      [24] = {
+        time_of_event = "{last_check_scdate}",
+        index = "centreon",
+        payload = {
+          host_name = "{cache.host.name}",
+          service_name = "{cache.service.description}",
+          status = "{state}"
+        }
+      }
+    }
+  }
+}
+]]--
+
+test_param.params.format_file = 3
+
+result = test_param:load_event_format_file(true)
+--> result is false
+```
+
+## validate_pattern_param method
+
+The **validate_pattern_param** method checks if a parameter has a valid Lua pattern as a value.
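+
+A pattern is considered invalid when Lua's pattern matcher rejects it, for example because of a dangling `%` escape or a reference to a capture that doesn't exist. The snippet below is a minimal illustration of such a check using a pcall-based probe; it is not the module's actual implementation.
+
+```lua
+-- minimal sketch: probe a pattern's validity by attempting a match under pcall
+local function is_valid_pattern(pattern)
+  local ok = pcall(string.match, "probe", pattern)
+  return ok
+end
+
+print(is_valid_pattern("pattern.*")) --> true
+print(is_valid_pattern("pattern%2")) --> false (there is no capture to back-reference)
+```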
+
+### validate_pattern_param: parameters
+
+| parameter | type | optional | default value |
+| -------------------------- | ------ | -------- | ------------- |
+| the name of the parameter | string | no | |
+| the value of the parameter | string | no | |
+
+### validate_pattern_param: returns
+
+| return | type | always | condition |
+| ----------------------------------- | ------ | ------ | ----------------------------------------------------------------------------------------- |
+| the parameter value or empty string | string | yes | if the Lua pattern is not valid, an empty string is returned instead of the pattern |
+
+### validate_pattern_param: example
+
+```lua
+-- set up a parameter name and value
+local param_name = "my_param"
+local param_value = "pattern.*"
+
+local result = test_param:validate_pattern_param(param_name, param_value)
+--> result is "pattern.*" because it is a valid Lua pattern
+
+param_value = "pattern%2"
+
+result = test_param:validate_pattern_param(param_name, param_value)
+--> result is "" because it is not a valid Lua pattern
+```
+
+## build_and_validate_filters_pattern method
+
+The **build_and_validate_filters_pattern** method validates Lua patterns and builds a table of patterns to compensate for the lack of an alternation operator (known as `|` in POSIX regular expressions).
+
+### build_and_validate_filters_pattern: parameters
+
+| parameter | type | optional | default value |
+| -------------------------- | ------ | -------- | ------------- |
+| a list of parameter names that may have Lua patterns as a value | table | no | |
+
+### build_and_validate_filters_pattern: example
+
+```lua
+-- create a list of parameter names
+local param_list = {"accepted_hosts", "accepted_services"}
+
+test_param:build_and_validate_filters_pattern(param_list)
+--> it creates a test_param.params.accepted_hosts_pattern_list table and a test_param.params.accepted_services_pattern_list table
+```
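+
+For context, a pattern list is needed because a single Lua pattern cannot express "match A or B". The sketch below shows the looping approach such a list enables; it is plain Lua with illustrative names, not the module's actual code.
+
+```lua
+-- minimal sketch: emulate POSIX alternation with a list of Lua patterns
+local pattern_list = { "^web%-.*", "^db%-.*" }
+
+local function matches_any(value, patterns)
+  -- a value is accepted as soon as one pattern in the list matches it
+  for _, pattern in ipairs(patterns) do
+    if string.match(value, pattern) then
+      return true
+    end
+  end
+  return false
+end
+
+print(matches_any("web-frontend-01", pattern_list)) --> true
+print(matches_any("mail-01", pattern_list))         --> false
+```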
diff --git a/stream-connectors/modules/docs/templating.md b/stream-connectors/modules/docs/templating.md new file mode 100644 index 00000000000..c8d612e696f --- /dev/null +++ b/stream-connectors/modules/docs/templating.md @@ -0,0 +1,121 @@
+# Templating documentation
+
+- [Templating documentation](#templating-documentation)
+  - [Introduction](#introduction)
+  - [Templating](#templating)
+    - [Structure](#structure)
+    - [Template and macros](#template-and-macros)
+    - [Example: adding new entries to already handled event types](#example-adding-new-entries-to-already-handled-event-types)
+    - [Example: adding a not handled event type](#example-adding-a-not-handled-event-type)
+
+## Introduction
+
+Templating with stream connectors is an option that is offered **only for event-oriented stream connectors**. This means that **you can't use** templating with **metrics-oriented stream connectors**.
+
+Templating allows you to format your events at your convenience, either because the default format doesn't suit your needs or because the stream connector doesn't handle a type of event that you would like to receive.
+
+Stream connector modules are built to handle the following event types:
+
+- acknowledgement
+- downtime
+- host_status
+- service_status
+- ba_status
+
+This means that if you create a template that is not related to those types, you will not be able to use the built-in features of the stream connector modules. More event types may be handled in the future. If some important ones come to your mind, feel free to let us know by opening an issue at [https://github.com/centreon/centreon-stream-connector-scripts/issues](https://github.com/centreon/centreon-stream-connector-scripts/issues)
+
+## Templating
+
+### Structure
+
+A template is a JSON file with the following structure:
+
+```json
+{
+  "_": {
+    "key_1": "value_1",
+    "key_2": "value_2"
+  },
+  "_": {
+    "key_1": "value_1",
+    "key_2": "value_2"
+  }
+}
+```
+
+Each top-level key is the name of an event type (for example neb_host_status), as shown in the examples below.
+
+### Template and macros
+
+To make the best use of the template feature, you should take a look at the macros system implemented in the stream connector modules. [**Macros documentation**](sc_macros.md#stream-connectors-macro-explanation)
+
+### Example: adding new entries to already handled event types
+
+To get a better overview of the system, we are going to work on the Splunk-events-apiv2 stream connector.
+
+This stream connector handles the following event types:
+
+- host_status
+- service_status
+
+Let's take a closer look at the format of a host_status event:
+
+```lua
+self.sc_event.event.formated_event = {
+  event_type = "host",
+  state = self.sc_event.event.state,
+  state_type = self.sc_event.event.state_type,
+  hostname = self.sc_event.event.cache.host.name,
+  output = string.gsub(self.sc_event.event.output, "\n", ""),
+}
+```
+
+In the code, the formatted event is made of a string (event_type), the state, the state_type, the hostname and the output. Let's say we would like to add the **host_id** and the **address**: the first needs to be stored in an index called **"MY_HOST_ID"** and the address in an index called **"IP"**.
+
+This results in the following JSON template file:
+
+```json
+{
+  "neb_host_status": {
+    "event_type": "host",
+    "state": "{state}",
+    "state_type": "{state_type}",
+    "hostname": "{cache.host.name}",
+    "output": "{output_scshort}",
+    "MY_HOST_ID": "{host_id}",
+    "IP": "{cache.host.address}"
+  }
+}
+```
+
+As you can see, there are a lot of **{text}** entries; these are macros that will be replaced by values found in the event or linked to it (like data from the cache).
+
+The service_status event type is not in the JSON file. Therefore, it will use the default format provided by the Splunk stream connector.
+
+### Example: adding a not handled event type
+
+This example builds on [the previous example](#example-adding-new-entries-to-already-handled-event-types).
+
+As stated before, only **host_status** and **service_status** are handled by the Splunk stream connector. Stream connector modules can handle a few others, as listed [**in the introduction**](#introduction).
+
+Let's say we would like to handle **ba_status** events. To do so, we need to add this event type to the JSON file:
+
+```json
+{
+  "neb_host_status": {
+    "event_type": "host",
+    "state": "{state}",
+    "state_type": "{state_type}",
+    "hostname": "{cache.host.name}",
+    "output": "{output_scshort}",
+    "MY_HOST_ID": "{host_id}",
+    "IP": "{cache.host.address}"
+  },
+  "bam_ba_status": {
+    "event_type": "BA",
+    "ba_name": "{cache.ba.ba_name}",
+    "ba_id": "{ba_id}",
+    "state": "{state}"
+  }
+}
+```
+
+As stated in the previous example, the service_status event type is not in the JSON file. Therefore, it will use the default format provided by the Splunk stream connector.
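+
+To put a template into effect, point the stream connector at the file through the `format_file` parameter described in the sc_param documentation. A minimal sketch follows; the path is illustrative, and test_param is assumed to have been built as in the sc_param documentation:
+
+```lua
+-- minimal sketch: load a template file through the format_file parameter
+test_param.params.format_file = "/etc/centreon-broker/splunk_template.json"
+
+local ok = test_param:load_event_format_file(true)
+--> ok is true when the file exists and contains valid JSON
+```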
diff --git a/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0-1.rockspec b/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0-1.rockspec new file mode 100644 index 00000000000..27f21877cf7 --- /dev/null +++ b/stream-connectors/modules/specs/0.1.x/centreon-stream-connectors-lib-0.1.0-1.rockspec @@ -0,0 +1,28 @@ +package = "centreon-stream-connectors-lib" +version = "0.1.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "0.1.0" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0-1.rockspec b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0-1.rockspec new file mode 100644 index 00000000000..b3eb7de32d0 --- /dev/null +++ b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.0.0-1.rockspec @@ -0,0 +1,28 @@ +package = "centreon-stream-connectors-lib" +version = "1.0.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.0.0" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec new file mode 100644 index 00000000000..a6c4ed1296f --- /dev/null +++ b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-2.rockspec @@ -0,0 +1,33 @@ +package = "centreon-stream-connectors-lib" +version = "1.1.0-2" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.1.0-2" +} +description = { + summary = "Centreon stream connectors lua 
modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-3.rockspec b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-3.rockspec new file mode 100644 index 00000000000..34cae90a9e7 --- /dev/null +++ b/stream-connectors/modules/specs/1.0.x/centreon-stream-connectors-lib-1.1.0-3.rockspec @@ -0,0 +1,33 @@ +package = "centreon-stream-connectors-lib" +version = "1.1.0-3" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.1.0-3" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua" + } +} \ No newline at end of file 
diff --git a/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-1.rockspec b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-1.rockspec new file mode 100644 index 00000000000..c52cac7ab1f --- /dev/null +++ b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-1.rockspec @@ -0,0 +1,33 @@ +package = "centreon-stream-connectors-lib" +version = "1.1.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.1.0" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/topic.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-2.rockspec b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-2.rockspec new file mode 100644 index 00000000000..a6c4ed1296f --- /dev/null +++ b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-2.rockspec @@ -0,0 +1,33 @@ +package = "centreon-stream-connectors-lib" +version = "1.1.0-2" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.1.0-2" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = 
"modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-3.rockspec b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-3.rockspec new file mode 100644 index 00000000000..34cae90a9e7 --- /dev/null +++ b/stream-connectors/modules/specs/1.1.x/centreon-stream-connectors-lib-1.1.0-3.rockspec @@ -0,0 +1,33 @@ +package = "centreon-stream-connectors-lib" +version = "1.1.0-3" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.1.0-3" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua" + } +} \ No newline at end of file diff --git a/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1 copy.rockspec b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1 copy.rockspec new file mode 100644 index 00000000000..0dd857fed3b --- /dev/null +++ b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-1 copy.rockspec @@ -0,0 +1,38 @@ +package = "centreon-stream-connectors-lib" +version = "1.2.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.2.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "LuaSocket >= 3.0rc1-2", + "LuaCrypto >= 0.3.2-2" +} +build = { + type = "builtin", + modules = { + 
["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-2 copy.rockspec b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-2 copy.rockspec new file mode 100644 index 00000000000..ae732fb6a62 --- /dev/null +++ b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.0-2 copy.rockspec @@ -0,0 +1,38 @@ +package = "centreon-stream-connectors-lib" +version = "1.2.0-2" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.2.0-2" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2", + "luacrypto >= 0.3.2-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = 
"modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.1-1.rockspec b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.1-1.rockspec new file mode 100644 index 00000000000..351a34a6533 --- /dev/null +++ b/stream-connectors/modules/specs/1.2.x/centreon-stream-connectors-lib-1.2.1-1.rockspec @@ -0,0 +1,37 @@ +package = "centreon-stream-connectors-lib" +version = "1.2.1-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.2.1-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-1.rockspec b/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-1.rockspec new file mode 100644 index 00000000000..47b22dee43f --- /dev/null +++ b/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.3.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.3.0-1" +} +description = { 
+ summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-2.rockspec b/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-2.rockspec new file mode 100644 index 00000000000..770d06f1825 --- /dev/null +++ b/stream-connectors/modules/specs/1.3.x/centreon-stream-connectors-lib-1.3.0-2.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.3.0-2" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.3.0-2" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = 
"modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.0-1.rockspec b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.0-1.rockspec new file mode 100644 index 00000000000..a599961a210 --- /dev/null +++ b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.4.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.4.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = 
"modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.1-1.rockspec b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.1-1.rockspec new file mode 100644 index 00000000000..a3a44fb62de --- /dev/null +++ b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.1-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.4.1-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.4.1-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.2-1.rockspec b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.2-1.rockspec new file mode 100644 index 00000000000..7f0eb5d668c --- /dev/null +++ b/stream-connectors/modules/specs/1.4.x/centreon-stream-connectors-lib-1.4.2-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.4.2-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.4.2-1" +} +description = { + summary = "Centreon stream 
connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.0-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.0-1.rockspec new file mode 100644 index 00000000000..e07503e4bc8 --- /dev/null +++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.5.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.5.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + 
["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.1-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.1-1.rockspec new file mode 100644 index 00000000000..f7850706c83 --- /dev/null +++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.1-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.5.1-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.5.1-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", 
+ ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.2-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.2-1.rockspec new file mode 100644 index 00000000000..1f96a3de553 --- /dev/null +++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.2-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.5.2-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.5.2-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.3-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.3-1.rockspec new file mode 100644 index 00000000000..a3e968e2fe4 --- /dev/null +++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.3-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.5.3-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.5.3-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules 
provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.4-1.rockspec b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.4-1.rockspec new file mode 100644 index 00000000000..9986d5cf5ac --- /dev/null +++ b/stream-connectors/modules/specs/1.5.x/centreon-stream-connectors-lib-1.5.4-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "1.5.4-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "1.5.4-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = 
"modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/2.0.x/centreon-stream-connectors-lib-2.0.0-1.rockspec b/stream-connectors/modules/specs/2.0.x/centreon-stream-connectors-lib-2.0.0-1.rockspec new file mode 100644 index 00000000000..d2087ffb63f --- /dev/null +++ b/stream-connectors/modules/specs/2.0.x/centreon-stream-connectors-lib-2.0.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "2.0.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "2.0.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + 
["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/2.1.x/centreon-stream-connectors-lib-2.1.0-1.rockspec b/stream-connectors/modules/specs/2.1.x/centreon-stream-connectors-lib-2.1.0-1.rockspec new file mode 100644 index 00000000000..eda06245b12 --- /dev/null +++ b/stream-connectors/modules/specs/2.1.x/centreon-stream-connectors-lib-2.1.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "2.1.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "2.1.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.0.x/centreon-stream-connectors-lib-3.0.0-1.rockspec b/stream-connectors/modules/specs/3.0.x/centreon-stream-connectors-lib-3.0.0-1.rockspec new file mode 100644 index 00000000000..d5d8f5a71a9 --- /dev/null +++ b/stream-connectors/modules/specs/3.0.x/centreon-stream-connectors-lib-3.0.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.0.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.0.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides 
helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.1.x/centreon-stream-connectors-lib-3.1.0-1.rockspec b/stream-connectors/modules/specs/3.1.x/centreon-stream-connectors-lib-3.1.0-1.rockspec new file mode 100644 index 00000000000..bee0a86b40e --- /dev/null +++ b/stream-connectors/modules/specs/3.1.x/centreon-stream-connectors-lib-3.1.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.1.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.1.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = 
"modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.2.x/centreon-stream-connectors-lib-3.2.0-1.rockspec b/stream-connectors/modules/specs/3.2.x/centreon-stream-connectors-lib-3.2.0-1.rockspec new file mode 100644 index 00000000000..9efdb745d84 --- /dev/null +++ b/stream-connectors/modules/specs/3.2.x/centreon-stream-connectors-lib-3.2.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.2.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.2.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + 
["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.3.x/centreon-stream-connectors-lib-3.3.0-1.rockspec b/stream-connectors/modules/specs/3.3.x/centreon-stream-connectors-lib-3.3.0-1.rockspec new file mode 100644 index 00000000000..025e0cf66b3 --- /dev/null +++ b/stream-connectors/modules/specs/3.3.x/centreon-stream-connectors-lib-3.3.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.3.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.3.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.4.x/centreon-stream-connectors-lib-3.4.0-1.rockspec b/stream-connectors/modules/specs/3.4.x/centreon-stream-connectors-lib-3.4.0-1.rockspec new file mode 100644 index 00000000000..2e2852941e3 --- /dev/null +++ b/stream-connectors/modules/specs/3.4.x/centreon-stream-connectors-lib-3.4.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.4.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.4.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides 
helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.0-1.rockspec b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.0-1.rockspec new file mode 100644 index 00000000000..f20753b3888 --- /dev/null +++ b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.5.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.5.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = 
"modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.1-1.rockspec b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.1-1.rockspec new file mode 100644 index 00000000000..727ce86c73c --- /dev/null +++ b/stream-connectors/modules/specs/3.5.x/centreon-stream-connectors-lib-3.5.1-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.5.1-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.5.1-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + 
["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.0-1.rockspec b/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.0-1.rockspec new file mode 100644 index 00000000000..9bd517c9e9a --- /dev/null +++ b/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.6.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.6.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.1-1.rockspec b/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.1-1.rockspec new file mode 100644 index 00000000000..44ca80f85ec --- /dev/null +++ b/stream-connectors/modules/specs/3.6.x/centreon-stream-connectors-lib-3.6.1-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.6.1-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.6.1-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides 
helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = "modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/specs/3.7.x/centreon-stream-connectors-lib-3.7.0-1.rockspec b/stream-connectors/modules/specs/3.7.x/centreon-stream-connectors-lib-3.7.0-1.rockspec new file mode 100644 index 00000000000..81d9db31cb6 --- /dev/null +++ b/stream-connectors/modules/specs/3.7.x/centreon-stream-connectors-lib-3.7.0-1.rockspec @@ -0,0 +1,39 @@ +package = "centreon-stream-connectors-lib" +version = "3.7.0-1" +source = { + url = "git+https://github.com/centreon/centreon-stream-connector-scripts", + tag = "3.7.0-1" +} +description = { + summary = "Centreon stream connectors lua modules", + detailed = [[ + Those modules provides helpful methods to create + stream connectors for Centreon + ]], + license = "" +} +dependencies = { + "lua >= 5.1, < 5.4", + "luasocket >= 3.0rc1-2" +} +build = { + type = "builtin", + modules = { + ["centreon-stream-connectors-lib.sc_broker"] = "modules/centreon-stream-connectors-lib/sc_broker.lua", + ["centreon-stream-connectors-lib.sc_common"] = "modules/centreon-stream-connectors-lib/sc_common.lua", + ["centreon-stream-connectors-lib.sc_event"] = "modules/centreon-stream-connectors-lib/sc_event.lua", + ["centreon-stream-connectors-lib.sc_logger"] = "modules/centreon-stream-connectors-lib/sc_logger.lua", + ["centreon-stream-connectors-lib.sc_params"] = "modules/centreon-stream-connectors-lib/sc_params.lua", + ["centreon-stream-connectors-lib.sc_test"] = "modules/centreon-stream-connectors-lib/sc_test.lua", + ["centreon-stream-connectors-lib.sc_macros"] = 
"modules/centreon-stream-connectors-lib/sc_macros.lua", + ["centreon-stream-connectors-lib.sc_flush"] = "modules/centreon-stream-connectors-lib/sc_flush.lua", + ["centreon-stream-connectors-lib.sc_metrics"] = "modules/centreon-stream-connectors-lib/sc_metrics.lua", + ["centreon-stream-connectors-lib.rdkafka.config"] = "modules/centreon-stream-connectors-lib/rdkafka/config.lua", + ["centreon-stream-connectors-lib.rdkafka.librdkafka"] = "modules/centreon-stream-connectors-lib/rdkafka/librdkafka.lua", + ["centreon-stream-connectors-lib.rdkafka.producer"] = "modules/centreon-stream-connectors-lib/rdkafka/producer.lua", + ["centreon-stream-connectors-lib.rdkafka.topic_config"] = "modules/centreon-stream-connectors-lib/rdkafka/topic_config.lua", + ["centreon-stream-connectors-lib.rdkafka.topic"] = "modules/centreon-stream-connectors-lib/rdkafka/topic.lua", + ["centreon-stream-connectors-lib.google.auth.oauth"] = "modules/centreon-stream-connectors-lib/google/auth/oauth.lua", + ["centreon-stream-connectors-lib.google.bigquery.bigquery"] = "modules/centreon-stream-connectors-lib/google/bigquery/bigquery.lua" + } +} diff --git a/stream-connectors/modules/tests/acknowledgement_stream_connector.lua b/stream-connectors/modules/tests/acknowledgement_stream_connector.lua new file mode 100644 index 00000000000..d965a0fca31 --- /dev/null +++ b/stream-connectors/modules/tests/acknowledgement_stream_connector.lua @@ -0,0 +1,208 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + -- initiate EventQueue variables + self.events = {} + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" + local log_level = params.log_level or 3 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- initiate parameters dedicated to this stream connector + self.sc_params.params.output_file = params.output_file + + -- overriding default parameters for this stream connector + params.accepted_categories = "neb" + params.accepted_elements = "acknowledgement" + + -- checking mandatory parameters and setting a fail flag + if not params.output_file then + self.sc_logger:error("output_file is a mandatory parameter.") + self.fail = true + end + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_event() + for i, v in pairs(self.sc_event.event) do + self.sc_logger:error("index: " .. tostring(i) .. " value: " .. 
tostring(v)) + end + -- starting to handle shared information between host and service + self.sc_event.event.formated_event = { + -- name of host has been stored in a cache table when calling is_valid_event() + my_host = self.sc_event.event.cache.host.name, + -- states (critical, ok...) are found and converted to human format thanks to the status_mapping table + my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + my_author = self.sc_event.event.author, + } + + self:add() + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add () + -- store event in self.events list + self.events[#self.events + 1] = self.sc_event.event.formated_event +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush () + self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string") + + -- send stored events + local retval = self:send_data() + + -- reset stored events list + self.events = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external tool +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:send_data () + local data = "" + local counter = 0 + + -- concatenate all stored events in the data variable + for _, formated_event in ipairs(self.events) do + if counter == 0 then + data = broker.json_encode(formated_event) + counter = counter + 1 + else + data = data .. "," .. broker.json_encode(formated_event) + end + end + + self.sc_logger:debug("EventQueue:send_data: creating json: " .. tostring(data)) + + -- output data to the tool we want + if self:call(data) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue:call send the data where we want it to be +-- @param data (string) the data we want to send +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:call (data) + data = data or nil + + -- open a file + self.sc_logger:debug("EventQueue:call: opening file " .. self.sc_params.params.output_file) + local file = io.open(self.sc_params.params.output_file, "a") + io.output(file) + + -- write in the file + self.sc_logger:debug("EventQueue:call: writing message " .. tostring(data)) + io.write(data .. "\n") + + -- close the file + self.sc_logger:debug("EventQueue:call: closing file " .. 
self.sc_params.params.output_file) + io.close(file) + + return true +end + +local queue + +function init(params) + queue = EventQueue.new(params) +end + +function write(event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + return true + end + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then + queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") + os.execute("sleep " .. tonumber(1)) + queue:flush() + end + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_event() + else + -- for i, v in pairs(queue.sc_event.event) do + -- queue.sc_logger:error("index: " .. tostring(i) .. " value: " .. tostring(v)) + -- end + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. 
") is reached, flushing data") + queue:flush() + end + + return true +end diff --git a/stream-connectors/modules/tests/bam_stream_connector.lua b/stream-connectors/modules/tests/bam_stream_connector.lua new file mode 100644 index 00000000000..6908f2f262d --- /dev/null +++ b/stream-connectors/modules/tests/bam_stream_connector.lua @@ -0,0 +1,203 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + -- initiate EventQueue variables + self.events = {} + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/stream-connector-bam.log" + local log_level = params.log_level or 1 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- initiate parameters dedicated to this stream connector + self.sc_params.params.output_file = params.output_file + + -- overriding default parameters for this stream connector + params.accepted_categories = "bam" + params.accepted_elements = "ba_status" + + -- checking mandatory parameters and setting a fail flag + if not params.output_file then + self.sc_logger:error("output_file is a mandatory parameter.") + self.fail = true + end + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_event() + -- starting to handle information from BA + self.sc_event.event.formated_event = { + -- name of BA has been stored in a cache table when calling is_valid_even() + my_ba = self.sc_event.event.cache.ba.ba_name, + -- states (critical, ok...) 
are found and converted to human format thanks to the status_mapping table + my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state], + -- like the name of the BA, BA description is stored in the cache table of the event + my_description = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.ba.ba_description, "no description found") + } + + self:add() + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add () + -- store event in self.events list + self.events[#self.events + 1] = self.sc_event.event.formated_event +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush () + self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string") + + -- send stored events + local retval = self:send_data() + + -- reset stored events list + self.events = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external tool +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:send_data () + local data = "" + local counter = 0 + + -- concatenate all stored events in the data variable + for _, formated_event in ipairs(self.events) do + if counter == 0 then + data = broker.json_encode(formated_event) + counter = counter + 1 + else + data = data .. "," .. broker.json_encode(formated_event) + end + end + + self.sc_logger:debug("EventQueue:send_data: creating json: " .. tostring(data)) + + -- output data to the tool we want + if self:call(data) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue:call send the data where we want it to be +-- @param data (string) the data we want to send +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:call (data) + data = data or nil + + -- open a file + self.sc_logger:debug("EventQueue:call: opening file " .. self.sc_params.params.output_file) + local file = io.open(self.sc_params.params.output_file, "a") + io.output(file) + + -- write in the file + self.sc_logger:debug("EventQueue:call: writing message " .. tostring(data)) + io.write(data .. "\n") + + -- close the file + self.sc_logger:debug("EventQueue:call: closing file " .. 
self.sc_params.params.output_file) + io.close(file) + + return true +end + +local queue + +function init(params) + queue = EventQueue.new(params) +end + +function write(event) + -- skip event if a mandatory parameter is missing + if queue.fail then + queue.sc_logger:error("Skipping event because a mandatory parameter is not set") + return true + end + + -- initiate event object + queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker) + + -- drop event if wrong category + if not queue.sc_event:is_valid_category() then + return true + end + + -- drop event if wrong element + if not queue.sc_event:is_valid_element() then + return true + end + + -- First, are there some old events waiting in the flush queue ? + if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then + queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data") + queue:flush() + end + + -- Then we check that the event queue is not already full + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after 1 second pause.") + os.execute("sleep " .. tonumber(1)) + queue:flush() + end + + -- drop event if it is not validated + if queue.sc_event:is_valid_event() then + queue:format_event() + else + return true + end + + -- Then we check whether it is time to send the events to the receiver and flush + if (#queue.events >= queue.sc_params.params.max_buffer_size) then + queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. 
") is reached, flushing data") + queue:flush() + end + + return true +end diff --git a/stream-connectors/modules/tests/downtime_stream_connector.lua b/stream-connectors/modules/tests/downtime_stream_connector.lua new file mode 100644 index 00000000000..b81beb0fe30 --- /dev/null +++ b/stream-connectors/modules/tests/downtime_stream_connector.lua @@ -0,0 +1,210 @@ +#!/usr/bin/lua + +local sc_common = require("centreon-stream-connectors-lib.sc_common") +local sc_logger = require("centreon-stream-connectors-lib.sc_logger") +local sc_broker = require("centreon-stream-connectors-lib.sc_broker") +local sc_event = require("centreon-stream-connectors-lib.sc_event") +local sc_params = require("centreon-stream-connectors-lib.sc_params") + +local EventQueue = {} + +function EventQueue.new(params) + local self = {} + + -- initiate EventQueue variables + self.events = {} + self.fail = false + + -- set up log configuration + local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log" + local log_level = params.log_level or 3 + + -- initiate mandatory objects + self.sc_logger = sc_logger.new(logfile, log_level) + self.sc_common = sc_common.new(self.sc_logger) + self.sc_broker = sc_broker.new(self.sc_logger) + self.sc_params = sc_params.new(self.sc_common, self.sc_logger) + + -- initiate parameters dedicated to this stream connector + self.sc_params.params.output_file = params.output_file + + -- overriding default parameters for this stream connector + params.accepted_categories = "neb" + params.accepted_elements = "downtime" + + -- checking mandatory parameters and setting a fail flag + if not params.output_file then + self.sc_logger:error("output_file is a mandatory parameter.") + self.fail = true + end + + -- apply users params and check syntax of standard ones + self.sc_params:param_override(params) + self.sc_params:check_params() + + -- return EventQueue object + setmetatable(self, { __index = EventQueue }) + return self +end + +-------------------------------------------------------------------------------- +-- EventQueue:format_event, build your own table with the desired information +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:format_event() + -- for i, v in pairs(self.sc_event.event) do + -- self.sc_logger:error("index: " .. tostring(i) .. " value: " .. tostring(v)) + -- end + -- starting to handle shared information between host and service + self.sc_event.event.formated_event = { + -- name of host has been stored in a cache table when calling is_valid_even() + my_host = self.sc_event.event.cache.host.name, + -- states (critical, ok...) 
are found and converted to human format thanks to the status_mapping table + my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.type][self.sc_event.event.state], + my_author = self.sc_event.event.author, + my_start_time = self.sc_event.event.actual_start_time, + my_end_time = self.sc_event.event.actual_end_time, + } + + self:add() + + return true +end + +-------------------------------------------------------------------------------- +-- EventQueue:add, add an event to the sending queue +-------------------------------------------------------------------------------- +function EventQueue:add () + -- store event in self.events list + self.events[#self.events + 1] = self.sc_event.event.formated_event +end + +-------------------------------------------------------------------------------- +-- EventQueue:flush, flush stored events +-- Called when the max number of events or the max age are reached +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:flush () + self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string") + + -- send stored events + local retval = self:send_data() + + -- reset stored events list + self.events = {} + + -- and update the timestamp + self.sc_params.params.__internal_ts_last_flush = os.time() + + return retval +end + +-------------------------------------------------------------------------------- +-- EventQueue:send_data, send data to external tool +-- @return (boolean) +-------------------------------------------------------------------------------- +function EventQueue:send_data () + local data = "" + local counter = 0 + + -- concatenate all stored events in the data variable + for _, formated_event in ipairs(self.events) do + if counter == 0 then + data = broker.json_encode(formated_event) + counter = counter + 1 + else + data = data .. "," .. broker.json_encode(formated_event) + end + end + + self.sc_logger:debug("EventQueue:send_data: creating json: " .. tostring(data)) + + -- output data to the tool we want + if self:call(data) then + return true + end + + return false +end + +-------------------------------------------------------------------------------- +-- EventQueue:call send the data where we want it to be +-- @param data (string) the data we want to send +-- @return true (boolean) +-------------------------------------------------------------------------------- +function EventQueue:call (data) + data = data or nil + + -- open a file + self.sc_logger:debug("EventQueue:call: opening file " .. self.sc_params.params.output_file) + local file = io.open(self.sc_params.params.output_file, "a") + io.output(file) + + -- write in the file + self.sc_logger:debug("EventQueue:call: writing message " .. tostring(data)) + io.write(data .. "\n") + + -- close the file + self.sc_logger:debug("EventQueue:call: closing file " .. 
+
+--------------------------------------------------------------------------------
+-- EventQueue:call, send the data where we want it to be
+-- @param data (string) the data we want to send
+-- @return true (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:call (data)
+  data = data or nil
+
+  -- open a file
+  self.sc_logger:debug("EventQueue:call: opening file " .. self.sc_params.params.output_file)
+  local file = io.open(self.sc_params.params.output_file, "a")
+  io.output(file)
+
+  -- write in the file
+  self.sc_logger:debug("EventQueue:call: writing message " .. tostring(data))
+  io.write(data .. "\n")
+
+  -- close the file
+  self.sc_logger:debug("EventQueue:call: closing file " .. self.sc_params.params.output_file)
+  io.close(file)
+
+  return true
+end
+
+local queue
+
+function init(params)
+  queue = EventQueue.new(params)
+end
+
+function write(event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return true
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  -- drop event if wrong category
+  if not queue.sc_event:is_valid_category() then
+    return true
+  end
+
+  -- drop event if wrong element
+  if not queue.sc_event:is_valid_element() then
+    return true
+  end
+
+  -- First, are there some old events waiting in the flush queue?
+  if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then
+    queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  -- Then we check that the event queue is not already full
+  if (#queue.events >= queue.sc_params.params.max_buffer_size) then
+    queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after a 1 second pause.")
+    os.execute("sleep " .. tonumber(1))
+    queue:flush()
+  end
+
+  -- drop event if it is not validated
+  if queue.sc_event:is_valid_event() then
+    queue:format_event()
+  else
+    -- for i, v in pairs(queue.sc_event.event) do
+    --   queue.sc_logger:error("index: " .. tostring(i) .. " value: " .. tostring(v))
+    -- end
+    return true
+  end
+
+  -- Then we check whether it is time to send the events to the receiver and flush
+  if (#queue.events >= queue.sc_params.params.max_buffer_size) then
+    queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  return true
+end
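These test connectors normally run inside centreon-broker, which provides the `broker` and `broker_log` globals and drives the `init()`/`write()` entry points. A minimal standalone harness sketch, assuming lua-cjson is installed and the centreon-stream-connectors-lib modules are on `package.path`; the stubs below are hypothetical and not part of the patch, and depending on how much of the lib leans on other broker globals (for example `broker_cache`), more stubbing may be needed:

```lua
-- standalone harness sketch for downtime_stream_connector.lua
local cjson = require("cjson")  -- assumption: lua-cjson is installed

-- minimal stand-ins for the globals centreon-broker normally provides;
-- only the calls this connector is known to use are covered
broker = {
  json_encode = function(t) return cjson.encode(t) end
}
broker_log = {
  set_parameters = function(self, level, file) end,
  info = function(self, level, message) print("[info] " .. message) end,
  warning = function(self, level, message) print("[warn] " .. message) end,
  error = function(self, level, message) print("[error] " .. message) end
}

-- loading the file defines the global init() and write() entry points
dofile("downtime_stream_connector.lua")

-- broker passes the lua output configuration to init()
init({ output_file = "/tmp/downtime_test.log", log_level = 3 })

-- feed one fake downtime event the way broker would call write()
-- (category 1 / element 5 is the neb downtime pair; values are illustrative)
write({ category = 1, element = 5, author = "admin", actual_start_time = os.time() })
```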
diff --git a/stream-connectors/modules/tests/kafka_test_connexion.lua b/stream-connectors/modules/tests/kafka_test_connexion.lua
new file mode 100644
index 00000000000..2f313b86964
--- /dev/null
+++ b/stream-connectors/modules/tests/kafka_test_connexion.lua
@@ -0,0 +1,45 @@
+----- START OF PARAMETERS -------
+
+-- put your kafka broker addresses here: {"host1:port", "host2:port"}
+local BROKERS_ADDRESS = { "hhhhhhh:pppp" }
+-- change topic depending on your needs
+local TOPIC_NAME = "centreon"
+
+local config = require 'centreon-stream-connectors-lib.rdkafka.config'.new()
+
+-- set up your configuration. The list of parameters is available here: https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+config["security.protocol"] = "sasl_plaintext"
+config["sasl.mechanisms"] = "PLAIN"
+config["sasl.username"] = "xxxxx"
+config["sasl.password"] = "yyyyyyyy"
+config["statistics.interval.ms"] = "1000"
+
+-- this is the message you want to send to kafka
+local message = "This is a test message"
+
+------ END OF PARAMETERS ---------
+
+
+-- you can uncomment this if you are on el7
+-- config:set_delivery_cb(function (payload, err) print("Delivery Callback '"..payload.."'") end)
+-- config:set_stat_cb(function (payload) print("Stat Callback '"..payload.."'") end)
+
+local producer = require 'centreon-stream-connectors-lib.rdkafka.producer'.new(config)
+
+for _, broker_address in pairs(BROKERS_ADDRESS) do
+  producer:brokers_add(broker_address)
+end
+
+local topic_config = require 'centreon-stream-connectors-lib.rdkafka.topic_config'.new()
+topic_config["auto.commit.enable"] = "true"
+
+local topic = require 'centreon-stream-connectors-lib.rdkafka.topic'.new(producer, TOPIC_NAME, topic_config)
+
+local KAFKA_PARTITION_UA = -1
+
+producer:produce(topic, KAFKA_PARTITION_UA, message)
+
+
+while producer:outq_len() ~= 0 do
+  producer:poll(10)
+end
\ No newline at end of file
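One possible hardening of the final drain loop in `kafka_test_connexion.lua`: if no broker is reachable, `outq_len()` may never reach zero and the script would spin forever. A hedged sketch with a deadline, using only the calls already present in the script (the 30-second budget is an arbitrary choice, not from the patch):

```lua
-- drain the producer queue, but give up after a fixed deadline
local deadline = os.time() + 30  -- arbitrary 30s budget
while producer:outq_len() ~= 0 and os.time() < deadline do
  producer:poll(10)
end

if producer:outq_len() ~= 0 then
  print("undelivered messages left in queue: " .. producer:outq_len())
end
```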
diff --git a/stream-connectors/modules/tests/neb_stream_connector.lua b/stream-connectors/modules/tests/neb_stream_connector.lua
new file mode 100644
index 00000000000..54ac4311e27
--- /dev/null
+++ b/stream-connectors/modules/tests/neb_stream_connector.lua
@@ -0,0 +1,213 @@
+#!/usr/bin/lua
+
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_logger = require("centreon-stream-connectors-lib.sc_logger")
+local sc_broker = require("centreon-stream-connectors-lib.sc_broker")
+local sc_event = require("centreon-stream-connectors-lib.sc_event")
+local sc_params = require("centreon-stream-connectors-lib.sc_params")
+
+local EventQueue = {}
+
+function EventQueue.new(params)
+  local self = {}
+
+  -- initiate EventQueue variables
+  self.events = {}
+  self.fail = false
+
+  -- set up log configuration
+  local logfile = params.logfile or "/var/log/centreon-broker/stream-connector.log"
+  local log_level = params.log_level or 1
+
+  -- initiate mandatory objects
+  self.sc_logger = sc_logger.new(logfile, log_level)
+  self.sc_common = sc_common.new(self.sc_logger)
+  self.sc_broker = sc_broker.new(self.sc_logger)
+  self.sc_params = sc_params.new(self.sc_common, self.sc_logger)
+
+  -- initiate parameters dedicated to this stream connector
+  self.sc_params.params.output_file = params.output_file
+
+  -- overriding default parameters for this stream connector
+  params.accepted_categories = "neb"
+  params.accepted_elements = "host_status,service_status"
+
+  -- checking mandatory parameters and setting a fail flag
+  if not params.output_file then
+    self.sc_logger:error("output_file is a mandatory parameter.")
+    self.fail = true
+  end
+
+  -- apply user params and check the syntax of standard ones
+  self.sc_params:param_override(params)
+  self.sc_params:check_params()
+
+  -- return EventQueue object
+  setmetatable(self, { __index = EventQueue })
+  return self
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:format_event, build your own table with the desired information
+-- @return true (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:format_event()
+  -- starting to handle shared information between host and service
+  self.sc_event.event.formated_event = {
+    -- the host name has been stored in a cache table when calling is_valid_event()
+    my_host = self.sc_event.event.cache.host.name,
+    -- states (critical, ok...) are found and converted to a human-readable format thanks to the status_mapping table
+    my_state = self.sc_params.params.status_mapping[self.sc_event.event.category][self.sc_event.event.element][self.sc_event.event.state],
+    -- get the output of the event
+    my_output = self.sc_common:ifnil_or_empty(string.match(self.sc_event.event.output, "^(.*)\n"), "no output"),
+    -- like the host name, notes are stored in the cache table of the event
+    my_notes = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.host.notes, "no notes found")
+  }
+
+  -- handle service-specific information
+  if self.sc_event.event.element == 24 then
+    -- like the host name, the service description is stored in the cache table of the event
+    self.sc_event.event.formated_event.my_description = self.sc_event.event.cache.service.description
+    -- if the service doesn't have notes, we can retrieve the ones from the host by fetching them from the broker cache
+    self.sc_event.event.formated_event.my_notes = self.sc_common:ifnil_or_empty(self.sc_event.event.cache.service.notes, self.sc_event.event.formated_event.my_notes)
+  end
+
+  self:add()
+
+  return true
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:add, add an event to the sending queue
+--------------------------------------------------------------------------------
+function EventQueue:add ()
+  -- store event in self.events list
+  self.events[#self.events + 1] = self.sc_event.event.formated_event
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:flush, flush stored events
+-- Called when the max number of events or the max age is reached
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:flush ()
+  self.sc_logger:debug("EventQueue:flush: Concatenating all the events as one string")
+
+  -- send stored events
+  local retval = self:send_data()
+
+  -- reset stored events list
+  self.events = {}
+
+  -- and update the timestamp
+  self.sc_params.params.__internal_ts_last_flush = os.time()
+
+  return retval
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:send_data, send data to the external tool
+-- @return (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:send_data ()
+  local data = ""
+  local counter = 0
+
+  -- concatenate all stored events in the data variable
+  for _, formated_event in ipairs(self.events) do
+    if counter == 0 then
+      data = broker.json_encode(formated_event)
+      counter = counter + 1
+    else
+      data = data .. "," .. broker.json_encode(formated_event)
+    end
+  end
+
+  self.sc_logger:debug("EventQueue:send_data: creating json: " .. tostring(data))
+
+  -- output data to the tool we want
+  if self:call(data) then
+    return true
+  end
+
+  return false
+end
+
+--------------------------------------------------------------------------------
+-- EventQueue:call, send the data where we want it to be
+-- @param data (string) the data we want to send
+-- @return true (boolean)
+--------------------------------------------------------------------------------
+function EventQueue:call (data)
+  data = data or nil
+
+  -- open a file
+  self.sc_logger:debug("EventQueue:call: opening file " .. self.sc_params.params.output_file)
+  local file = io.open(self.sc_params.params.output_file, "a")
+  io.output(file)
+
+  -- write in the file
+  self.sc_logger:debug("EventQueue:call: writing message " .. tostring(data))
+  io.write(data .. "\n")
+
+  -- close the file
+  self.sc_logger:debug("EventQueue:call: closing file " .. self.sc_params.params.output_file)
+  io.close(file)
+
+  return true
+end
+
+local queue
+
+function init(params)
+  queue = EventQueue.new(params)
+end
+
+function write(event)
+  -- skip event if a mandatory parameter is missing
+  if queue.fail then
+    queue.sc_logger:error("Skipping event because a mandatory parameter is not set")
+    return true
+  end
+
+  -- initiate event object
+  queue.sc_event = sc_event.new(event, queue.sc_params.params, queue.sc_common, queue.sc_logger, queue.sc_broker)
+
+  -- drop event if wrong category
+  if not queue.sc_event:is_valid_category() then
+    return true
+  end
+
+  -- drop event if wrong element
+  if not queue.sc_event:is_valid_element() then
+    return true
+  end
+
+  -- First, are there some old events waiting in the flush queue?
+  if (#queue.events > 0 and os.time() - queue.sc_params.params.__internal_ts_last_flush > queue.sc_params.params.max_buffer_age) then
+    queue.sc_logger:debug("write: Queue max age (" .. os.time() - queue.sc_params.params.__internal_ts_last_flush .. "/" .. queue.sc_params.params.max_buffer_age .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  -- Then we check that the event queue is not already full
+  if (#queue.events >= queue.sc_params.params.max_buffer_size) then
+    queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached BEFORE APPENDING AN EVENT, trying to flush data before appending more events, after a 1 second pause.")
+    os.execute("sleep " .. tonumber(1))
+    queue:flush()
+  end
+
+  -- drop event if it is not validated
+  if queue.sc_event:is_valid_event() then
+    queue:format_event()
+  else
+    return true
+  end
+
+  -- Then we check whether it is time to send the events to the receiver and flush
+  if (#queue.events >= queue.sc_params.params.max_buffer_size) then
+    queue.sc_logger:debug("write: Queue max size (" .. #queue.events .. "/" .. queue.sc_params.params.max_buffer_size .. ") is reached, flushing data")
+    queue:flush()
+  end
+
+  return true
+end
diff --git a/stream-connectors/modules/tests/sc_common-test.lua b/stream-connectors/modules/tests/sc_common-test.lua
new file mode 100644
index 00000000000..f5ed474a479
--- /dev/null
+++ b/stream-connectors/modules/tests/sc_common-test.lua
@@ -0,0 +1,115 @@
+#!/usr/bin/lua
+
+local sc_common = require("centreon-stream-connectors-lib.sc_common")
+local sc_test = require("centreon-stream-connectors-lib.sc_test")
+
+local common = sc_common.new()
+
+local vempty = ''
+local vnil = nil
+local string = 'value_1,value_2,value_3'
+local vtbool = true
+local vfbool = false
+
+---
+-- test1: ifnil_or_empty
+local test1_alt = 'alternate'
+
+print("-- test1: ifnil_or_empty --")
+-- test a nil value
+print("test nil value: " .. sc_test.compare_result('alternate', common:ifnil_or_empty(vnil, test1_alt)))
+
+-- test an empty value
+print("test empty value: " .. sc_test.compare_result('alternate', common:ifnil_or_empty(vempty, test1_alt)))
+
+-- test a value
+print("test a value: " .. sc_test.compare_result(string, common:ifnil_or_empty(string, test1_alt)))
+
+---
+-- test2: boolean_to_number
+print("-- test2: boolean_to_number --")
+
+-- test a true and a false boolean
+print("test a true value: " .. sc_test.compare_result(1, common:boolean_to_number(vtbool)))
+print("test a false value: " .. sc_test.compare_result(0, common:boolean_to_number(vfbool)))
+
+-- test an invalid type (string)
+print("test a string value: " .. sc_test.compare_result(1, common:boolean_to_number(string)))
+
+-- test an invalid type (nil)
+print("test a nil value: " .. sc_test.compare_result(0, common:boolean_to_number(vnil)))
+
+---
+-- test3: check_boolean_number_option_syntax
+local test3_default = 0
+local test3_good_1 = 1
+local test3_good_0 = 0
+
+print("-- test3: check_boolean_number_option_syntax --")
+
+-- test a string value
+print("test a string value: " .. sc_test.compare_result(0, common:check_boolean_number_option_syntax(string, test3_default)))
+
+-- test boolean numbers (0 and 1)
+print("test a boolean number: " .. sc_test.compare_result(1, common:check_boolean_number_option_syntax(test3_good_1, test3_default)))
+print("test a boolean number: " .. sc_test.compare_result(0, common:check_boolean_number_option_syntax(test3_good_0, test3_default)))
+
+-- test a boolean (true)
+print("test a boolean (true): " .. sc_test.compare_result(0, common:check_boolean_number_option_syntax(vtbool, test3_default)))
+
+---
+-- test4: split
+local test4_no_separator = 'a string without separator'
+local test4_custom_separator = 'value_1:value_2:value_3'
+print("-- test4: split --")
+
+-- test a comma-separated string
+local expected_table = {
+  [1] = 'value_1',
+  [2] = 'value_2',
+  [3] = 'value_3'
+}
+print("test a comma-separated string: " .. sc_test.compare_tables(expected_table, common:split(string)))
+
+-- test a colon-separated string
+print("test a colon-separated string: " .. sc_test.compare_tables(expected_table, common:split(test4_custom_separator, ':')))
+
+-- test a string without separator
+expected_table = {
+  [1] = test4_no_separator
+}
+print("test a string without separators: " .. sc_test.compare_tables(expected_table, common:split(test4_no_separator)))
+
+-- test an empty string
+print("test an empty string: " .. sc_test.compare_result('', common:split(vempty)))
+
+-- test a nil value
+print("test a nil value: " .. sc_test.compare_result('', common:split(vnil)))
+
+---
+-- test5: compare_numbers
+
+print("-- test5: compare_numbers --")
+-- test an inferior number
+print("test a <= b: " .. sc_test.compare_result(true, common:compare_numbers(1, 3, '<=')))
+
+-- test with a float number
+print("test a <= b (b is a float number): " .. sc_test.compare_result(true, common:compare_numbers(1, 3.5, '<=')))
+
+-- test a superior number
+print("test a <= b: " .. sc_test.compare_result(false, common:compare_numbers(3, 1, '<=')))
+
+-- test a nil operator
+print("test with a nil operator: " .. sc_test.compare_result(nil, common:compare_numbers(3, 1, nil)))
+
+-- test an empty operator
+print("test with an empty operator: " .. sc_test.compare_result(nil, common:compare_numbers(3, 1, '')))
+
+-- test a nil number
+print("test with a nil number: " .. sc_test.compare_result(nil, common:compare_numbers(nil, 1, '<=')))
+
+-- test an empty number
+print("test with an empty number: " .. sc_test.compare_result(nil, common:compare_numbers('', 1, '<=')))
+
+-- test with a string as number
+print("test with a string: " .. sc_test.compare_result(nil, common:compare_numbers(string, 1, '<=')))
\ No newline at end of file
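`sc_common-test.lua` relies on a small `sc_test` helper module whose implementation is not shown in this patch. From the call sites, `compare_result(expected, actual)` and `compare_tables(expected, actual)` apparently return a printable pass/fail string. A hypothetical stand-in, useful for running the test file on its own, might look like:

```lua
-- hypothetical stand-in for centreon-stream-connectors-lib.sc_test
local sc_test = {}

-- compare scalar values and return a printable verdict
function sc_test.compare_result(expected, actual)
  if expected == actual then
    return "OK"
  end
  return "FAIL (expected: " .. tostring(expected) .. ", got: " .. tostring(actual) .. ")"
end

-- shallow table comparison, same contract as compare_result
function sc_test.compare_tables(expected, actual)
  if type(actual) ~= "table" then
    return "FAIL (got a " .. type(actual) .. " instead of a table)"
  end
  for k, v in pairs(expected) do
    if actual[k] ~= v then
      return "FAIL (index " .. tostring(k) .. ": expected " .. tostring(v) .. ", got " .. tostring(actual[k]) .. ")"
    end
  end
  return "OK"
end

return sc_test
```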
diff --git a/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml
new file mode 100644
index 00000000000..888b00be78b
--- /dev/null
+++ b/stream-connectors/packaging/connectors-lib/centreon-stream-connectors-lib.yaml
@@ -0,0 +1,51 @@
+name: "centreon-stream-connectors-lib"
+arch: "${ARCH}"
+platform: "linux"
+version_schema: "none"
+version: "${VERSION}"
+release: "${RELEASE}${DIST}"
+section: "default"
+priority: "optional"
+maintainer: "Centreon "
+description: |
+  These modules provide helpful methods to create stream connectors for Centreon
+  Commit: @COMMIT_HASH@
+vendor: "Centreon"
+homepage: "https://www.centreon.com"
+license: "Apache-2.0"
+
+contents:
+  - src: "../../modules/centreon-stream-connectors-lib"
+    dst: "/usr/share/lua/@luaver@/centreon-stream-connectors-lib"
+    packager: rpm
+
+  - src: "../../modules/centreon-stream-connectors-lib"
+    dst: "/usr/share/lua/5.3/centreon-stream-connectors-lib"
+    packager: deb
+  - src: "../../modules/centreon-stream-connectors-lib"
+    dst: "/usr/share/lua/5.4/centreon-stream-connectors-lib"
+    packager: deb
+
+overrides:
+  rpm:
+    depends:
+      - centreon-broker-core >= ${MAJOR_VERSION}
+      - centreon-broker-core < ${NEXT_MAJOR_VERSION}
+      - lua-curl >= ${MAJOR_VERSION}
+      - lua-curl < ${NEXT_MAJOR_VERSION}
+      - lua-socket >= 3.0
+      - lua
+  deb:
+    depends:
+      - "centreon-broker-core (>= ${MAJOR_VERSION}~)"
+      - "centreon-broker-core (<< ${NEXT_MAJOR_VERSION}~)"
+      - "lua5.3-curl (>= ${MAJOR_VERSION}~)"
+      - "lua5.3-curl (<< ${NEXT_MAJOR_VERSION}~)"
+      - "lua-socket (>= 3.0~)"
+      - "lua5.3"
+
+rpm:
+  summary: Centreon stream connectors lua modules
+  signature:
+    key_file: ${RPM_SIGNING_KEY_FILE}
+    key_id: ${RPM_SIGNING_KEY_ID}
diff --git a/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml b/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml
new file mode 100644
index 00000000000..f9e05dfe988
--- /dev/null
+++ b/stream-connectors/packaging/connectors/centreon-stream-connectors.yaml
@@ -0,0 +1,39 @@
+name: "@PACKAGE_NAME@"
+arch: "${ARCH}"
+platform: "linux"
+version_schema: "none"
+version: "${VERSION}"
+release: "${RELEASE}${DIST}"
+epoch: 1
+section: "default"
+priority: "optional"
+maintainer: "Centreon "
+description: |
+  These modules provide helpful methods to create stream connectors for Centreon
+  Commit: @COMMIT_HASH@
+vendor: "Centreon"
+homepage: "https://www.centreon.com"
+license: "Apache-2.0"
+
+contents:
+  - src: "../../centreon-certified/@CONNECTOR_NAME@"
+    dst: "/usr/share/centreon-broker/lua"
+
+overrides:
+  rpm:
+    depends: [
+      "centreon-stream-connectors-lib >= ${MAJOR_VERSION}",
+      "centreon-stream-connectors-lib < ${NEXT_MAJOR_VERSION}",
+      @RPM_DEPENDENCIES@
+    ]
+  deb:
+    depends: [
+      "centreon-stream-connectors-lib (>= ${MAJOR_VERSION}~)",
+      "centreon-stream-connectors-lib (<< ${NEXT_MAJOR_VERSION}~)",
+      @DEB_DEPENDENCIES@
+    ]
+rpm:
+  summary: Centreon stream connectors lua modules
+  signature:
+    key_file: ${RPM_SIGNING_KEY_FILE}
+    key_id: ${RPM_SIGNING_KEY_ID}
require("JSON") +local centreon_classroom = require("centreon_classroom") +local centreon_cafeteria = require("centreon_cafeteria") +local centreon_school = require("centreon_school") + +-- hire our first teacher +local first_teacher = { + first_name = "John", + last_name = "Doe", + speciality = "Maths" +} +-- build our first classroom +local first_classroom = centreon_classroom.new(first_teacher) + +-- put chairs and tables in our classroom +first_classroom:put_tables(13) +first_classroom:put_chairs(26) + +-- hire our second teacher +local second_teacher = { + first_name = "Jane", + last_name = "Doe", + speciality = "History" +} +-- build our second classroom +local second_classroom = centreon_classroom.new(second_teacher) + +-- put chairs and tables in our classroom +second_classroom:put_tables(5) +second_classroom:put_chairs(10) + +-- hire our third teacher +local third_teacher = { + first_name = "Robert", + last_name = "Bridge", + speciality = "Chemistry" +} +-- build our third classroom +local third_classroom = centreon_classroom.new(third_teacher) + +-- put chairs and tables in our classroom +third_classroom:put_tables(16) +third_classroom:put_chairs(32) + +-- hire a cook +local cook = { + nickname = "SpicyBob", + favourite_dish = "water" +} + +-- create a menu +local menu = { + starters = { + ["apple pie"] = { + name = "apple pie", + calories = 35, + ingredients = {"apple", "pie"} + }, + ["oignon soup"] = { + name = "oignon soup", + calories = 64, + ingredients = {"oignon", "soup"} + } + }, + dishes = { + ["fish and chips"] = { + name = "fish and chips", + calories = 666, + ingredients = {"fish", "chips"} + }, + ["mashed potatoes"] = { + name = "mashed potatoes", + calories = 25, + ingredients = {"potatoes", "milk"} + } + }, + desserts = { + ["cheese cake"] = { + name = "cheese cake", + calories = 251, + ingredients = {"cheese", "cake"} + }, + ["ice cream"] = { + name = "ice cream", + calories = 353, + ingredients = {"ice", "cream"} + } + } +} + +-- build our cafeteria +local cafeteria = centreon_cafeteria.new(menu, cook) + +-- add all classrooms in a table +local classrooms = { + first_classroom, + second_classroom, + third_classroom +} + +-- chose a city in which the school will be build +local city = { + country = "USA", + state = "Louisiana", + name = "New Orleans" +} + +-- build our school +local school = centreon_school.new(classrooms, cafeteria, city) + +-- display the capacity of the school +print("school capacity: " .. school:get_capacity()) + +-- get the school latitude and longitude +local school_location = JSON:decode(school:get_school_geocoordinates()) +-- store them in the appropriate table inside our school object +school.city.lat = school_location[1].lat +school.city.lon = school_location[1].lon + +-- open the list of facilities +local sport_facilities_file = io.open("/tmp/sport_facilities.json", "r") +-- read the content of the file and store it +local file_content = sport_facilities_file:read("*a") +-- close the file +io.close(sport_facilities_file) + +-- decode the list of facilities +local sport_facilities = JSON:decode(file_content) +-- try to find the best facility +local code, sport_facility = school:get_nearest_sport_facility(sport_facilities) + +-- print the result with the appropriate message +if code then + print("facility name is: " .. sport_facility.name .. ". Distance from school is: " .. sport_facility.distance .. "m") +else + print("no sport for our children, we should find new partnership with facilities near: " .. 
diff --git a/stream-connectors/training_course/answers/centreon_cafeteria.lua b/stream-connectors/training_course/answers/centreon_cafeteria.lua
new file mode 100644
index 00000000000..119c81ff8c4
--- /dev/null
+++ b/stream-connectors/training_course/answers/centreon_cafeteria.lua
@@ -0,0 +1,115 @@
+--[[
+----------------- EXERCISE 4 BEGINNING --------------------
+-- do not forget the return at the end of the file
+--]]
+
+-- initiate centreon_cafeteria object
+local centreon_cafeteria = {}
+local CentreonCafeteria = {}
+
+-- begin the centreon_cafeteria constructor
+function centreon_cafeteria.new(menu, cook)
+  local self = {}
+
+  -- use the hired cook or hire one if there is none
+  if cook then
+    self.cook = cook
+  else
+    self.cook = {
+      nickname = "Ratatouille",
+      favourite_dish = "Apple pie"
+    }
+  end
+
+  -- use the provided menu or fall back to a default one if there is none
+  if menu then
+    self.menu = menu
+  else
+    self.menu = {
+      starters = {
+        ["apple pie"] = {
+          name = "apple pie",
+          calories = 35,
+          ingredients = {"apple", "pie"}
+        },
+        ["oignon soup"] = {
+          name = "oignon soup",
+          calories = 64,
+          ingredients = {"oignon", "soup"}
+        }
+      },
+      dishes = {
+        ["fish and chips"] = {
+          name = "fish and chips",
+          calories = 666,
+          ingredients = {"fish", "chips"}
+        },
+        ["mashed potatoes"] = {
+          name = "mashed potatoes",
+          calories = 25,
+          ingredients = {"potatoes", "milk"}
+        }
+      },
+      desserts = {
+        ["cheese cake"] = {
+          name = "cheese cake",
+          calories = 251,
+          ingredients = {"cheese", "cake"}
+        },
+        ["ice cream"] = {
+          name = "ice cream",
+          calories = 353,
+          ingredients = {"ice", "cream"}
+        }
+      }
+    }
+  end
+
+  -- end the constructor
+  setmetatable(self, { __index = CentreonCafeteria })
+  return self
+end
+
+--[[
+----------------- EXERCISE 4 ENDING --------------------
+--]]
+
+
+--[[
+----------------- EXERCISE 5 BEGINNING --------------------
+--]]
+
+function CentreonCafeteria:check_alergy(dish, allergies)
+  -- find the dish on the menu
+  local dish_type = false
+
+  if self.menu.starters[dish] then
+    dish_type = "starters"
+  elseif self.menu.dishes[dish] then
+    dish_type = "dishes"
+  elseif self.menu.desserts[dish] then
+    dish_type = "desserts"
+  end
+
+  if not dish_type then
+    return false, "dish: " .. tostring(dish) .. " is not on the menu today."
+  end
+
+  for index, customer_ingredient in pairs(allergies) do
+    for key, dish_ingredient in pairs(self.menu[dish_type][dish].ingredients) do
+      if customer_ingredient == dish_ingredient then
+        return false, "you are allergic to: " .. tostring(customer_ingredient) .. " and there is: " .. tostring(dish_ingredient) .. " in the dish: " .. tostring(dish)
+      end
+    end
+  end
+
+  return true, "Here is your: " .. tostring(dish)
+end
+
+
+--[[
+----------------- EXERCISE 5 ENDING --------------------
+--]]
+
+-- below is the return that is part of exercise 4
+return centreon_cafeteria
diff --git a/stream-connectors/training_course/answers/centreon_school.lua b/stream-connectors/training_course/answers/centreon_school.lua
new file mode 100644
index 00000000000..6ae13afe49c
--- /dev/null
+++ b/stream-connectors/training_course/answers/centreon_school.lua
@@ -0,0 +1,168 @@
+-- load required dependencies
+local curl = require("cURL")
+local JSON = require("JSON")
+
+
+-- initiate centreon_school object
+local centreon_school = {}
+local CentreonSchool = {}
+
+-- beginning of the constructor
+function centreon_school.new(classrooms, cafeteria, city)
+  local self = {}
+
+  -- create a default city if there's none
+  if not city or type(city) ~= "table" then
+    self.city = {
+      country = "France",
+      state = "Landes",
+      name = "Mont de Marsan"
+    }
+  else
+    self.city = city
+  end
+
+  -- store classrooms and cafeteria inside the school object
+  self.classrooms = classrooms
+  self.cafeteria = cafeteria
+
+  -- end of constructor
+  setmetatable(self, { __index = CentreonSchool })
+  return self
+end
+
+-- get capacity method
+function CentreonSchool:get_capacity()
+  -- one chair per person
+  local chairs_number = 0
+
+  -- count each chair in each classroom
+  for index, classroom in ipairs(self.classrooms) do
+    chairs_number = chairs_number + classroom.chairs
+  end
+
+  -- return the number of chairs, which is the maximum capacity of the school
+  return chairs_number
+end
+
+-- get school geocoordinates method
+function CentreonSchool:get_school_geocoordinates()
+  -- use openstreetmap to get the lat and lon of our school
+  local openstreetmap = "https://nominatim.openstreetmap.org"
+  -- remove " " from names and replace it with "-" to build the OSM query
+  local query = "/search?q=" .. string.gsub(self.city.name, " ", "-")
+    .. "-" .. string.gsub(self.city.state, " ", "-")
+    .. "-" .. string.gsub(self.city.country, " ", "-")
+    .. "&format=json&polygon=1&addressdetails=1"
+
+  local url = openstreetmap .. query
+
+  -- create curl object
+  local http_response_body = ""
+  local http_request = curl.easy()
+    -- use the url we just built
+    :setopt_url(url)
+    -- store the curl body result inside a dedicated variable
+    :setopt_writefunction(
+      function (response)
+        http_response_body = tostring(response)
+      end
+    )
+    -- add a timeout to the connection
+    :setopt(curl.OPT_TIMEOUT, 60)
+    -- make sure we check the certificates
+    :setopt(curl.OPT_SSL_VERIFYPEER, true)
+    -- add the user-agent header so we don't get blocked
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36"
+      }
+    )
+
+  -- run the query
+  http_request:perform()
+  -- store the http code
+  local http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+  -- close curl object
+  http_request:close()
+  -- return the result (we could at least check the http return code before doing that)
+  return http_response_body
+end
+
+-- get the nearest sport facility. It requires a table with all the sport facilities
+function CentreonSchool:get_nearest_sport_facility(sport_facilities_list)
+  -- use project OSRM to get routing info from OSM
+  local routing_osm = "https://router.project-osrm.org"
+  -- kids do not drive, so they are going to walk
+  local endpoint = "/route/v1/foot/"
+  local option = "overview=false"
+
+  -- at the moment, we do not have any best facility
+  local best_facility = {
+    name = nil,
+    distance = nil
+  }
+
+  local result, query, http_response_body, http_response_code
+  -- create curl object
+  local http_request = curl.easy()
+    -- store the curl response body
+    :setopt_writefunction(
+      function (response)
+        http_response_body = tostring(response)
+      end
+    )
+    -- add a connection timeout
+    :setopt(curl.OPT_TIMEOUT, 60)
+    -- make sure we check the certificates
+    :setopt(curl.OPT_SSL_VERIFYPEER, true)
+    -- add the user-agent header to not be blocked
+    :setopt(
+      curl.OPT_HTTPHEADER,
+      {
+        "user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36"
+      }
+    )
+
+  -- we are going to get the distance from our school to every sport facility
+  for index, facility in ipairs(sport_facilities_list.facilities) do
+    -- build the OSRM query
+    query = endpoint .. self.city.lon .. "," .. self.city.lat .. ";" .. facility.lon .. "," .. facility.lat
+
+    -- add the url to our curl object
+    http_request:setopt(curl.OPT_URL, routing_osm .. query .. "?" .. option)
+    -- run the curl query
+    http_request:perform()
+
+    -- get the http return code
+    http_response_code = http_request:getinfo(curl.INFO_RESPONSE_CODE)
+    -- decode the result (we should first check that it is valid json to avoid raising an error)
+    result = JSON:decode(http_response_body)
+
+    -- apparently kids don't know how to walk over water, so they can't go to some specific facilities
+    if result.code ~= "Ok" or http_response_code ~= 200 then
+      print("can't use facility located in: " .. tostring(facility.comment))
+    -- if there is a walkable route to the facility, this might be the right one
+    else
+      -- only store the facility info in the best_facility table if it is the best one so far
+      if best_facility.distance == nil or result.routes[1].distance < best_facility.distance then
+        best_facility.name = facility.name
+        best_facility.distance = result.routes[1].distance
+      end
+    end
+  end
+
+  -- do not forget to close the curl object when all the queries are done
+  http_request:close()
+
+  -- maybe there wasn't any facility that could be reached by the kids
+  if not best_facility.name then
+    return false, best_facility
+  end
+
+  return true, best_facility
+end
+
+
+return centreon_school
\ No newline at end of file
" tables is a bit much, it is a classroom not a stadium") + math.randomseed(os.time()) + self.tables = math.random(1,20) + else + self.tables = tables + end +end + +function CentreonClassroom:put_chairs(chairs) + if not self.tables then + self:put_tables() + end + + if chairs > self.tables * 2 then + print("there are only " .. tostring(self.tables) .. " tables in the classroom," + .. "you can't have more than 2 chairs per table") + end + + self.chairs = chairs +end + +return centreon_classroom diff --git a/stream-connectors/training_course/centreon_classroom.md b/stream-connectors/training_course/centreon_classroom.md new file mode 100644 index 00000000000..6d5a587840a --- /dev/null +++ b/stream-connectors/training_course/centreon_classroom.md @@ -0,0 +1,90 @@ +# centreon_classroom module documentation + +- [centreon_classroom module documentation](#centreon_classroom-module-documentation) + - [Introduction](#introduction) + - [Module initialization](#module-initialization) + - [Module constructor](#module-constructor) + - [constructor: Example](#constructor-example) + - [put_tables method](#put_tables-method) + - [put_tables: parameters](#put_tables-parameters) + - [put_tables: example](#put_tables-example) + - [put_chairs method](#put_chairs-method) + - [put_chairs: parameters](#put_chairs-parameters) + - [put_chairs: example](#put_chairs-example) + +## Introduction + +The centreon_classroom module provides methods to help setting up your classroom. It has been made in OOP (object oriented programming) + +## Module initialization + +Since this is OOP, it is required to initiate your module + +### Module constructor + +Constructor can be initialized with one parameter or it will use a default value. + +- teacher. This is a table with teacher informations + +If you don't provide this parameter it will hire a default teacher + +### constructor: Example + +```lua +-- load classroom module +local centreon_classroom = require("centreon_classroom") + +local teacher = { + first_name = "Horace", + last_name = "Slughorn", + speciality = "Potions" +} + +-- create a new instance of the centreon_classroom module +local classroom = centreon_classroom.new(teacher) +``` + +## put_tables method + +The **put_tables** method put tables in the classroom. You can decide how many tables you want or it will put between 1 or 20 tables in your classroom + +### put_tables: parameters + +| parameter | type | optional | default value | +| ------------------------------ | -------------- | -------- | ------------- | +| tables | number | yes | | + +### put_tables: example + +```lua +local tables = 15 + +classroom:put_tables(tables) +print(classroom.tables) +--> it will print 15 + +classroom:put_tables() +print(classroom.tables) +--> it will print a number between 1 and 20 +``` + +## put_chairs method + +The **put_chairs** method add chairs in your classroom. You can't have more than 2 chairs per table. +If you don't have any tables in your classroom, it will add tables before and put 2 chairs per table. 
diff --git a/stream-connectors/training_course/exercices_answers.md b/stream-connectors/training_course/exercices_answers.md
new file mode 100644
index 00000000000..e5fe50ae995
--- /dev/null
+++ b/stream-connectors/training_course/exercices_answers.md
@@ -0,0 +1,115 @@
+# Exercises Answers
+
+- [Exercises Answers](#exercises-answers)
+  - [Exercise 1](#exercise-1)
+  - [Exercise 2](#exercise-2)
+  - [Exercise 3](#exercise-3)
+  - [Exercise 4](#exercise-4)
+  - [Exercise 5](#exercise-5)
+
+## Exercise 1
+
+you can use the default teacher
+
+```lua
+centreon_classroom = require("centreon_classroom")
+
+local classroom = centreon_classroom.new()
+print(tostring(classroom.teacher.first_name))
+--> will print "Minerva"
+```
+
+or you can hire your own teacher
+
+```lua
+centreon_classroom = require("centreon_classroom")
+
+local teacher = {
+  first_name = "Sybill",
+  last_name = "Trelawney",
+  speciality = "Divination"
+}
+
+local classroom = centreon_classroom.new(teacher)
+print(tostring(classroom.teacher.first_name))
+--> will print "Sybill"
+```
+
+## Exercise 2
+
+you can let the module decide how many tables there will be
+
+```lua
+-- if you do not have tables yet, put_chairs will first put tables in the classroom
+classroom:put_chairs(12)
+print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs))
+--> will print "tables: xx, chairs: 12"
+
+-- or you can first add a random number of tables and then add chairs
+classroom:put_tables()
+classroom:put_chairs(8)
+```
+
+or you can decide how many tables and chairs you want
+
+```lua
+classroom:put_tables(10)
+classroom:put_chairs(15)
+print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs))
+--> will print "tables: 10, chairs: 15"
+```
+
+## Exercise 3
+
+you need to add a "security" layer in the centreon_classroom module. The tables parameter must be a number, so we are going to make sure people call the put_tables method with a number and nothing else.
+
+```lua
+function CentreonClassroom:put_tables(tables)
+  if not tables or type(tables) ~= "number" then
+    math.randomseed(os.time())
+    self.tables = math.random(1,20)
+  elseif tables > 20 then
+    print(tables .. " tables is a bit much, it is a classroom not a stadium")
+    math.randomseed(os.time())
+    self.tables = math.random(1,20)
+  else
+    self.tables = tables
+  end
+end
+```
+
+In the above example, we've added a check: if the type of the "tables" variable is not a number, we ignore it and put a random number of tables in the classroom.
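With this guard in place, `put_tables("two")` degrades gracefully. Note that the exercise's check snippet also calls `put_chairs()` with no argument, so a similar default is needed in `put_chairs` as well; a sketch of that second guard (the two-chairs-per-table default is an assumption, it is not shown in the answers):

```lua
function CentreonClassroom:put_chairs(chairs)
  if not self.tables then
    self:put_tables()
  end

  -- guard sketch: when no valid chair count is given, default to 2 chairs per table
  if not chairs or type(chairs) ~= "number" then
    chairs = self.tables * 2
  end

  if chairs > self.tables * 2 then
    print("there are only " .. tostring(self.tables) .. " tables in the classroom,"
      .. " you can't have more than 2 chairs per table")
  end

  self.chairs = chairs
end
```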
+
+## Exercise 4
+
+In this exercise, you must create your first lua module and its constructor. There is an example in the [centreon_cafeteria.lua module](answers/centreon_cafeteria.lua) file.
+
+you can test your constructor using the following code in a lua script
+
+```lua
+local centreon_cafeteria = require("centreon_cafeteria")
+local cafeteria = centreon_cafeteria.new(menu, cook)
+
+print(tostring(cafeteria.cook.nickname))
+--> must print the nickname of your cook
+
+print(tostring(cafeteria.menu.starters["duck soup"].name))
+--> must print the name of the dish "duck soup"
+```
+
+## Exercise 5
+
+In this exercise, you must check if a kid is allergic to an ingredient that is in the dish they want. There is an example of a method that checks allergies in the [centreon_cafeteria.lua module](answers/centreon_cafeteria.lua) file.
+
+you can check your code using the following lua script
+
+```lua
+local centreon_cafeteria = require("centreon_cafeteria")
+local cafeteria = centreon_cafeteria.new(menu, cook)
+
+local return_code, return_message = cafeteria:check_alergy("duck soup", {"duck", "salt"})
+
+if not return_code then
+  print(return_message)
+end
+```
diff --git a/stream-connectors/training_course/exercises.md b/stream-connectors/training_course/exercises.md
new file mode 100644
index 00000000000..1af610d6fea
--- /dev/null
+++ b/stream-connectors/training_course/exercises.md
@@ -0,0 +1,223 @@
+# centreon_classroom Exercises
+
+- [centreon_classroom Exercises](#centreon_classroom-exercises)
+  - [CHAPTER 1: use a module](#chapter-1-use-a-module)
+    - [Exercise 1](#exercise-1)
+      - [Exercise 1: What you must do](#exercise-1-what-you-must-do)
+      - [Exercise 1: How can you check that it works](#exercise-1-how-can-you-check-that-it-works)
+    - [Exercise 2](#exercise-2)
+      - [Exercise 2: What you must do](#exercise-2-what-you-must-do)
+      - [Exercise 2: How can you check that it works](#exercise-2-how-can-you-check-that-it-works)
+    - [Exercise 3](#exercise-3)
+      - [Exercise 3: What you must do](#exercise-3-what-you-must-do)
+      - [Exercise 3: How can you check that it works](#exercise-3-how-can-you-check-that-it-works)
+  - [CHAPTER 2: create your lua module](#chapter-2-create-your-lua-module)
+    - [Exercise 4](#exercise-4)
+      - [Exercise 4: What you must do](#exercise-4-what-you-must-do)
+      - [Exercise 4: How can you check that it works](#exercise-4-how-can-you-check-that-it-works)
+    - [Exercise 5](#exercise-5)
+      - [Exercise 5: What you must do](#exercise-5-what-you-must-do)
+  - [CHAPTER 6: module interactions](#chapter-6-module-interactions)
+    - [Exercise 6](#exercise-6)
+      - [Exercise 6: What you must do](#exercise-6-what-you-must-do)
+    - [Exercise 7](#exercise-7)
+      - [Exercise 7: What you must do](#exercise-7-what-you-must-do)
+      - [Exercise 7: important information](#exercise-7-important-information)
+    - [Exercise 8](#exercise-8)
+      - [Exercise 8: What you must do](#exercise-8-what-you-must-do)
+      - [Exercise 8: how to succeed](#exercise-8-how-to-succeed)
+    - [Exercise 9](#exercise-9)
+      - [Exercise 9: What you must do](#exercise-9-what-you-must-do)
+      - [Exercise 9: How to succeed](#exercise-9-how-to-succeed)
+
+## CHAPTER 1: use a module
+
+What you will learn:
+
+- use a simple lua module using its documentation
+- use its methods
+- improve lua code
+
+### Exercise 1
+
+Create a `my_first_lesson.lua` script.
+
+To get your first lesson, you will need a classroom. Luckily, we got you covered.
+In your lua script, you must build a new classroom. To do so, use the centreon_classroom module.
+Maybe this module's documentation can help you go through that.
+
+#### Exercise 1: What you must do
+
+- instantiate a new classroom
+- check if a teacher is in your classroom
+
+#### Exercise 1: How can you check that it works
+
+```lua
+print(tostring(classroom.teacher.first_name))
+--> must print the first name of your teacher
+```
+
+### Exercise 2
+
+You have a classroom; maybe you want to sit somewhere. So add at least one table and one chair.
+
+#### Exercise 2: What you must do
+
+- add tables in your classroom
+- add chairs in your classroom
+
+#### Exercise 2: How can you check that it works
+
+```lua
+print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs))
+--> must print "tables: xx, chairs: yy"
+```
+
+### Exercise 3
+
+You do not like numbers, and for some reason you don't want **2** tables but **two** tables.
+
+This means that you are going to use the following method
+
+```lua
+classroom:put_tables("two")
+```
+
+Now that you have tables, you want chairs.
+
+```lua
+classroom:put_chairs()
+```
+
+This is going to break the whole classroom.
+
+#### Exercise 3: What you must do
+
+- find a way to handle bad parameters
+
+#### Exercise 3: How can you check that it works
+
+```lua
+classroom:put_tables("two")
+classroom:put_chairs()
+
+print("tables: " .. tostring(classroom.tables) .. ", chairs: " .. tostring(classroom.chairs))
+
+--> must print "tables: xx, chairs: yy"
+```
+
+## CHAPTER 2: create your lua module
+
+What you will learn:
+
+- create your first lua module
+- create your first method
+- loop through tables
+
+### Exercise 4
+
+There is an old legend saying that people must eat and drink in order to survive. We are going to build a cafeteria.
+
+#### Exercise 4: What you must do
+
+- create a lua module called centreon_cafeteria
+- a cafeteria must have a cook and a menu
+  - a menu is made of starters, dishes and desserts
+  - each starter, dish and dessert has a name, a number of calories and a list of ingredients
+  - a cook has a nickname and a favourite dish
+
+#### Exercise 4: How can you check that it works
+
+```lua
+local centreon_cafeteria = require("centreon_cafeteria")
+local cafeteria = centreon_cafeteria.new(menu, cook)
+
+print(tostring(cafeteria.cook.nickname))
+--> must print the nickname of your cook
+
+print(tostring(cafeteria.menu.starters["duck soup"].name))
+--> must print the name of the dish "duck soup"
+```
+
+### Exercise 5
+
+We should make sure that we don't serve dishes containing ingredients a student is allergic to. Our cafeteria module will have a method called check_alergy() that takes two parameters: the dish our student wants and the list of ingredients the student is allergic to.
+
+#### Exercise 5: What you must do
+
+- create a method called check_alergy in your module
+- it needs to take as parameters the dish and the list of ingredients the student can't eat
+- it must return false and a message if there is at least one ingredient the student can't eat in the dish, or if the dish doesn't exist
+- it must return true and a message if the dish is safe for the student
+
+## CHAPTER 6: module interactions
+
+What you will learn:
+
+- use external modules such as lua-json
+- work with a basic API using cURL
+- create/update an object
+- interactions between objects
+
+### Exercise 6
+
+We know how to build a classroom and a cafeteria. Nothing is stopping us from building our school.
+A school needs three or more classrooms, one cafeteria and a city to be built in.
+
+#### Exercise 6: What you must do
+
+- create a lua module called centreon_school
+- a school must have three parameters:
+  - a list of classrooms
+  - one cafeteria
+  - a city, characterised by the following information (you must use a real city):
+    - country
+    - state
+    - name
+
+### Exercise 7
+
+The city mayor needs to know how many children can attend classes in your school.
+
+#### Exercise 7: What you must do
+
+- create a method called get_capacity() that returns the number of children that can sit in your school
+
+#### Exercise 7: important information
+
+The maximum number of children is equal to the number of chairs in all classrooms. We are not monsters; they will not learn by standing up all day.
+
+### Exercise 8
+
+We want the children that attend classes in our school to be as healthy as possible. To do so, we need to send them to the closest sport facility. First of all, we need to know the geocoordinates of our school.
+
+#### Exercise 8: What you must do
+
+- create a method called get_school_geocoordinates() that returns a table with the latitude and the longitude
+- you must use the following user-agent http header with curl to not be blocked: **user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36**
+
+#### Exercise 8: how to succeed
+
+- to get your location data, you can use the following url, where:
+  - Mont de Marsan is the city name
+  - Landes is the state
+  - France is the country
+
+https://nominatim.openstreetmap.org/search?q=mont-de-marsan-landes-france&format=json&polygon=1&addressdetails=1
+
+- your script needs to use a json parser; the answers use lua-json.
+
+### Exercise 9
+
+Find the closest sport facility. Our administrative department concluded partnerships with 7 sport facilities across the world. Our children may have to walk a few thousand kilometers in order to play basketball, but if they don't attend their sport class, they are going to be expelled from our school!
+
+#### Exercise 9: What you must do
+
+- use the sport_facilities.json file to locate every sport facility
+- return the name of the closest sport facility and the distance the children will have to walk to get there
+
+#### Exercise 9: How to succeed
+
+- depending on where your school is located, there might not be any facility where children can go; you should handle that specific case
+- to get distance information between your school and the sport facilities, you can use the following api documentation http://project-osrm.org/docs/v5.24.0/api/#
diff --git a/stream-connectors/training_course/sport_facilities.json b/stream-connectors/training_course/sport_facilities.json
new file mode 100644
index 00000000000..97009e44149
--- /dev/null
+++ b/stream-connectors/training_course/sport_facilities.json
@@ -0,0 +1,47 @@
+{
+  "facilities":
+  [
+    {
+      "name": "Marco Polo",
+      "lat": 35.078807,
+      "lon": -106.59378,
+      "comment": "North America"
+    },
+    {
+      "name": "Christopher Columbus",
+      "lat": -33.483838,
+      "lon": -70.631712,
+      "comment": "South America"
+    },
+    {
+      "name": "Amerigo Vespucci",
+      "lat": 35.25125182,
+      "lon": 25.125182,
+      "comment": "Europe"
+    },
+    {
+      "name": "John Cabot",
+      "lat": -1.285892,
+      "lon": 36.897156,
+      "comment": "Africa"
+    },
+    {
+      "name": "Ferdinand Magellan",
+      "lat": 22.571086,
+      "lon": 88.366460,
+      "comment": "India"
+    },
+    {
+      "name": "James Cook",
+      "lat": 62.011106,
+      "lon": -6.775236,
+      "comment": "Faroe Islands"
+    },
+    {
+      "name": "Vasco da Gama",
+      "lat": -18.138824,
+      "lon": -178.425807,
+      "comment": "Fiji Islands"
+    }
+  ]
+}
\ No newline at end of file
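To close the loop on exercises 8 and 9, here is a condensed sketch of how the answer modules fit together, mirroring `build_school.lua`; it assumes `classrooms`, `cafeteria` and `city` were built as in the earlier chapters, and uses the same lua-json module and facilities file as the answers:

```lua
local JSON = require("JSON")
local centreon_school = require("centreon_school")

-- exercise 8: locate the school (classrooms, cafeteria and city built beforehand)
local school = centreon_school.new(classrooms, cafeteria, city)
local location = JSON:decode(school:get_school_geocoordinates())
school.city.lat = location[1].lat
school.city.lon = location[1].lon

-- exercise 9: find the closest facility children can actually walk to
local file = io.open("sport_facilities.json", "r")
local facilities = JSON:decode(file:read("*a"))
file:close()

local found, facility = school:get_nearest_sport_facility(facilities)
if found then
  print(facility.name .. " is " .. facility.distance .. "m away")
else
  print("no reachable facility near: " .. school.city.name)
end
```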