From 4cef2a9b0c7e58f24b977ecc50379c0bcde968d3 Mon Sep 17 00:00:00 2001 From: Marcin Date: Tue, 15 Oct 2024 13:43:38 +0200 Subject: [PATCH 01/25] Add `onlyNamespace` prop and conditional rendering for weighted topics. --- .../pages/CreateCommunity/CreateCommunity.tsx | 10 ++- .../CommunityStakeStep/CommunityStakeStep.tsx | 4 ++ .../EnableStake/EnableStake.tsx | 52 ++++++++++----- .../SignStakeTransactions.tsx | 63 ++++++++++++------- .../useReserveCommunityNamespace.tsx | 4 ++ .../steps/CommunityStakeStep/types.ts | 2 + .../CreateCommunity/useCreateCommunity.ts | 4 +- .../views/pages/CreateCommunity/utils.ts | 3 +- 8 files changed, 97 insertions(+), 45 deletions(-) diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/CreateCommunity.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/CreateCommunity.tsx index b1d41335a9a..09a6fddbad5 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/CreateCommunity.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/CreateCommunity.tsx @@ -10,12 +10,15 @@ import SuccessStep from './steps/SuccessStep'; import useCreateCommunity from './useCreateCommunity'; import { CreateCommunityStep, getFormSteps } from './utils'; +import { useFlag } from 'hooks/useFlag'; import CWPageLayout from 'views/components/component_kit/new_designs/CWPageLayout'; import useAppStatus from '../../../hooks/useAppStatus'; import './CreateCommunity.scss'; import CommunityInformationStep from './steps/CommunityInformationStep'; const CreateCommunity = () => { + const weightedTopicsEnabled = useFlag('weightedTopics'); + const { createCommunityStep, selectedCommunity, @@ -73,6 +76,7 @@ const CreateCommunity = () => { selectedAddress={selectedAddress} // @ts-expect-error chainId={selectedChainId} + onlyNamespace={weightedTopicsEnabled} /> ); @@ -86,7 +90,11 @@ const CreateCommunity = () => {
{!isSuccessStep && ( )} diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx index 50fadef8a00..b0f8767959a 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx @@ -14,6 +14,7 @@ interface CommunityStakeStepProps { isTopicFlow?: boolean; onTopicFlowStepChange?: (step: CreateTopicStep) => void; refetchStakeQuery?: () => void; + onlyNamespace?: boolean; } const CommunityStakeStep = ({ @@ -25,6 +26,7 @@ const CommunityStakeStep = ({ isTopicFlow, onTopicFlowStepChange, refetchStakeQuery, + onlyNamespace, }: CommunityStakeStepProps) => { const [enableStakePage, setEnableStakePage] = useState(true); const [communityStakeData, setCommunityStakeData] = useState({ @@ -60,6 +62,7 @@ const CommunityStakeStep = ({ communityStakeData={communityStakeData} chainId={chainId} isTopicFlow={isTopicFlow} + onlyNamespace={onlyNamespace} /> ) : ( )}
diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/EnableStake/EnableStake.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/EnableStake/EnableStake.tsx index 012c1d5eeae..f75e8b42e9a 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/EnableStake/EnableStake.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/EnableStake/EnableStake.tsx @@ -1,5 +1,6 @@ import React, { useState } from 'react'; +import { useFlag } from 'hooks/useFlag'; import { CWDivider } from 'views/components/component_kit/cw_divider'; import { CWText } from 'views/components/component_kit/cw_text'; import { CWButton } from 'views/components/component_kit/new_designs/CWButton'; @@ -19,10 +20,12 @@ const EnableStake = ({ communityStakeData, chainId, isTopicFlow, + onlyNamespace, }: EnableStakeProps) => { const [namespaceError, setNamespaceError] = useState(''); const { namespaceFactory } = useNamespaceFactory(parseInt(chainId)); + const weightedTopicsEnabled = useFlag('weightedTopics'); const clearNamespaceError = () => { setNamespaceError(''); @@ -58,14 +61,27 @@ const EnableStake = ({ return (
- Do you want to enable community stake? + + {onlyNamespace && weightedTopicsEnabled + ? 'Register a Namespace for your community' + : 'Do you want to enable community stake?'} + - Community stake allows your community to fundraise via member - contributions. Community members can make financial contributions in - exchange for more voting power within the community. The more stake a - member has, the stronger their vote becomes. The funds are stored in a - secure community wallet on-chain and can be redistributed if members - decide to burn their stake. + {onlyNamespace && weightedTopicsEnabled ? ( + <> + Registering your Namespace onchain will enable you to utilize + onchain features on Common such as contests and weighted voting + + ) : ( + <> + Community stake allows your community to fundraise via member + contributions. Community members can make financial contributions + in exchange for more voting power within the community. The more + stake a member has, the stronger their vote becomes. The funds are + stored in a secure community wallet on-chain and can be + redistributed if members decide to burn their stake. + + )} @@ -101,16 +117,18 @@ const EnableStake = ({ /> - - Not sure? - - Learn more about community stake - - + {!onlyNamespace && !weightedTopicsEnabled && ( + + Not sure? 
+ + Learn more about community stake + + + )} diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/SignStakeTransactions.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/SignStakeTransactions.tsx index 84f7a1db3b6..7245d1889f2 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/SignStakeTransactions.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/SignStakeTransactions.tsx @@ -23,6 +23,7 @@ const SignStakeTransactions = ({ isTopicFlow, onSuccess, onCancel, + onlyNamespace, }: SignStakeTransactionsProps) => { const { handleReserveCommunityNamespace, reserveNamespaceData } = useReserveCommunityNamespace({ @@ -31,6 +32,7 @@ const SignStakeTransactions = ({ symbol: communityStakeData.symbol, userAddress: selectedAddress.address, chainId, + onSuccess: onlyNamespace ? onSuccess : undefined, }); const { handleLaunchCommunityStake, launchStakeData } = @@ -59,22 +61,26 @@ const SignStakeTransactions = ({ onClick: handleReserveCommunityNamespace, }, }, - { - label: 'Launch community stake', - state: launchStakeData.state, - errorText: launchStakeData.errorText, - ...(reserveNamespaceData.state === 'completed' - ? { - actionButton: { - label: 'Sign', - disabled: - launchStakeData.state === 'loading' || - launchStakeData.state === 'completed', - onClick: handleLaunchCommunityStake, - }, - } - : {}), - }, + ...(onlyNamespace + ? [] + : [ + { + label: 'Launch community stake', + state: launchStakeData.state, + errorText: launchStakeData.errorText, + ...(reserveNamespaceData.state === 'completed' + ? 
{ + actionButton: { + label: 'Sign', + disabled: + launchStakeData.state === 'loading' || + launchStakeData.state === 'completed', + onClick: handleLaunchCommunityStake, + }, + } + : {}), + }, + ]), ]; }; @@ -83,8 +89,9 @@ const SignStakeTransactions = ({ ? onCancel() : openConfirmation({ title: 'Are you sure you want to cancel?', - description: - 'Community Stake has not been enabled for your community yet', + description: onlyNamespace + ? 'Namespace has not been enabled for your community yet' + : 'Community Stake has not been enabled for your community yet', buttons: [ { label: 'Cancel', @@ -108,12 +115,22 @@ const SignStakeTransactions = ({ return (
- Sign transactions to launch stake? + + {onlyNamespace + ? 'Sign transactions to reserve namespace' + : 'Sign transactions to launch stake?'} + - In order to launch community stake you will need to sign two - transactions. The first launches your community namespace on the - blockchain, and the second launches your community stake. Both - transactions have associated gas fees. + {onlyNamespace ? ( + 'In order to reserve namespace you will need to sign one transaction.' + ) : ( + <> + In order to launch community stake you will need to sign two + transactions. The first launches your community namespace on the + blockchain, and the second launches your community stake. Both + transactions have associated gas fees. + + )} diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/useReserveCommunityNamespace.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/useReserveCommunityNamespace.tsx index b36e902dee9..17318ce84c0 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/useReserveCommunityNamespace.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/useReserveCommunityNamespace.tsx @@ -20,6 +20,7 @@ interface UseReserveCommunityNamespaceProps { symbol: string; userAddress: string; chainId: string; + onSuccess?: () => void; } const useReserveCommunityNamespace = ({ @@ -28,6 +29,7 @@ const useReserveCommunityNamespace = ({ symbol, userAddress, chainId, + onSuccess, }: UseReserveCommunityNamespaceProps) => { const [reserveNamespaceData, setReserveNamespaceData] = useState(defaultActionState); @@ -84,6 +86,8 @@ const useReserveCommunityNamespace = ({ errorText: '', }); + onSuccess?.(); + trackAnalytics({ event: MixpanelCommunityStakeEvent.RESERVED_COMMUNITY_NAMESPACE, community: chainId, diff 
--git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/types.ts b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/types.ts index 2ca7b2a44ff..6ee10d0e1b2 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/types.ts +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/types.ts @@ -19,6 +19,7 @@ export interface SignStakeTransactionsProps { createdCommunityId: string; chainId: string; isTopicFlow?: boolean; + onlyNamespace?: boolean; } export interface EnableStakeProps { @@ -27,6 +28,7 @@ export interface EnableStakeProps { communityStakeData: StakeData; chainId: string; isTopicFlow?: boolean; + onlyNamespace?: boolean; } export const defaultActionState: ActionState = { diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/useCreateCommunity.ts b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/useCreateCommunity.ts index 8ff216ab2f8..f0856743fd7 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/useCreateCommunity.ts +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/useCreateCommunity.ts @@ -49,9 +49,7 @@ const useCreateCommunity = () => { ); const showCommunityStakeStep = - !weightedTopicsEnabled && - isValidStepToShowCommunityStakeFormStep && - isSupportedChainSelected; + isValidStepToShowCommunityStakeFormStep && isSupportedChainSelected; return { createCommunityStep, diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/utils.ts b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/utils.ts index 56321c2418e..f42794256b5 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/utils.ts +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/utils.ts @@ -11,6 +11,7 @@ export enum CreateCommunityStep { export const getFormSteps = 
( createCommunityStep: CreateCommunityStep, showCommunityStakeStep: boolean, + weightedTopicsEnabled: boolean, ): CWFormStepsProps['steps'] => { return [ { @@ -32,7 +33,7 @@ export const getFormSteps = ( ...((showCommunityStakeStep ? [ { - label: 'Community Stake', + label: weightedTopicsEnabled ? 'Namespace' : 'Community Stake', state: createCommunityStep < CreateCommunityStep.CommunityStake ? 'inactive' From 8efa328373565c3ae1321aa7d2f107c600734f43 Mon Sep 17 00:00:00 2001 From: Marcin Date: Tue, 15 Oct 2024 14:13:06 +0200 Subject: [PATCH 02/25] Add `hasNamespaceReserved`, `namespace`, and `symbol` props; update state logic. --- .../StakeIntegration/StakeIntegration.tsx | 2 ++ .../CommunityStakeStep/CommunityStakeStep.tsx | 14 +++++++++++--- .../SignStakeTransactions.tsx | 2 ++ .../useReserveCommunityNamespace.tsx | 9 +++++++-- .../steps/CommunityStakeStep/types.ts | 1 + 5 files changed, 23 insertions(+), 5 deletions(-) diff --git a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/StakeIntegration/StakeIntegration.tsx b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/StakeIntegration/StakeIntegration.tsx index 0ec5653d3de..4d8caab852e 100644 --- a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/StakeIntegration/StakeIntegration.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/StakeIntegration/StakeIntegration.tsx @@ -127,6 +127,8 @@ const StakeIntegration = ({ onTopicFlowStepChange={onTopicFlowStepChange} createdCommunityName={community?.name} createdCommunityId={community?.id || ''} + namespace={community?.namespace} + symbol={community?.default_symbol} // @ts-expect-error selectedAddress={selectedAddress} chainId={communityChainId} diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx 
index b0f8767959a..20bcbf2088b 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx @@ -15,6 +15,8 @@ interface CommunityStakeStepProps { onTopicFlowStepChange?: (step: CreateTopicStep) => void; refetchStakeQuery?: () => void; onlyNamespace?: boolean; + namespace?: string | null; + symbol?: string; } const CommunityStakeStep = ({ @@ -27,11 +29,16 @@ const CommunityStakeStep = ({ onTopicFlowStepChange, refetchStakeQuery, onlyNamespace, + namespace, + symbol, }: CommunityStakeStepProps) => { - const [enableStakePage, setEnableStakePage] = useState(true); + const hasNamespaceReserved = !!namespace; + const [enableStakePage, setEnableStakePage] = useState( + hasNamespaceReserved ? false : true, + ); const [communityStakeData, setCommunityStakeData] = useState({ - namespace: createdCommunityName || '', - symbol: (createdCommunityName || '').toUpperCase().slice(0, 4), + namespace: namespace || createdCommunityName || '', + symbol: symbol || (createdCommunityName || '').toUpperCase().slice(0, 4), }); const handleOptInEnablingStake = ({ namespace, symbol }) => { @@ -74,6 +81,7 @@ const CommunityStakeStep = ({ chainId={chainId} isTopicFlow={isTopicFlow} onlyNamespace={onlyNamespace} + hasNamespaceReserved={hasNamespaceReserved} /> )}
diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/SignStakeTransactions.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/SignStakeTransactions.tsx index 7245d1889f2..a604b2f914d 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/SignStakeTransactions.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/SignStakeTransactions.tsx @@ -24,6 +24,7 @@ const SignStakeTransactions = ({ onSuccess, onCancel, onlyNamespace, + hasNamespaceReserved, }: SignStakeTransactionsProps) => { const { handleReserveCommunityNamespace, reserveNamespaceData } = useReserveCommunityNamespace({ @@ -33,6 +34,7 @@ const SignStakeTransactions = ({ userAddress: selectedAddress.address, chainId, onSuccess: onlyNamespace ? onSuccess : undefined, + hasNamespaceReserved, }); const { handleLaunchCommunityStake, launchStakeData } = diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/useReserveCommunityNamespace.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/useReserveCommunityNamespace.tsx index 17318ce84c0..916b3dc8c15 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/useReserveCommunityNamespace.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/SignStakeTransactions/useReserveCommunityNamespace.tsx @@ -21,6 +21,7 @@ interface UseReserveCommunityNamespaceProps { userAddress: string; chainId: string; onSuccess?: () => void; + hasNamespaceReserved?: boolean; } const useReserveCommunityNamespace = ({ @@ -30,9 +31,13 @@ const useReserveCommunityNamespace = ({ 
userAddress, chainId, onSuccess, + hasNamespaceReserved, }: UseReserveCommunityNamespaceProps) => { - const [reserveNamespaceData, setReserveNamespaceData] = - useState(defaultActionState); + const [reserveNamespaceData, setReserveNamespaceData] = useState( + hasNamespaceReserved + ? { state: 'completed', errorText: '' } + : defaultActionState, + ); const { namespaceFactory } = useNamespaceFactory(parseInt(chainId)); const { mutateAsync: updateCommunity } = useUpdateCommunityMutation({ diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/types.ts b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/types.ts index 6ee10d0e1b2..b4697c12abe 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/types.ts +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/types.ts @@ -20,6 +20,7 @@ export interface SignStakeTransactionsProps { chainId: string; isTopicFlow?: boolean; onlyNamespace?: boolean; + hasNamespaceReserved?: boolean; } export interface EnableStakeProps { From bd6acd7e6a9b5ba8bc93440d5fe55c30ec38bbdb Mon Sep 17 00:00:00 2001 From: rotorsoft Date: Tue, 15 Oct 2024 16:15:44 -0400 Subject: [PATCH 03/25] replace global activity cache with generic query caching --- libs/adapters/src/trpc/handlers.ts | 39 +++++- libs/core/src/ports/interfaces.ts | 1 + .../model/src/feed/GetGlobalActivity.query.ts | 5 +- libs/model/src/feed/GetUserActivity.query.ts | 2 +- ...ctivityCache.ts => getUserActivityFeed.ts} | 127 +----------------- libs/model/src/index.ts | 1 - packages/commonwealth/main.ts | 7 - .../server/api/external-router.ts | 29 ++-- packages/commonwealth/server/api/feed.ts | 4 +- packages/commonwealth/server/api/threads.ts | 13 +- .../controllers/server_comments_controller.ts | 7 +- .../controllers/server_threads_controller.ts | 7 +- .../server/routes/viewGlobalActivity.ts | 16 --- 
.../commonwealth/server/routing/router.ts | 15 +-- 14 files changed, 78 insertions(+), 195 deletions(-) rename libs/model/src/{globalActivityCache.ts => getUserActivityFeed.ts} (50%) delete mode 100644 packages/commonwealth/server/routes/viewGlobalActivity.ts diff --git a/libs/adapters/src/trpc/handlers.ts b/libs/adapters/src/trpc/handlers.ts index 3506758b015..91a6c2a7017 100644 --- a/libs/adapters/src/trpc/handlers.ts +++ b/libs/adapters/src/trpc/handlers.ts @@ -1,11 +1,14 @@ import { + CacheNamespaces, Events, INVALID_ACTOR_ERROR, INVALID_INPUT_ERROR, INVALID_STATE_ERROR, + cache, command as coreCommand, query as coreQuery, handleEvent, + logger, type EventSchemas, type EventsHandlerMetadata, type Metadata, @@ -14,6 +17,8 @@ import { TRPCError } from '@trpc/server'; import { ZodSchema, ZodUndefined, z } from 'zod'; import { Commit, Tag, Track, buildproc, procedure } from './middleware'; +const log = logger(import.meta); + const trpcerror = (error: unknown): TRPCError => { if (error instanceof Error) { const { name, message, ...other } = error; @@ -89,6 +94,7 @@ export const command = < * @param factory query factory * @param tag query tag used for OpenAPI spec grouping * @param forceSecure whether to force secure requests for rate-limited external-router + * @param ttlSecs cache response ttl in seconds * @returns tRPC query procedure */ export const query = < @@ -98,7 +104,10 @@ export const query = < >( factory: () => Metadata, tag: Tag, - forceSecure?: boolean, + options?: { + forceSecure?: boolean; + ttlSecs?: number; + }, ) => { const md = factory(); return buildproc({ @@ -106,10 +115,23 @@ export const query = < name: factory.name, md, tag, - forceSecure, + forceSecure: options?.forceSecure, }).query(async ({ ctx, input }) => { try { - return await coreQuery( + const cacheKey = options?.ttlSecs + ? 
`${factory.name}_${JSON.stringify(input)}` + : undefined; + if (cacheKey) { + const cachedReponse = await cache().getKey( + CacheNamespaces.Query_Response, + cacheKey, + ); + if (cachedReponse) { + log.info(`Returning cached response for ${cacheKey}`); + return JSON.parse(cachedReponse); + } + } + const response = await coreQuery( md, { actor: ctx.actor, @@ -117,6 +139,17 @@ export const query = < }, false, ); + if (cacheKey) { + void cache() + .setKey( + CacheNamespaces.Query_Response, + cacheKey, + JSON.stringify(response), + options?.ttlSecs, + ) + .then(() => log.info(`Cached response for ${cacheKey}`)); + } + return response; } catch (error) { throw trpcerror(error); } diff --git a/libs/core/src/ports/interfaces.ts b/libs/core/src/ports/interfaces.ts index 4a50c29732f..974b1a645d4 100644 --- a/libs/core/src/ports/interfaces.ts +++ b/libs/core/src/ports/interfaces.ts @@ -78,6 +78,7 @@ export enum CacheNamespaces { Activity_Cache = 'activity_cache', Rate_Limiter = 'rate_limiter', Api_key_auth = 'api_key_auth', + Query_Response = 'query_response', } /** diff --git a/libs/model/src/feed/GetGlobalActivity.query.ts b/libs/model/src/feed/GetGlobalActivity.query.ts index f0f8deeec99..b118d877201 100644 --- a/libs/model/src/feed/GetGlobalActivity.query.ts +++ b/libs/model/src/feed/GetGlobalActivity.query.ts @@ -1,13 +1,12 @@ import { Query } from '@hicommonwealth/core'; import * as schemas from '@hicommonwealth/schemas'; -import { GlobalActivityCache } from '../globalActivityCache'; +import { getUserActivityFeed } from '../getUserActivityFeed'; export function GetGlobalActivity(): Query { return { ...schemas.ActivityFeed, auth: [], secure: false, - body: async () => - await GlobalActivityCache.getInstance().getGlobalActivity(), + body: async () => await getUserActivityFeed({}), }; } diff --git a/libs/model/src/feed/GetUserActivity.query.ts b/libs/model/src/feed/GetUserActivity.query.ts index 3cabbca27bb..06b301e46d7 100644 --- 
a/libs/model/src/feed/GetUserActivity.query.ts +++ b/libs/model/src/feed/GetUserActivity.query.ts @@ -1,6 +1,6 @@ import { Query } from '@hicommonwealth/core'; import * as schemas from '@hicommonwealth/schemas'; -import { getUserActivityFeed } from '../globalActivityCache'; +import { getUserActivityFeed } from '../getUserActivityFeed'; export function GetUserActivity(): Query { return { diff --git a/libs/model/src/globalActivityCache.ts b/libs/model/src/getUserActivityFeed.ts similarity index 50% rename from libs/model/src/globalActivityCache.ts rename to libs/model/src/getUserActivityFeed.ts index 60ed27a0c08..9cf5d74c653 100644 --- a/libs/model/src/globalActivityCache.ts +++ b/libs/model/src/getUserActivityFeed.ts @@ -1,7 +1,5 @@ -import { CacheNamespaces, cache, logger } from '@hicommonwealth/core'; import { ActivityFeed, ActivityThread } from '@hicommonwealth/schemas'; import { QueryTypes } from 'sequelize'; -import { v4 as uuidv4 } from 'uuid'; import { z } from 'zod'; import { models } from './database'; @@ -15,9 +13,7 @@ export async function getUserActivityFeed({ user_id = 0, thread_limit = 50, comment_limit = 3, -}: Omit, 'is_global'> & { - user_id?: number; -}) { +}: z.infer & { user_id?: number }) { const query = ` WITH user_communities AS ( @@ -114,124 +110,3 @@ ORDER BY return threads.map((t) => t.thread); } - -const log = logger(import.meta); - -export class GlobalActivityCache { - private _cacheKey = 'global_activity'; - private _lockName = 'global_activity_cache_locker'; - private static _instance: GlobalActivityCache; - - constructor( - private _cacheTTL: number = 60 * 5, // cache TTL in seconds - ) {} - - static getInstance(cacheTTL?: number): GlobalActivityCache { - if (!GlobalActivityCache._instance) { - GlobalActivityCache._instance = new GlobalActivityCache(cacheTTL); - } - return GlobalActivityCache._instance; - } - - public async start() { - await this.refreshGlobalActivity(); - // eslint-disable-next-line 
@typescript-eslint/no-misused-promises - setInterval(this.refreshGlobalActivity.bind(this), this._cacheTTL * 1000); - } - - public async getGlobalActivity() { - const activity = await cache().getKey( - CacheNamespaces.Activity_Cache, - this._cacheKey, - ); - if (!activity) { - if (GlobalActivityCache._instance) { - const msg = 'Failed to fetch global activity from Redis'; - log.error(msg); - } - return await getUserActivityFeed({}); - } - return JSON.parse(activity); - } - - public async deleteActivityFromCache(threadId: number): Promise { - const errorMsg = 'Failed to update global activity in Redis'; - - try { - const res = await cache().getKey( - CacheNamespaces.Activity_Cache, - this._cacheKey, - ); - - if (!res) { - log.info('Global Activity Cache is empty'); - return; - } - - let activity = JSON.parse(res); - let updated = false; - activity = activity.filter((a: any) => { - let shouldKeep = true; - if (a.thread_id === threadId) { - updated = true; - shouldKeep = false; - } - return shouldKeep; - }); - - if (!updated) return; - - const result = await cache().setKey( - CacheNamespaces.Activity_Cache, - this._cacheKey, - JSON.stringify(activity), - ); - if (!result) { - log.error(errorMsg); - } - } catch (e: any) { - log.error(errorMsg, e); - } - } - - private async refreshGlobalActivity(): Promise { - try { - const lockAcquired = await this.acquireLock(); - - if (lockAcquired === false) { - log.info('Unable to acquire lock. 
Skipping refresh...'); - return; - } - - const activity = await getUserActivityFeed({}); - const result = await cache().setKey( - CacheNamespaces.Activity_Cache, - this._cacheKey, - JSON.stringify(activity), - ); - - if (!result) { - const msg = 'Failed to save global activity in Redis'; - log.error(msg); - return; - } - - log.info('Activity cache successfully refreshed'); - } catch (e: any) { - const msg = 'Failed to refresh the global cache'; - log.error(msg, e); - } - } - - private async acquireLock() { - return await cache().setKey( - CacheNamespaces.Activity_Cache, - this._lockName, - uuidv4(), - // shorten by 5 seconds to eliminate any discrepancies - // between setInterval delay and Redis TTL - this._cacheTTL - 5, - true, - ); - } -} diff --git a/libs/model/src/index.ts b/libs/model/src/index.ts index aac4bcb42ee..c465e12e0f5 100644 --- a/libs/model/src/index.ts +++ b/libs/model/src/index.ts @@ -31,6 +31,5 @@ export type { E2E_TestEntities } from './tester'; export * from './chainEventSignatures'; export * from './config'; export * from './database'; -export * from './globalActivityCache'; export * from './models'; export * from './utils'; diff --git a/packages/commonwealth/main.ts b/packages/commonwealth/main.ts index 0aacd89bcfb..5bad18566d8 100644 --- a/packages/commonwealth/main.ts +++ b/packages/commonwealth/main.ts @@ -1,7 +1,6 @@ import { CacheDecorator, setupErrorHandlers } from '@hicommonwealth/adapters'; import { logger } from '@hicommonwealth/core'; import type { DB } from '@hicommonwealth/model'; -import { GlobalActivityCache } from '@hicommonwealth/model'; import sgMail from '@sendgrid/mail'; import compression from 'compression'; import SessionSequelizeStore from 'connect-session-sequelize'; @@ -153,11 +152,6 @@ export async function main( setupMiddleware(); setupPassport(db); - // TODO: decouple as global singleton - const globalActivityCache = GlobalActivityCache.getInstance(); - // initialize async to avoid blocking startup - if 
(!noGlobalActivityCache) globalActivityCache.start(); - // Declare Validation Middleware Service // middleware to use for all requests const dbValidationService: DatabaseValidationService = @@ -168,7 +162,6 @@ export async function main( app, db, viewCountCache, - globalActivityCache, dbValidationService, cacheDecorator, ); diff --git a/packages/commonwealth/server/api/external-router.ts b/packages/commonwealth/server/api/external-router.ts index 1a10e9033bf..7d07060ccee 100644 --- a/packages/commonwealth/server/api/external-router.ts +++ b/packages/commonwealth/server/api/external-router.ts @@ -36,17 +36,26 @@ const { createComment, updateComment, deleteComment, createCommentReaction } = const { getNewContent } = user.trpcRouter; const api = { - getGlobalActivity: trpc.query(Feed.GetGlobalActivity, trpc.Tag.User, true), - getUserActivity: trpc.query(Feed.GetUserActivity, trpc.Tag.User, true), + getGlobalActivity: trpc.query(Feed.GetGlobalActivity, trpc.Tag.User, { + forceSecure: true, + ttlSecs: 60 * 5, + }), + getUserActivity: trpc.query(Feed.GetUserActivity, trpc.Tag.User, { + forceSecure: true, + }), getNewContent, - getCommunities: trpc.query( - Community.GetCommunities, - trpc.Tag.Community, - true, - ), - getCommunity: trpc.query(Community.GetCommunity, trpc.Tag.Community, true), - getMembers: trpc.query(Community.GetMembers, trpc.Tag.Community, true), - getComments: trpc.query(Comment.GetComments, trpc.Tag.Comment, true), + getCommunities: trpc.query(Community.GetCommunities, trpc.Tag.Community, { + forceSecure: true, + }), + getCommunity: trpc.query(Community.GetCommunity, trpc.Tag.Community, { + forceSecure: true, + }), + getMembers: trpc.query(Community.GetMembers, trpc.Tag.Community, { + forceSecure: true, + }), + getComments: trpc.query(Comment.GetComments, trpc.Tag.Comment, { + forceSecure: true, + }), createCommunity, updateCommunity, createTopic, diff --git a/packages/commonwealth/server/api/feed.ts b/packages/commonwealth/server/api/feed.ts index 
f97c222894e..aa14140eafa 100644 --- a/packages/commonwealth/server/api/feed.ts +++ b/packages/commonwealth/server/api/feed.ts @@ -2,6 +2,8 @@ import { trpc } from '@hicommonwealth/adapters'; import { Feed } from '@hicommonwealth/model'; export const trpcRouter = trpc.router({ - getGlobalActivity: trpc.query(Feed.GetGlobalActivity, trpc.Tag.User), + getGlobalActivity: trpc.query(Feed.GetGlobalActivity, trpc.Tag.User, { + ttlSecs: 60 * 5, + }), getUserActivity: trpc.query(Feed.GetUserActivity, trpc.Tag.User), }); diff --git a/packages/commonwealth/server/api/threads.ts b/packages/commonwealth/server/api/threads.ts index db78c142144..42fe481a1f7 100644 --- a/packages/commonwealth/server/api/threads.ts +++ b/packages/commonwealth/server/api/threads.ts @@ -1,5 +1,6 @@ import { trpc } from '@hicommonwealth/adapters'; -import { GlobalActivityCache, Reaction, Thread } from '@hicommonwealth/model'; +import { CacheNamespaces, cache } from '@hicommonwealth/core'; +import { Reaction, Thread } from '@hicommonwealth/model'; import { MixpanelCommunityInteractionEvent } from '../../shared/analytics/types'; import { applyCanvasSignedDataMiddleware } from '../federation'; @@ -36,11 +37,13 @@ export const trpcRouter = trpc.router({ deleteThread: trpc.command( Thread.DeleteThread, trpc.Tag.Thread, - async (_, output) => { - // Using track output middleware to invalidate gac + async () => { + // Using track output middleware to invalidate global activity cache // TODO: Generalize output middleware to cover (analytics, gac invalidation, canvas, etc) - const gac = GlobalActivityCache.getInstance(); - gac && (await gac.deleteActivityFromCache(output.thread_id)); + void cache().deleteKey( + CacheNamespaces.Query_Response, + 'GetGlobalActivity', + ); return undefined; }, applyCanvasSignedDataMiddleware, diff --git a/packages/commonwealth/server/controllers/server_comments_controller.ts b/packages/commonwealth/server/controllers/server_comments_controller.ts index 101d5dca915..dacfcb2a9a9 
100644 --- a/packages/commonwealth/server/controllers/server_comments_controller.ts +++ b/packages/commonwealth/server/controllers/server_comments_controller.ts @@ -1,4 +1,4 @@ -import { DB, GlobalActivityCache } from '@hicommonwealth/model'; +import { DB } from '@hicommonwealth/model'; import { SearchCommentsOptions, SearchCommentsResult, @@ -10,10 +10,7 @@ import { * */ export class ServerCommentsController { - constructor( - public models: DB, - public globalActivityCache?: GlobalActivityCache, - ) {} + constructor(public models: DB) {} /** * Returns comment search results. diff --git a/packages/commonwealth/server/controllers/server_threads_controller.ts b/packages/commonwealth/server/controllers/server_threads_controller.ts index d979eb7cadb..1386b768d83 100644 --- a/packages/commonwealth/server/controllers/server_threads_controller.ts +++ b/packages/commonwealth/server/controllers/server_threads_controller.ts @@ -1,4 +1,4 @@ -import { DB, GlobalActivityCache } from '@hicommonwealth/model'; +import { DB } from '@hicommonwealth/model'; import { CountThreadsOptions, CountThreadsResult, @@ -34,10 +34,7 @@ import { * Implements methods related to threads */ export class ServerThreadsController { - constructor( - public models: DB, - public globalActivityCache?: GlobalActivityCache, - ) {} + constructor(public models: DB) {} async getThreadsByIds( this: ServerThreadsController, diff --git a/packages/commonwealth/server/routes/viewGlobalActivity.ts b/packages/commonwealth/server/routes/viewGlobalActivity.ts deleted file mode 100644 index 4f46af1f2ca..00000000000 --- a/packages/commonwealth/server/routes/viewGlobalActivity.ts +++ /dev/null @@ -1,16 +0,0 @@ -import type { DB } from '@hicommonwealth/model'; -import { GlobalActivityCache } from '@hicommonwealth/model'; -import type { TypedRequestBody } from '../types'; -import { success } from '../types'; - -const viewGlobalActivity = async ( - models: DB, - globalActivityCache: GlobalActivityCache, - req: 
TypedRequestBody>, - res, -) => { - const activity = await globalActivityCache.getGlobalActivity(); - return success(res, activity); -}; - -export default viewGlobalActivity; diff --git a/packages/commonwealth/server/routing/router.ts b/packages/commonwealth/server/routing/router.ts index 4117cdc2b62..4766ea0e53f 100644 --- a/packages/commonwealth/server/routing/router.ts +++ b/packages/commonwealth/server/routing/router.ts @@ -39,7 +39,6 @@ import setDefaultRole from '../routes/setDefaultRole'; import upgradeMember, { upgradeMemberValidation, } from '../routes/upgradeMember'; -import viewGlobalActivity from '../routes/viewGlobalActivity'; import getUploadSignature from '../routes/getUploadSignature'; @@ -51,7 +50,7 @@ import updateCommunityCustomDomain from '../routes/updateCommunityCustomDomain'; import updateCommunityPriority from '../routes/updateCommunityPriority'; import type ViewCountCache from '../util/viewCountCache'; -import { type DB, type GlobalActivityCache } from '@hicommonwealth/model'; +import { type DB } from '@hicommonwealth/model'; import banAddress from '../routes/banAddress'; import setAddressWallet from '../routes/setAddressWallet'; @@ -125,14 +124,13 @@ function setupRouter( app: Express, models: DB, viewCountCache: ViewCountCache, - globalActivityCache: GlobalActivityCache, databaseValidationService: DatabaseValidationService, cacheDecorator: CacheDecorator, ) { // controllers const serverControllers: ServerControllers = { - threads: new ServerThreadsController(models, globalActivityCache), - comments: new ServerCommentsController(models, globalActivityCache), + threads: new ServerThreadsController(models), + comments: new ServerCommentsController(models), analytics: new ServerAnalyticsController(), profiles: new ServerProfilesController(models), communities: new ServerCommunitiesController(models), @@ -491,13 +489,6 @@ function setupRouter( getUploadSignature.bind(this, models), ); - registerRoute( - router, - 'post', - 
'/viewGlobalActivity', - viewGlobalActivity.bind(this, models, globalActivityCache), - ); - registerRoute( router, 'post', From 242aa7b92f58ff127840f74dbe5b5e6ef0465cfe Mon Sep 17 00:00:00 2001 From: rotorsoft Date: Tue, 15 Oct 2024 16:29:27 -0400 Subject: [PATCH 04/25] fix lint errors --- .../client/scripts/state/api/feeds/fetchUserActivity.ts | 5 +---- packages/commonwealth/main.ts | 2 -- packages/commonwealth/server/api/threads.ts | 4 ++-- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/packages/commonwealth/client/scripts/state/api/feeds/fetchUserActivity.ts b/packages/commonwealth/client/scripts/state/api/feeds/fetchUserActivity.ts index d1cfd6cf701..6863416f830 100644 --- a/packages/commonwealth/client/scripts/state/api/feeds/fetchUserActivity.ts +++ b/packages/commonwealth/client/scripts/state/api/feeds/fetchUserActivity.ts @@ -6,10 +6,7 @@ const GLOBAL_ACTIVITY_STALE_TIME = 5 * 60 * 1_000; // 5 minutes (backend caches export const useFetchGlobalActivityQuery = () => { return trpc.feed.getGlobalActivity.useQuery( - { - thread_limit: 50, - comment_limit: 3, - }, + {}, { staleTime: GLOBAL_ACTIVITY_STALE_TIME, cacheTime: USER_ACTIVITY_CACHE_TIME, diff --git a/packages/commonwealth/main.ts b/packages/commonwealth/main.ts index 5bad18566d8..448b6e72358 100644 --- a/packages/commonwealth/main.ts +++ b/packages/commonwealth/main.ts @@ -40,12 +40,10 @@ export async function main( db: DB, { port, - noGlobalActivityCache = true, withLoggingMiddleware = false, withPrerender = false, }: { port: number; - noGlobalActivityCache?: boolean; withLoggingMiddleware?: boolean; withPrerender?: boolean; }, diff --git a/packages/commonwealth/server/api/threads.ts b/packages/commonwealth/server/api/threads.ts index 42fe481a1f7..cee71742a6a 100644 --- a/packages/commonwealth/server/api/threads.ts +++ b/packages/commonwealth/server/api/threads.ts @@ -42,9 +42,9 @@ export const trpcRouter = trpc.router({ // TODO: Generalize output middleware to cover (analytics, gac 
invalidation, canvas, etc) void cache().deleteKey( CacheNamespaces.Query_Response, - 'GetGlobalActivity', + 'GetGlobalActivity_{}', // this is the global activity cache key ); - return undefined; + return Promise.resolve(undefined); }, applyCanvasSignedDataMiddleware, ), From 86c602d374438440032b533750091aa372f97a01 Mon Sep 17 00:00:00 2001 From: rotorsoft Date: Tue, 15 Oct 2024 16:35:56 -0400 Subject: [PATCH 05/25] fix lint errors --- packages/commonwealth/server.ts | 1 - packages/commonwealth/server/api/external-router.ts | 2 +- packages/commonwealth/server/api/feed.ts | 3 ++- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/commonwealth/server.ts b/packages/commonwealth/server.ts index 48cc9edb8ad..0704f3ed3d7 100644 --- a/packages/commonwealth/server.ts +++ b/packages/commonwealth/server.ts @@ -76,7 +76,6 @@ const start = async () => { main(app, models, { port: config.PORT, - noGlobalActivityCache: config.NO_GLOBAL_ACTIVITY_CACHE, withLoggingMiddleware: true, withPrerender: config.APP_ENV === 'production' && !config.NO_PRERENDER, }) diff --git a/packages/commonwealth/server/api/external-router.ts b/packages/commonwealth/server/api/external-router.ts index 7d07060ccee..742475cc0da 100644 --- a/packages/commonwealth/server/api/external-router.ts +++ b/packages/commonwealth/server/api/external-router.ts @@ -38,7 +38,7 @@ const { getNewContent } = user.trpcRouter; const api = { getGlobalActivity: trpc.query(Feed.GetGlobalActivity, trpc.Tag.User, { forceSecure: true, - ttlSecs: 60 * 5, + ttlSecs: config.NO_GLOBAL_ACTIVITY_CACHE ? 
undefined : 60 * 5, }), getUserActivity: trpc.query(Feed.GetUserActivity, trpc.Tag.User, { forceSecure: true, diff --git a/packages/commonwealth/server/api/feed.ts b/packages/commonwealth/server/api/feed.ts index aa14140eafa..b5a9d49a66b 100644 --- a/packages/commonwealth/server/api/feed.ts +++ b/packages/commonwealth/server/api/feed.ts @@ -1,9 +1,10 @@ import { trpc } from '@hicommonwealth/adapters'; import { Feed } from '@hicommonwealth/model'; +import { config } from '../../server/config'; export const trpcRouter = trpc.router({ getGlobalActivity: trpc.query(Feed.GetGlobalActivity, trpc.Tag.User, { - ttlSecs: 60 * 5, + ttlSecs: config.NO_GLOBAL_ACTIVITY_CACHE ? undefined : 60 * 5, }), getUserActivity: trpc.query(Feed.GetUserActivity, trpc.Tag.User), }); From 1ad0b6587a62d8f47bd093c1c80647d5fde264ce Mon Sep 17 00:00:00 2001 From: Marcin Date: Wed, 16 Oct 2024 09:31:58 +0200 Subject: [PATCH 06/25] Add namespace handling to topic flow and update WVMethodSelection logic. --- .../CommunityManagement/Topics/Topics.tsx | 38 ++++++++++++++++++- .../WVMethodSelection/WVMethodSelection.tsx | 17 ++++++--- .../pages/CommunityManagement/Topics/utils.ts | 1 + .../CommunityStakeStep/CommunityStakeStep.tsx | 10 ++++- 4 files changed, 59 insertions(+), 7 deletions(-) diff --git a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/Topics.tsx b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/Topics.tsx index 573b51d34c5..329344a53dd 100644 --- a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/Topics.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/Topics.tsx @@ -6,6 +6,7 @@ import CWFormSteps from 'views/components/component_kit/new_designs/CWFormSteps' import CWPageLayout from 'views/components/component_kit/new_designs/CWPageLayout'; import StakeIntegration from 'views/pages/CommunityManagement/StakeIntegration'; +import CommunityStakeStep from 
'../../CreateCommunity/steps/CommunityStakeStep'; import TopicDetails from './TopicDetails'; import WVConsent from './WVConsent'; import WVERC20Details from './WVERC20Details'; @@ -14,7 +15,9 @@ import { CreateTopicStep, getCreateTopicSteps } from './utils'; import { notifyError } from 'controllers/app/notifications'; import { useCommonNavigate } from 'navigation/helpers'; +import { useGetCommunityByIdQuery } from 'state/api/communities'; import { useCreateTopicMutation } from 'state/api/topics'; +import useUserStore from 'state/ui/user'; import './Topics.scss'; @@ -52,6 +55,19 @@ export const Topics = () => { const navigate = useCommonNavigate(); const { mutateAsync: createTopic } = useCreateTopicMutation(); + const { data: community } = useGetCommunityByIdQuery({ + id: app.activeChainId() || '', + includeNodeInfo: true, + }); + + const user = useUserStore(); + + const selectedAddress = user.addresses.find( + (x) => + x.address === user.activeAccount?.address && + x.community?.id === community?.id, + ); + const handleSetTopicFormData = (data: Partial) => { setTopicFormData((prevState) => ({ ...prevState, ...data })); }; @@ -113,7 +129,27 @@ export const Topics = () => { /> ); case CreateTopicStep.WVMethodSelection: - return ; + return ( + + ); + case CreateTopicStep.WVNamespaceEnablement: + return ( + + setCreateCommunityStep(CreateTopicStep.WVERC20Details) + } + createdCommunityName={community?.name} + createdCommunityId={community?.id || ''} + onTopicFlowStepChange={setCreateCommunityStep} + selectedAddress={selectedAddress!} + chainId={String(community?.ChainNode?.eth_chain_id)} + onlyNamespace + isTopicFlow + /> + ); case CreateTopicStep.WVERC20Details: return ( void; + hasNamespace: boolean; } enum WVMethod { @@ -22,18 +23,24 @@ enum WVMethod { Stake = 'Stake', } -const WVMethodSelection = ({ onStepChange }: WVMethodSelectionProps) => { +const WVMethodSelection = ({ + onStepChange, + hasNamespace, +}: WVMethodSelectionProps) => { const [selectedWVMethod, 
setSelectedWVMethod] = useState( null, ); const handleContinue = () => { - if (selectedWVMethod === WVMethod.ERC20) { - onStepChange(CreateTopicStep.WVERC20Details); - return; + if (selectedWVMethod === WVMethod.Stake) { + return onStepChange(CreateTopicStep.WVStake); } - onStepChange(CreateTopicStep.WVStake); + onStepChange( + hasNamespace + ? CreateTopicStep.WVERC20Details + : CreateTopicStep.WVNamespaceEnablement, + ); }; const canEnableStake = chainIdsWithStakeEnabled.includes( diff --git a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/utils.ts b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/utils.ts index c07776a9e5e..f9d49f9f54b 100644 --- a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/utils.ts +++ b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/utils.ts @@ -4,6 +4,7 @@ export enum CreateTopicStep { TopicDetails = 'TopicDetails', WVMethodSelection = 'WVMethodSelection', WVConsent = 'WVConsent', + WVNamespaceEnablement = 'WVNamespaceEnablement', WVERC20Details = 'WVERC20Details', WVStake = 'WVStake', } diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx index 20bcbf2088b..fb8f670bf9c 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx @@ -53,7 +53,15 @@ const CommunityStakeStep = ({ }; const onSuccessSignTransactions = () => { - isTopicFlow ? 
refetchStakeQuery?.() : goToSuccessStep(); + if (isTopicFlow) { + if (onlyNamespace) { + return goToSuccessStep(); + } else { + return refetchStakeQuery?.(); + } + } + + goToSuccessStep(); }; const onCancelSignTransactions = () => { From a9da4f99d5d6dbc2fe24c9f3d3dab6b78d93452c Mon Sep 17 00:00:00 2001 From: Marcin Date: Wed, 16 Oct 2024 10:02:17 +0200 Subject: [PATCH 07/25] 9552 Update FeatureHint based on weightedTopicsEnabled flag. --- .../CommunityInformationStep.tsx | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityInformationStep/CommunityInformationStep.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityInformationStep/CommunityInformationStep.tsx index e2f4090eb8e..c8d2b5b8874 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityInformationStep/CommunityInformationStep.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityInformationStep/CommunityInformationStep.tsx @@ -1,6 +1,7 @@ import { notifyError } from 'controllers/app/notifications'; import useAppStatus from 'hooks/useAppStatus'; import { useBrowserAnalyticsTrack } from 'hooks/useBrowserAnalyticsTrack'; +import { useFlag } from 'hooks/useFlag'; import React from 'react'; import { BaseMixpanelPayload, @@ -34,6 +35,7 @@ const CommunityInformationStep = ({ handleSelectedChainId, }: CommunityInformationStepProps) => { const { isAddedToHomeScreen } = useAppStatus(); + const weightedTopicsEnabled = useFlag('weightedTopics'); const { trackAnalytics } = useBrowserAnalyticsTrack< MixpanelLoginPayload | BaseMixpanelPayload @@ -109,9 +111,22 @@ const CommunityInformationStep = ({
Date: Wed, 16 Oct 2024 13:48:25 +0200 Subject: [PATCH 08/25] Refactor: Replace chainId with nodeEthChainId. Fix token finder --- .../state/api/tokens/getTokenMetadata.ts | 20 ++++++++++--------- .../components/TokenFinder/useTokenFinder.ts | 6 +++--- .../FundContestDrawer/useFundContestForm.ts | 2 +- .../Contests/ManageContest/ManageContest.tsx | 4 ++-- .../steps/DetailsFormStep/DetailsFormStep.tsx | 3 +-- .../Topics/WVERC20Details/WVERC20Details.tsx | 16 +++++++-------- .../pages/discussions/DiscussionsPage.tsx | 4 +--- 7 files changed, 26 insertions(+), 29 deletions(-) diff --git a/packages/commonwealth/client/scripts/state/api/tokens/getTokenMetadata.ts b/packages/commonwealth/client/scripts/state/api/tokens/getTokenMetadata.ts index e2dd2f44b81..879135472a5 100644 --- a/packages/commonwealth/client/scripts/state/api/tokens/getTokenMetadata.ts +++ b/packages/commonwealth/client/scripts/state/api/tokens/getTokenMetadata.ts @@ -4,7 +4,7 @@ import { fetchCachedNodes } from 'state/api/nodes'; interface UseTokenMetadataQueryProps { tokenId: string; - chainId: number; + nodeEthChainId: number; apiEnabled?: boolean; } @@ -17,15 +17,17 @@ export type GetTokenMetadataResponse = { const getTokenMetadata = async ({ tokenId, - chainId, + nodeEthChainId, }: UseTokenMetadataQueryProps): Promise => { - const ethereumNode = fetchCachedNodes()?.find((n) => n?.id === chainId); + const node = fetchCachedNodes()?.find( + (n) => n?.ethChainId === nodeEthChainId, + ); - if (!ethereumNode) { - throw new Error('Ethereum node not found'); + if (!node) { + throw new Error('Node not found'); } - const response = await axios.post(ethereumNode.url, { + const response = await axios.post(node.url, { params: [tokenId], method: 'alchemy_getTokenMetadata', }); @@ -35,12 +37,12 @@ const getTokenMetadata = async ({ const useTokenMetadataQuery = ({ tokenId, - chainId, + nodeEthChainId, apiEnabled = true, }: UseTokenMetadataQueryProps) => { return useQuery({ - queryKey: [tokenId, chainId], - 
queryFn: () => getTokenMetadata({ tokenId, chainId }), + queryKey: [tokenId, nodeEthChainId], + queryFn: () => getTokenMetadata({ tokenId, nodeEthChainId }), enabled: !!tokenId && apiEnabled, retry: false, }); diff --git a/packages/commonwealth/client/scripts/views/components/TokenFinder/useTokenFinder.ts b/packages/commonwealth/client/scripts/views/components/TokenFinder/useTokenFinder.ts index 06b93b06d07..42aa9cf029f 100644 --- a/packages/commonwealth/client/scripts/views/components/TokenFinder/useTokenFinder.ts +++ b/packages/commonwealth/client/scripts/views/components/TokenFinder/useTokenFinder.ts @@ -3,17 +3,17 @@ import { useTokenMetadataQuery } from 'state/api/tokens'; import { useDebounce } from 'usehooks-ts'; type UseTokenFinderProps = { - chainId: number; + nodeEthChainId: number; }; -const useTokenFinder = ({ chainId }: UseTokenFinderProps) => { +const useTokenFinder = ({ nodeEthChainId }: UseTokenFinderProps) => { const [tokenValue, setTokenValue] = useState(''); const debouncedTokenValue = useDebounce(tokenValue, 500); const { data: tokenMetadata, isLoading: tokenMetadataLoading } = useTokenMetadataQuery({ tokenId: debouncedTokenValue, - chainId, + nodeEthChainId, }); const getTokenError = () => { diff --git a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/FundContestDrawer/useFundContestForm.ts b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/FundContestDrawer/useFundContestForm.ts index d89e355ee10..36086376b58 100644 --- a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/FundContestDrawer/useFundContestForm.ts +++ b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/FundContestDrawer/useFundContestForm.ts @@ -31,7 +31,7 @@ const useFundContestForm = ({ }: UseFundContestFormProps) => { const [tokenAmount, setTokenAmount] = useState(INITIAL_AMOUNT); const { data: tokenMetadata } = useTokenMetadataQuery({ - chainId: chainNodeId, + 
nodeEthChainId: ethChainId, tokenId: fundingTokenAddress || '', }); const { data: tokenUsdRateData } = useFetchTokenUsdRateQuery({ diff --git a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/ManageContest/ManageContest.tsx b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/ManageContest/ManageContest.tsx index 98d2a6e8e16..d8bbf8ef885 100644 --- a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/ManageContest/ManageContest.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/ManageContest/ManageContest.tsx @@ -37,10 +37,10 @@ const ManageContest = ({ contestAddress }: ManageContestProps) => { contestAddress, }); - const chainId = app.chain.meta.ChainNode?.id || 0; + const nodeEthChainId = app.chain.meta.ChainNode?.eth_chain_id || 0; const { data: tokenMetadata } = useTokenMetadataQuery({ tokenId: contestFormData?.fundingTokenAddress || '', - chainId, + nodeEthChainId, apiEnabled: !!contestFormData?.fundingTokenAddress, }); diff --git a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/ManageContest/steps/DetailsFormStep/DetailsFormStep.tsx b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/ManageContest/steps/DetailsFormStep/DetailsFormStep.tsx index df11a0a264e..92604be93a9 100644 --- a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/ManageContest/steps/DetailsFormStep/DetailsFormStep.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Contests/ManageContest/steps/DetailsFormStep/DetailsFormStep.tsx @@ -97,7 +97,6 @@ const DetailsFormStep = ({ const { mutateAsync: updateContest } = useUpdateContestMutation(); - const chainId = app.chain.meta.ChainNode?.id || 0; const { tokenValue, setTokenValue, @@ -106,7 +105,7 @@ const DetailsFormStep = ({ tokenMetadata, tokenMetadataLoading, } = useTokenFinder({ - chainId: chainId, + 
nodeEthChainId: app.chain.meta.ChainNode?.eth_chain_id || 0, }); const communityId = app.activeChainId() || ''; diff --git a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVERC20Details/WVERC20Details.tsx b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVERC20Details/WVERC20Details.tsx index 2c3bc91c284..ab4e65a17a5 100644 --- a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVERC20Details/WVERC20Details.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVERC20Details/WVERC20Details.tsx @@ -1,6 +1,6 @@ import React, { useState } from 'react'; -import { commonProtocol } from '@hicommonwealth/shared'; +import app from 'state'; import { alphabeticallySortedChains } from 'views/components/CommunityInformationForm/constants'; import { CWDivider } from 'views/components/component_kit/cw_divider'; import { CWText } from 'views/components/component_kit/cw_text'; @@ -16,8 +16,6 @@ import TokenFinder, { useTokenFinder } from 'views/components/TokenFinder'; import { HandleCreateTopicProps } from 'views/pages/CommunityManagement/Topics/Topics'; import './WVERC20Details.scss'; -const ETH_CHAIN_NODE_ID = 37; - interface WVConsentProps { onStepChange: (step: CreateTopicStep) => void; onCreateTopic: (props: HandleCreateTopicProps) => Promise; @@ -25,8 +23,9 @@ interface WVConsentProps { const WVERC20Details = ({ onStepChange, onCreateTopic }: WVConsentProps) => { const options = alphabeticallySortedChains.filter((c) => c.hasStakeEnabled); + const defaultChain = options.find( - (o) => o.value === commonProtocol.ValidChains.Base, + (o) => o.value === app.chain.meta.ChainNode?.eth_chain_id, ); const [selectedChain, setSelectedChain] = useState(defaultChain); @@ -39,7 +38,7 @@ const WVERC20Details = ({ onStepChange, onCreateTopic }: WVConsentProps) => { tokenMetadataLoading, tokenValue, } = useTokenFinder({ - chainId: ETH_CHAIN_NODE_ID, + nodeEthChainId: 
app.chain.meta.ChainNode?.eth_chain_id || 0, }); const editMode = false; @@ -72,13 +71,12 @@ const WVERC20Details = ({ onStepChange, onCreateTopic }: WVConsentProps) => { Connect ERC20 token - Supported chains + Your community chain - The following are the pre-selected chain(s) all token features will be - interacting with. + All token features will be interacting with following chain. { const { data: tokenMetadata } = useTokenMetadataQuery({ tokenId: topicObj?.tokenAddress || '', - chainId: ETH_CHAIN_NODE_ID, + nodeEthChainId: app?.chain.meta?.ChainNode?.eth_chain_id || 0, }); const { fetchNextPage, data, isInitialLoading, hasNextPage } = From 6d2b7806355542d57bbe25dccec82a94f911a4f5 Mon Sep 17 00:00:00 2001 From: Marcin Date: Wed, 16 Oct 2024 14:53:07 +0200 Subject: [PATCH 09/25] Update popover content for ERC20 and Stake options with detailed descriptions. --- .../WVMethodSelection/WVMethodSelection.tsx | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVMethodSelection/WVMethodSelection.tsx b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVMethodSelection/WVMethodSelection.tsx index 61e2a1b19a0..ce9723cf6e2 100644 --- a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVMethodSelection/WVMethodSelection.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVMethodSelection/WVMethodSelection.tsx @@ -66,7 +66,16 @@ const WVMethodSelection = ({ onSelect={setSelectedWVMethod} label="Connect ERC20 token" description="Only ERC20s" - popover={{ title: 'Example', body: <>lorem ipsum }} + popover={{ + title: 'ERC20', + body: ( + + Use any ERC 20 token that is on the same chain as your + community. 
ERC20s can be used for weighted voting and running + contests + + ), + }} isSelected={selectedWVMethod === WVMethod.ERC20} /> @@ -77,7 +86,16 @@ const WVMethodSelection = ({ description="Use non-transferable tokens" popover={ canEnableStake - ? { title: 'Example', body: <>lorem ipsum } + ? { + title: 'Stake', + body: ( + + Community Stake lets you buy a stake in your community + using a fungible non transferable token. This token can + be used for weighted voting and running contests + + ), + } : { title: 'Disabled', body: 'Stake is not supported on your network', From 250b9f2cf6051561dd6ea9e8c18842e44b1f9bde Mon Sep 17 00:00:00 2001 From: Marcin Date: Wed, 16 Oct 2024 15:58:01 +0200 Subject: [PATCH 10/25] Update descriptions to specify 'onchain' features and clarify chain selection. --- .../Topics/WVERC20Details/WVERC20Details.tsx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVERC20Details/WVERC20Details.tsx b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVERC20Details/WVERC20Details.tsx index ab4e65a17a5..2115e354156 100644 --- a/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVERC20Details/WVERC20Details.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CommunityManagement/Topics/WVERC20Details/WVERC20Details.tsx @@ -73,7 +73,8 @@ const WVERC20Details = ({ onStepChange, onCreateTopic }: WVConsentProps) => { Your community chain - All token features will be interacting with following chain. + All onchain features will be interacting with the following chain. Chain + selection is only available when the community is created { Primary token - Any token features such as voting or tipping require your community to + Any onchain features such as voting or tipping require your community to connect a primary token. 
Date: Wed, 16 Oct 2024 16:01:29 +0200 Subject: [PATCH 11/25] Remove useFlag import and weightedTopicsEnabled; refactor handleOptInEnablingStake. --- .../steps/CommunityStakeStep/CommunityStakeStep.tsx | 7 +++++-- .../views/pages/CreateCommunity/useCreateCommunity.ts | 3 --- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx index fb8f670bf9c..57df3c13408 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/steps/CommunityStakeStep/CommunityStakeStep.tsx @@ -41,8 +41,11 @@ const CommunityStakeStep = ({ symbol: symbol || (createdCommunityName || '').toUpperCase().slice(0, 4), }); - const handleOptInEnablingStake = ({ namespace, symbol }) => { - setCommunityStakeData({ namespace, symbol }); + const handleOptInEnablingStake = (stakeData: { + namespace: string; + symbol: string; + }) => { + setCommunityStakeData(stakeData); setEnableStakePage(false); }; diff --git a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/useCreateCommunity.ts b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/useCreateCommunity.ts index f0856743fd7..1f4120681b4 100644 --- a/packages/commonwealth/client/scripts/views/pages/CreateCommunity/useCreateCommunity.ts +++ b/packages/commonwealth/client/scripts/views/pages/CreateCommunity/useCreateCommunity.ts @@ -1,4 +1,3 @@ -import { useFlag } from 'hooks/useFlag'; import AddressInfo from 'models/AddressInfo'; import { useState } from 'react'; import { chainIdsWithStakeEnabled } from 'views/components/CommunityInformationForm/constants'; @@ -13,8 +12,6 @@ const useCreateCommunity = () => { { type: null, chainBase: null }, ); - const 
weightedTopicsEnabled = useFlag('weightedTopics'); - // @ts-expect-error StrictNullChecks const [selectedAddress, setSelectedAddress] = useState(null); const [selectedChainId, setSelectedChainId] = useState(null); From cccf4d1f015950cd68289379ca2618b5dc6e2008 Mon Sep 17 00:00:00 2001 From: kassad Date: Wed, 16 Oct 2024 08:32:28 -0700 Subject: [PATCH 12/25] Fixed datadog --- Dockerfile.datadog | 2 ++ scripts/datadog-entrypoint.sh | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Dockerfile.datadog b/Dockerfile.datadog index 1563a60318b..60b6aa2d250 100644 --- a/Dockerfile.datadog +++ b/Dockerfile.datadog @@ -25,4 +25,6 @@ RUN apt-get update && apt-get -y install --reinstall datadog-agent # Expose DogStatsD and trace-agent ports EXPOSE 8125/udp 8126/tcp +RUN mkdir -p /var/run/datadog + COPY datadog-config/ /etc/datadog-agent/ \ No newline at end of file diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index 97f518f497a..25a7c3edfd4 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -1,6 +1,6 @@ #!/bin/sh -if [ "$APP_ENV" = "production" ]; then +if [ "$APP_ENV" = "production" ] || [ "$APP_ENV" = "frick" ]; then datadog-agent run & /opt/datadog-agent/embedded/bin/trace-agent --config=/etc/datadog-agent/datadog.yaml & /opt/datadog-agent/embedded/bin/process-agent --config=/etc/datadog-agent/datadog.yaml & @@ -12,4 +12,4 @@ if [ -z "$1" ]; then exit 1 fi -exec "$@" \ No newline at end of file +exec "$@" From 5a5063647b005e779b6b4684997205b180e3210f Mon Sep 17 00:00:00 2001 From: kassad Date: Wed, 16 Oct 2024 11:01:41 -0700 Subject: [PATCH 13/25] Fixed datadog --- scripts/datadog-entrypoint.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index 25a7c3edfd4..fc8c6514eb4 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -1,5 +1,7 @@ #!/bin/sh +DD_HOSTNAME=$(hostname) + if [ "$APP_ENV" = "production" 
] || [ "$APP_ENV" = "frick" ]; then datadog-agent run & /opt/datadog-agent/embedded/bin/trace-agent --config=/etc/datadog-agent/datadog.yaml & From 77602f56e3f5ce5febfd175ac1d120ea1bee29f2 Mon Sep 17 00:00:00 2001 From: kassad Date: Wed, 16 Oct 2024 11:04:39 -0700 Subject: [PATCH 14/25] Fixed datadog --- scripts/datadog-entrypoint.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index fc8c6514eb4..37ad3669869 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -1,8 +1,8 @@ #!/bin/sh -DD_HOSTNAME=$(hostname) +export DD_HOSTNAME=$(hostname) -if [ "$APP_ENV" = "production" ] || [ "$APP_ENV" = "frick" ]; then +if [ "$APP_ENV" = "production" ]; then datadog-agent run & /opt/datadog-agent/embedded/bin/trace-agent --config=/etc/datadog-agent/datadog.yaml & /opt/datadog-agent/embedded/bin/process-agent --config=/etc/datadog-agent/datadog.yaml & From cca55f2bb7b3dd9f324f5054ae20d7b0048ff357 Mon Sep 17 00:00:00 2001 From: israellund Date: Wed, 16 Oct 2024 18:27:13 -0400 Subject: [PATCH 15/25] changed collorators to names and made clickable --- .../AuthorAndPublishInfo.scss | 9 ++++++ .../AuthorAndPublishInfo.tsx | 29 ++++++++++++------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.scss b/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.scss index e05bf8c9bdc..7945635197f 100644 --- a/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.scss +++ b/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.scss @@ -120,4 +120,13 @@ display: flex; flex-direction: column; gap: 4px; + + .collaborator-user-name { + color: $neutral-600; + } + + 
.collaborator-user-name:hover { + text-decoration: underline; + text-decoration-color: $neutral-600; + } } diff --git a/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.tsx b/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.tsx index 612ea2df977..f9151a98747 100644 --- a/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.tsx +++ b/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.tsx @@ -2,6 +2,7 @@ import { PopperPlacementType } from '@mui/base/Popper'; import { threadStageToLabel } from 'helpers'; import moment from 'moment'; import React, { useRef } from 'react'; +import { Link } from 'react-router-dom'; import { useGetCommunityByIdQuery } from 'state/api/communities'; import { ArchiveTrayWithTooltip } from 'views/components/ArchiveTrayWithTooltip'; import { LockWithTooltip } from 'views/components/LockWithTooltip'; @@ -188,17 +189,23 @@ export const AuthorAndPublishInfo = ({ content={
{/*@ts-expect-error */} - {collaboratorsInfo.map(({ address, community_id, User }) => { - return ( - - ); - })} + {collaboratorsInfo.map( + ({ + User, + }: { + address: string; + community_id: string; + User: { id: number; profile: UserProfile }; + }) => { + return ( + + + {User.profile.name} + + + ); + }, + )}
} {...popoverProps} From 70d8017b75f37d029d76d0d7fda63ea29972a0b8 Mon Sep 17 00:00:00 2001 From: Timothee Legros Date: Thu, 17 Oct 2024 01:59:54 +0300 Subject: [PATCH 16/25] datadog script fixes v1 --- Dockerfile.datadog | 1 + datadog-config/datadog.yaml | 4553 +++++++++++++++++++++++++++++++++ scripts/datadog-entrypoint.sh | 163 +- 3 files changed, 4712 insertions(+), 5 deletions(-) diff --git a/Dockerfile.datadog b/Dockerfile.datadog index 60b6aa2d250..186bd2d097d 100644 --- a/Dockerfile.datadog +++ b/Dockerfile.datadog @@ -27,4 +27,5 @@ EXPOSE 8125/udp 8126/tcp RUN mkdir -p /var/run/datadog +# output dir MUST match directory set to DD_CONF_DIR in datadog-entrypoint.sh COPY datadog-config/ /etc/datadog-agent/ \ No newline at end of file diff --git a/datadog-config/datadog.yaml b/datadog-config/datadog.yaml index e69de29bb2d..af5c31ab8c1 100644 --- a/datadog-config/datadog.yaml +++ b/datadog-config/datadog.yaml @@ -0,0 +1,4553 @@ +{{ if .Common }} +######################### +## Basic Configuration ## +######################### + +## @param api_key - string - required +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +## Create a new API key here: https://app.datadoghq.com/organization-settings/api-keys . +## Read more about API keys here: https://docs.datadoghq.com/account_management/api-app-keys/#api-keys . +#api_key: + +## @param app_key - string - optional +## The application key used to access Datadog's programatic API. +## Create a new application key here: https://app.datadoghq.com/organization-settings/application-keys . +## Read more about application keys here: https://docs.datadoghq.com/account_management/api-app-keys/#application-keys . +# +# app_key: + +## @param site - string - optional - default: datadoghq.com +## @env DD_SITE - string - optional - default: datadoghq.com +## The site of the Datadog intake to send Agent data to. 
+## Set to 'datadoghq.eu' to send data to the EU site. +## Set to 'us3.datadoghq.com' to send data to the US3 site. +## Set to 'us5.datadoghq.com' to send data to the US5 site. +## Set to 'ap1.datadoghq.com' to send data to the AP1 site. +## Set to 'ddog-gov.com' to send data to the US1-FED site. +# +# site: datadoghq.com + +## @param dd_url - string - optional - default: https://app.datadoghq.com +## @env DD_DD_URL - string - optional - default: https://app.datadoghq.com +## @env DD_URL - string - optional - default: https://app.datadoghq.com +## The host of the Datadog intake server to send metrics to, only set this option +## if you need the Agent to send metrics to a custom URL, it overrides the site +## setting defined in "site". It does not affect APM, Logs or Live Process intake which have their +## own "*_dd_url" settings. +## If DD_DD_URL and DD_URL are both set, DD_DD_URL is used in priority. +# +# dd_url: https://app.datadoghq.com + +## @param proxy - custom object - optional +## @env DD_PROXY_HTTP - string - optional +## @env DD_PROXY_HTTPS - string - optional +## @env DD_PROXY_NO_PROXY - space separated list of strings - optional +## If you need a proxy to connect to the Internet, provide it here (default: +## disabled). Refer to https://docs.datadoghq.com/agent/proxy/ to understand how to use these settings. +## For Logs proxy information, refer to https://docs.datadoghq.com/agent/proxy/#proxy-for-logs +# +# proxy: +# https: http://:@: +# http: http://:@: +# no_proxy: +# - +# - + +## @param skip_ssl_validation - boolean - optional - default: false +## @env DD_SKIP_SSL_VALIDATION - boolean - optional - default: false +## Setting this option to "true" tells the Agent to skip validation of SSL/TLS certificates. 
+# +# skip_ssl_validation: false + +## @param sslkeylogfile - string - optional - default: "" +## @env DD_SSLKEYLOGFILE - string - optional - default: "" +## sslkeylogfile specifies a destination for TLS master secrets +## in NSS key log format to allow external programs +## such as Wireshark to decrypt TLS connections. +## For more details, see https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format. +## Use of sslkeylogfile compromises security and should only be +## used for debugging. +# sslkeylogfile: "" + + +## @param min_tls_version - string - optional - default: "tlsv1.2" +## @env DD_MIN_TLS_VERSION - string - optional - default: "tlsv1.2" +## This option defines the minimum TLS version that will be used when +## submitting data to the Datadog intake specified in "site" or "dd_url". +## This parameter defaults to "tlsv1.2". +## Possible values are: tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3; values are case- +## insensitive. +# +# min_tls_version: "tlsv1.2" + +## @param hostname - string - optional - default: auto-detected +## @env DD_HOSTNAME - string - optional - default: auto-detected +## Force the hostname name. +# +# hostname: + +## @param hostname_file - string - optional +## @env DD_HOSTNAME_FILE - string - optional +## In some environments, auto-detection of the hostname is not adequate and +## environment variables cannot be used to set the value. In such cases, the +## file on the host can also be used provide an appropriate value. If +## 'hostname' value has been set to a non-empty value, this option is ignored. +# +# hostname_file: /var/lib/cloud/data/instance-id + +## @param hostname_fqdn - boolean - optional - default: false +## @env DD_HOSTNAME_FQDN - boolean - optional - default: false +## When the Agent relies on the OS to determine the hostname, make it use the +## FQDN instead of the short hostname. 
Recommended value: true +## More information at https://dtdg.co/flag-hostname-fqdn +# +# hostname_fqdn: false + +## @param hostname_trust_uts_namespace - boolean - optional - default: false +## @env DD_HOSTNAME_TRUST_UTS_NAMESPACE - boolean - optional - default: false +## By default the Agent does not trust the hostname value retrieved from non-root UTS namespace, +## as it's usually a generated name, unrelated to the host (e.g. when running in a container). +## When enabled, the Agent will trust the value retrieved from non-root UTS namespace instead of failing +## hostname resolution. +## (Linux only) +# +# hostname_trust_uts_namespace: false + +## @param host_aliases - list of strings - optional +## @env DD_HOST_ALIASES - space separated list of strings - optional +## List of host aliases to report in addition to any aliases collected +## automatically from cloud providers. +## More information at +## https://docs.datadoghq.com/agent/faq/how-datadog-agent-determines-the-hostname/?tab=agentv6v7#host-aliases +# +# host_aliases: +# - +# - + +## @param tags - list of key:value elements - optional +## @env DD_TAGS - space separated list of strings - optional +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +## +## This configuration value merges with `DD_EXTRA_TAGS`, allowing some +## tags to be set in a configuration file (`tags`), and additional tags to be added +## with an environment variable (`DD_EXTRA_TAGS`). +## +## Learn more about tagging: https://docs.datadoghq.com/tagging/ +# +# tags: +# - team:infra +# - : + +## @param extra_tags - list of key:value elements - optional +## @env DD_EXTRA_TAGS - space separated list of strings - optional +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. 
+## +## This configuration value merges with `tags`, allowing some +## tags to be set in a configuration file (`tags`), and additional tags to be added +## with an environment variable (`DD_EXTRA_TAGS`). +## +## Learn more about tagging: https://docs.datadoghq.com/tagging/ +# +# extra_tags: +# - region:northerly +# - : + +## @param env - string - optional +## @env DD_ENV - string - optional +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +# +# env: + +## @param tag_value_split_separator - map - optional +## @env DD_TAG_VALUE_SPLIT_SEPARATOR - list of key:value strings - optional +## Split tag values according to a given separator. Only applies to host tags, +## and tags coming from container integrations. It does not apply to tags on dogstatsd metrics, +## and tags collected by other integrations. +## +## Example use-case: +## +## With a raw collected tag "foo:1;2;3", using the following configuration: +## +## tag_value_split_separator: +## foo: ; +## +## results in the raw tag being transformed into "foo:1", "foo:2", "foo:3" tags +# +# tag_value_split_separator: +# : + +## @param checks_tag_cardinality - string - optional - default: low +## @env DD_CHECKS_TAG_CARDINALITY - string - optional - default: low +## Configure the level of granularity of tags to send for checks metrics and events. Choices are: +## * low: add tags about low-cardinality objects (clusters, hosts, deployments, container images, ...) +## * orchestrator: add tags about pod, (in Kubernetes), or task (in ECS or Mesos) -level of cardinality +## * high: add tags about high-cardinality objects (individual containers, user IDs in requests, ...) +## WARNING: sending container tags for checks metrics may create more metrics +## (one per container instead of one per host). This may impact your custom metrics billing. 
+# +# checks_tag_cardinality: low + +## @param dogstatsd_tag_cardinality - string - optional - default: low +## @env DD_DOGSTATSD_TAG_CARDINALITY - string - optional - default: low +## Configure the level of granularity of tags to send for DogStatsD metrics and events. Choices are: +## * low: add tags about low-cardinality objects (clusters, hosts, deployments, container images, ...) +## * orchestrator: add tags about pod, (in Kubernetes), or task (in ECS or Mesos) -level of cardinality +## * high: add tags about high-cardinality objects (individual containers, user IDs in requests, ...) +## +## WARNING: sending container tags for dogstatsd metrics may create more metrics +## (one per container instead of one per host). This may impact your custom metrics billing. +# +# dogstatsd_tag_cardinality: low + +## @param histogram_aggregates - list of strings - optional - default: ["max", "median", "avg", "count"] +## @env DD_HISTOGRAM_AGGREGATES - space separated list of strings - optional - default: max median avg count +## Configure which aggregated value to compute. +## Possible values are: min, max, median, avg, sum and count. +# +# histogram_aggregates: +# - max +# - median +# - avg +# - count + +## @param histogram_percentiles - list of strings - optional - default: ["0.95"] +## @env DD_HISTOGRAM_PERCENTILES - space separated list of strings - optional - default: 0.95 +## Configure which percentiles are computed by the Agent. It must be a list of float between 0 and 1. +## Warning: percentiles must be specified as yaml strings +# +# histogram_percentiles: +# - "0.95" + +## @param histogram_copy_to_distribution - boolean - optional - default: false +## @env DD_HISTOGRAM_COPY_TO_DISTRIBUTION - boolean - optional - default: false +## Copy histogram values to distributions for true global distributions (in beta) +## Note: This increases the number of custom metrics created. 
+# +# histogram_copy_to_distribution: false + +## @param histogram_copy_to_distribution_prefix - string - optional +## @env DD_HISTOGRAM_COPY_TO_DISTRIBUTION_PREFIX - string - optional +## A prefix to add to distribution metrics created when histogram_copy_to_distributions is true +# +# histogram_copy_to_distribution_prefix: "" + +## @param aggregator_stop_timeout - integer - optional - default: 2 +## @env DD_AGGREGATOR_STOP_TIMEOUT - integer - optional - default: 2 +## When stopping the agent, the Aggregator will try to flush out data ready for +## aggregation (metrics, events, ...). Data are flushed to the Forwarder in order +## to be sent to Datadog, therefore the Agent might take at most +## 'aggregator_stop_timeout'+'forwarder_stop_timeout' seconds to exit. +## +## You can set the maximum amount of time, in seconds, allocated to the +## Aggregator to do so. You can disable this feature by setting +## 'aggregator_stop_timeout' to 0. +# +# aggregator_stop_timeout: 2 + +## @param aggregator_buffer_size - integer - optional - default: 100 +## @env DD_AGGREGATOR_BUFFER_SIZE - integer - optional - default: 100 +## The default buffer size for the aggregator use a sane value for most of the +## use cases, however, it could be useful to manually set it in order to trade +## RSS usage with better performances. +# +# aggregator_buffer_size: 100 + +## @param forwarder_timeout - integer - optional - default: 20 +## @env DD_FORWARDER_TIMEOUT - integer - optional - default: 20 +## Forwarder timeout in seconds +# +# forwarder_timeout: 20 + +## @param forwarder_retry_queue_payloads_max_size - integer - optional - default: 15728640 (15MB) +## @env DD_FORWARDER_RETRY_QUEUE_PAYLOADS_MAX_SIZE - integer - optional - default: 15728640 (15MB) +## It defines the maximum size in bytes of all the payloads in the forwarder's retry queue. 
+## The actual memory used is greater than the payloads size as there are extra fields like HTTP headers, +## but no more than 2.5 times the payload size. +# +# forwarder_retry_queue_payloads_max_size: 15728640 + +## @param forwarder_num_workers - integer - optional - default: 1 +## @env DD_FORWARDER_NUM_WORKERS - integer - optional - default: 1 +## The number of workers used by the forwarder. +# +# forwarder_num_workers: 1 + +## @param forwarder_stop_timeout - integer - optional - default: 2 +## @env DD_FORWARDER_STOP_TIMEOUT - integer - optional - default: 2 +## When stopping the agent, the Forwarder will try to flush all new +## transactions (not the ones in retry state). New transactions will be created +## as the Aggregator flush it's internal data too, therefore the Agent might take +## at most 'aggregator_stop_timeout'+'forwarder_stop_timeout' seconds to exit. +## +## You can set the maximum amount of time, in seconds, allocated to the +## Forwarder to send those transactions. You can disable this feature by setting +## 'forwarder_stop_timeout' to 0. +# +# forwarder_stop_timeout: 2 + +## @param forwarder_storage_max_size_in_bytes - integer - optional - default: 0 +## @env DD_FORWARDER_STORAGE_MAX_SIZE_IN_BYTES - integer - optional - default: 0 +## When the retry queue of the forwarder is full, `forwarder_storage_max_size_in_bytes` +## defines the amount of disk space the Agent can use to store transactions on the disk. +## When `forwarder_storage_max_size_in_bytes` is `0`, the transactions are never stored on the disk. +# +# forwarder_storage_max_size_in_bytes: 50000000 + +## @param forwarder_storage_max_disk_ratio - float - optional - default: 0.8 +## @env DD_FORWARDER_STORAGE_MAX_DISK_RATIO - float - optional - default: 0.8 +## `forwarder_storage_max_disk_ratio` defines the disk capacity limit for storing transactions. 
+## `0.8` means the Agent can store transactions on disk until `forwarder_storage_max_size_in_bytes` +## is reached or when the disk mount for `forwarder_storage_path` exceeds 80% of the disk capacity, +## whichever is lower. +# +# forwarder_storage_max_disk_ratio: 0.8 + +## @param forwarder_outdated_file_in_days - integer - optional - default: 10 +## @env DD_FORWARDER_OUTDATED_FILE_IN_DAYS - integer - optional - default: 10 +## This value specifies how many days the overflow transactions will remain valid before +## being discarded. During the Agent restart, if a retry file contains transactions that were +## created more than `forwarder_outdated_file_in_days` days ago, they are removed. +# +# forwarder_outdated_file_in_days: 10 + +## @param forwarder_high_prio_buffer_size - int - optional - default: 100 +## Defines the size of the high prio buffer. +## Increasing the buffer size can help if payload drops occur due to high prio buffer being full. +# +# forwarder_high_prio_buffer_size: 100 + +## @param forwarder_low_prio_buffer_size - int - optional - default: 100 +## Defines the size of the low prio buffer. +# +# forwarder_low_prio_buffer_size: 100 + +## @param forwarder_requeue_buffer_size - int - optional - default: 100 +## Defines the size of the requeue prio buffer. +# +# forwarder_requeue_buffer_size: 100 + +## @param forwarder_backoff_base - int - optional - default: 2 +## @env DD_FORWARDER_BACKOFF_BASE - integer - optional - default: 2 +## Defines the rate of exponential growth, and the first retry interval range. +## Do not set a lower value than the default. You may increase it if you use a proxy that benefits from a +## higher rate of exponential growth. +# forwarder_backoff_base: 2 + +## @param forwarder_backoff_max - int - optional - default: 64 +## @env DD_FORWARDER_BACKOFF_MAX - integer - optional - default: 64 +## Defines the maximum number of seconds to wait for a retry. +## Do not set a lower value than the default. 
You may increase it if you use a proxy that benefits from a +## higher maximum backoff time. +# forwarder_backoff_max: 64 + +## @param cloud_provider_metadata - list of strings - optional - default: ["aws", "gcp", "azure", "alibaba", "oracle", "ibm"] +## @env DD_CLOUD_PROVIDER_METADATA - space separated list of strings - optional - default: aws gcp azure alibaba oracle ibm +## This option restricts which cloud provider endpoint will be used by the +## agent to retrieve metadata. By default the agent will try # AWS, GCP, Azure +## and alibaba providers. Some cloud provider are not enabled by default to not +## trigger security alert when querying unknown IP (for example, when enabling +## Tencent on AWS). +## Setting an empty list will disable querying any cloud metadata endpoints +## (falling back on system metadata). Disabling metadata for the cloud provider in which an Agent runs may result in +## duplicated hosts in your Datadog account and missing Autodiscovery features +## +## Possible values are: +## "aws" AWS EC2, ECS/Fargate +## "gcp" Google Cloud Provider +## "azure" Azure +## "alibaba" Alibaba +## "tencent" Tencent +## "oracle" Oracle Cloud +## "ibm" IBM Cloud +# +# cloud_provider_metadata: +# - "aws" +# - "gcp" +# - "azure" +# - "alibaba" +# - "oracle" +# - "ibm" + +## @param collect_ec2_tags - boolean - optional - default: false +## @env DD_COLLECT_EC2_TAGS - boolean - optional - default: false +## Collect AWS EC2 custom tags as host tags. +## Requires one of: +## - `collect_ec2_tags_use_imds: true` and configuration of the +## EC2 instance to allow tags in instance metadata; or +## - configuration of the EC2 instance to have an IAM role with +## the `EC2:DescribeTags` permission. 
+## See docs for further details: +## https://docs.datadoghq.com/integrations/faq/how-do-i-pull-my-ec2-tags-without-using-the-aws-integration/ +# +# collect_ec2_tags: false + +## @param exclude_ec2_tags - list of strings - optional - default: [] +## @env DD_EXCLUDE_EC2_TAGS - space separated list of strings - optional - default: [] +## EC2 tags to exclude from being converted into host tags -- only applicable when collect_ec2_tags is true. This does +## not impact tags collected by the AWS Integration (see https://docs.datadoghq.com/integrations/amazon_web_services/ +## for more information on the AWS integration). +# +# exclude_ec2_tags: [] + +## @param collect_ec2_tags_use_imds - boolean - optional - default: false +## @env DD_COLLECT_EC2_TAGS_USE_IMDS - boolean - optional - default: false +## Use instance metadata service (IMDS) instead of EC2 API to collect AWS EC2 custom tags. +## Requires `collect_ec2_tags`. +# +# collect_ec2_tags_use_imds: false + +## @param ec2_metadata_timeout - integer - optional - default: 300 +## @env DD_EC2_METADATA_TIMEOUT - integer - optional - default: 300 +## Timeout in milliseconds on calls to the AWS EC2 metadata endpoints. +# +# ec2_metadata_timeout: 300 + +## @param ec2_prefer_imdsv2 - boolean - optional - default: false +## @env DD_EC2_PREFER_IMDSV2 - boolean - optional - default: false +## If this flag is true then the agent will request EC2 metadata using IMDS v2, +## which offers additional security for accessing metadata. However, in some +## situations (such as a containerized agent on a plain EC2 instance) it may +## require additional configuration on the AWS side. 
See the AWS guidelines +## for further details: +## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html#instance-metadata-transition-to-version-2 +# +# ec2_prefer_imdsv2: false + +## @param collect_gce_tags - boolean - optional - default: true +## @env DD_COLLECT_GCE_TAGS - boolean - optional - default: true +## Collect Google Cloud Engine metadata as host tags +# +# collect_gce_tags: true + +## @param exclude_gce_tags - list of strings - optional - default: ["bosh_settings" ,"cli-cert" ,"common-psm1" ,"configure-sh" ,"containerd-configure-sh" ,"disable-address-manager" ,"disable-legacy-endpoints" ,"enable-oslogin" ,"gce-container-declaration" ,"google-container-manifest" ,"ipsec-cert" ,"k8s-node-setup-psm1" ,"kube-env" ,"kubeconfig" ,"kubelet-config" ,"serial-port-logging-enable" ,"shutdown-script" ,"ssh-keys" ,"sshKeys" ,"ssl-cert" ,"startup-script" ,"user-data" ,"windows-keys" ,"windows-startup-script-ps1"] +## @env DD_EXCLUDE_GCE_TAGS - space separated list of strings - optional - default: bosh_settings cli-cert common-psm1 configure-sh containerd-configure-sh disable-address-manager disable-legacy-endpoints enable-oslogin gce-container-declaration google-container-manifest ipsec-cert k8s-node-setup-psm1 kube-env kubeconfig kubelet-config serial-port-logging-enable shutdown-script ssh-keys sshKeys ssl-cert startup-script user-data windows-keys windows-startup-script-ps1 +## Google Cloud Engine metadata attribute to exclude from being converted into +## host tags -- only applicable when collect_gce_tags is true. 
+# +# exclude_gce_tags: +# - "bosh_settings" +# - "cli-cert" +# - "common-psm1" +# - "configure-sh" +# - "containerd-configure-sh" +# - "disable-address-manager" +# - "disable-legacy-endpoints" +# - "enable-oslogin" +# - "gce-container-declaration" +# - "google-container-manifest" +# - "ipsec-cert" +# - "k8s-node-setup-psm1" +# - "kube-env" +# - "kubeconfig" +# - "kubelet-config" +# - "serial-port-logging-enable" +# - "shutdown-script" +# - "ssh-keys" +# - "sshKeys" +# - "ssl-cert" +# - "startup-script" +# - "user-data" +# - "windows-keys" +# - "windows-startup-script-ps1" + +## @param gce_send_project_id_tag - bool - optional - default: false +## @env DD_GCE_SEND_PROJECT_ID_TAG - bool - optional - default: false +## Send the project ID host tag with the `project_id:` tag key in addition to +## the `project:` tag key. +# +# gce_send_project_id_tag: false + +## @param gce_metadata_timeout - integer - optional - default: 1000 +## @env DD_GCE_METADATA_TIMEOUT - integer - optional - default: 1000 +## Timeout in milliseconds on calls to the GCE metadata endpoints. +# +# gce_metadata_timeout: 1000 + +## @param azure_hostname_style - string - optional - default: "os" +## @env DD_AZURE_HOSTNAME_STYLE - string - optional - default: "os" +## Changes how agent hostname is set on Azure virtual machines. +## +## Possible values: +## "os" - use the hostname reported by the operating system (default) +## "name" - use the instance name +## "name_and_resource_group" - use a combination of the instance name and resource group name +## "full" - use a combination of the instance name, resource group name and subscription id +## "vmid" - use the instance id +# +# azure_hostname_style: "os" + +## @param scrubber - custom object - optional +## Configuration for scrubbing sensitive information from the Agent's logs, configuration and flares. 
+# +# scrubber: +# + ## @param scrubber.additional_keys - list of strings - optional + ## @env DD_SCRUBBER_ADDITIONAL_KEYS - space-separated list of strings - optional + ## By default, the Agent removes known sensitive keys from Agent and integrations YAML configs before + ## including them in the flare. + ## Use this parameter to define additional sensitive keys that the Agent should scrub from + ## the YAML files included in the flare. + # + # additional_keys: + # - "sensitive_key_1" + # - "sensitive_key_2" + +## @param no_proxy_nonexact_match - boolean - optional - default: false +## @env DD_NO_PROXY_NONEXACT_MATCH - boolean - optional - default: false +## Enable more flexible no_proxy matching. See https://godoc.org/golang.org/x/net/http/httpproxy#Config +## for more information on accepted matching criteria. +# +# no_proxy_nonexact_match: false + +## @param use_proxy_for_cloud_metadata - boolean - optional - default: false +## @env DD_USE_PROXY_FOR_CLOUD_METADATA - boolean - optional - default: false +## By default cloud provider IP's are added to the transport's `no_proxy` list. +## Use this parameter to remove them from the `no_proxy` list. +# +# use_proxy_for_cloud_metadata: false + +## @param inventories_configuration_enabled - boolean - optional - default: true +## @env DD_INVENTORIES_CONFIGURATION_ENABLED - boolean - optional - default: true +## By default the Agent sends its own configuration to Datadog to be displayed in the `Agent Configuration` section of the host +## detail panel. See https://docs.datadoghq.com/infrastructure/list/#agent-configuration for more information. +## +## The Agent configuration is scrubbed of any sensitive information. +# +# inventories_configuration_enabled: true + +## @param auto_exit - custom object - optional +## Configuration for the automatic exit mechanism: the Agent stops when some conditions are met. 
+# +# auto_exit: + + ## @param noprocess - custom object - optional + ## Configure the `noprocess` automatic exit method. + ## Detect when no other processes (non-agent) are running to trigger automatic exit. `HOST_PROC` is taken into account when gathering processes. + ## Feature is only supported on POSIX systems. + # + # noprocess: + ## @param enabled - boolean - optional - default: false + ## @env DD_AUTO_EXIT_NOPROCESS_ENABLED - boolean - optional - default: false + ## Enable the `noprocess` method + # + # enabled: false + + ## @param excluded_processes - list of strings - optional + ## @env DD_AUTO_EXIT_NOPROCESS_EXCLUDED_PROCESSES - space separated list of strings - optional + ## List of regular expressions to exclude extra processes (on top of built-in list). + # + # excluded_processes: [] + + ## @param validation_period - integer - optional - default: 60 + ## @env DD_AUTO_EXIT_VALIDATION_PERIOD - integer - optional - default: 60 + ## Time (in seconds) delay during which the auto exit validates that the selected method continuously detects an exit condition, before exiting. + ## The value is verified every 30s. By default, three consecutive checks need to return true to trigger an automatic exit. + # + # validation_period: 60 + + +## @param fips - custom object - optional +## [BETA] Enter specific configurations for using the FIPS proxy. +## Uncomment this parameter and the one below to enable them. +# +# fips: + + ## @param enabled - boolean - optional - default: false + ## @env DD_FIPS_ENABLED - boolean - optional - default: false + ## This feature is in BETA. + ## + ## Enable the use of the FIPS proxy to send data to the DataDog backend. Enabling this will force all outgoing traffic + ## from the Agent to the local proxy. + ## It's important to note that enabling this will not make the Datadog Agent FIPS compliant, but will force all outgoing + ## traffic to a local FIPS compliant proxy. 
The FIPS proxy need to be installed locally in addition to the agent. + ## + ## When setting this to true the following settings would be overridden, ignoring the values from the + ## configuration: + ## - dd_url + ## - apm_config.apm_dd_url + ## - apm_config.profiling_dd_url + ## - apm_config.telemetry.dd_url + ## - process_config.process_dd_url + ## - logs_config.use_http + ## - logs_config.logs_no_ssl + ## - logs_config.logs_dd_url + ## - database_monitoring.metrics.dd_url + ## - database_monitoring.activity.dd_url + ## - database_monitoring.samples.dd_url + ## - compliance_config.endpoints.dd_url + ## - runtime_security_config.endpoints.dd_url + ## - network_devices.metadata.dd_url + # + ## The agent will also ignore 'proxy.*' settings and environment variables related to proxy (HTTP_PROXY, HTTPS_PROXY, + ## DD_PROXY_HTTP and DD_PROXY_HTTPS). + # + # enabled: false + + ## @param local_address - string - optional - default: localhost + ## @env DD_FIPS_LOCAL_ADDRESS - string - optional - default: localhost + ## The local address that the FIPS proxy will bind ports on. + # + # local_address: localhost + +## @param observability_pipelines_worker - custom object - optional +## Configuration for forwarding telemetry to an Observability Pipelines Worker instead of Datadog. 
+## https://www.datadoghq.com/product/observability-pipelines/ +## Note: This config is interchangeable with `vector` +# +# observability_pipelines_worker: + + ## @param metrics - custom object - optional + ## Specific configurations for metrics + # + # metrics: + + ## @param enabled - boolean - optional - default: false + ## @env DD_OBSERVABILITY_PIPELINES_WORKER_METRICS_ENABLED - boolean - optional - default: false + ## Enables forwarding of metrics to an Observability Pipelines Worker + # + # enabled: false + + ## @param url - string - optional - default: "" + ## @env DD_OBSERVABILITY_PIPELINES_WORKER_METRICS_URL - string - optional - default: "" + ## URL endpoint for the Observability Pipelines Worker to send metrics to + # + # url: "http//127.0.0.1:8080" + + ## @param logs - custom object - optional + ## Specific configurations for logs + # + # logs: + + ## @param enabled - boolean - optional - default: false + ## @env DD_OBSERVABILITY_PIPELINES_WORKER_LOGS_ENABLED - boolean - optional - default: false + ## Enables forwarding of logs to an Observability Pipelines Worker + # + # enabled: false + + ## @param url - string - optional - default: "" + ## @env DD_OBSERVABILITY_PIPELINES_WORKER_LOGS_URL - string - optional - default: "" + ## URL endpoint for the Observability Pipelines Worker to send logs to + # + # url: "http//127.0.0.1:8080" + + ## @param traces - custom object - optional + ## Specific configurations for traces + # + # traces: + + ## @param enabled - boolean - optional - default: false + ## @env DD_OBSERVABILITY_PIPELINES_WORKER_TRACES_ENABLED - boolean - optional - default: false + ## Enables forwarding of traces to an Observability Pipelines Worker + # + # enabled: false + + ## @param url - string - optional - default: "" + ## @env DD_OBSERVABILITY_PIPELINES_WORKER_TRACES_URL - string - optional - default: "" + ## URL endpoint for the Observability Pipelines Worker to send traces to + # + # url: "http//127.0.0.1:8080" + +{{ end }} +{{- if .Agent 
}} +{{- if .Python }} +{{- if .BothPythonPresent -}} +## @param python_version - integer - optional - default: 2 +## @env DD_PYTHON_VERSION - integer - optional - default: 2 +## The major version of Python used to run integrations and custom checks. +## The only supported values are 2 (to use Python 2) or 3 (to use Python 3). +## Do not change this option when using the official Docker Agent images. +# +# python_version: 2 + +{{ end -}} +{{ end }} + +############################ +## Advanced Configuration ## +############################ + +## @param confd_path - string - optional +## @env DD_CONFD_PATH - string - optional +## The path containing check configuration files. By default, uses the conf.d folder +## located in the Agent configuration folder. +# +# confd_path: "" + +## @param additional_checksd - string - optional +## @env DD_ADDITIONAL_CHECKSD - string - optional +## Additional path indicating where to search for Python checks. By default, uses the checks.d folder +## located in the Agent configuration folder. +# +# additional_checksd: + +## @param expvar_port - integer - optional - default: 5000 +## @env DD_EXPVAR_PORT - integer - optional - default: 5000 +## The port for the go_expvar server. +# +# expvar_port: 5000 + +## @param cmd_port - integer - optional - default: 5001 +## @env DD_CMD_PORT - integer - optional - default: 5001 +## The port on which the IPC api listens. +# +# cmd_port: 5001 + +## @param GUI_port - integer - optional +## @env DD_GUI_PORT - integer - optional +## The port for the browser GUI to be served. +## Setting 'GUI_port: -1' turns off the GUI completely +## Default is: +## * Windows & macOS : `5002` +## * Linux: `-1` +## +# +# GUI_port: + +## @param GUI_session_expiration - duration - optional +## @env GUI_SESSION_EXPIRATION - duration - optional +## The duration after which a GUI session will expire. +## Setting 'GUI_SESSION_EXPIRATION: 0' disable session expiration. +## Default is "0s" (sessions do not expire). 
+# +# GUI_session_expiration: + +## @param health_port - integer - optional - default: 0 +## @env DD_HEALTH_PORT - integer - optional - default: 0 +## The Agent can expose its health check on a dedicated http port. +## This is useful for orchestrators that support http probes. +## Default is 0 (disabled), set a valid port number (eg. 5555) to enable. +# +# health_port: 0 + +## @param check_runners - integer - optional - default: 4 +## @env DD_CHECK_RUNNERS - integer - optional - default: 4 +## The `check_runners` refers to the number of concurrent check runners available for check instance execution. +## The scheduler attempts to spread the instances over the collection interval and will _at most_ be +## running the number of check runners instances concurrently. +## Setting the value to 1 would result in checks running sequentially. +## +## This is a sensitive setting, and we do NOT recommend changing the default number +## of check runners in the general case. The level of concurrency has effects on +## the Agent's: RSS memory, CPU load, resource contention overhead, etc. +# +# check_runners: 4 + +## @param enable_metadata_collection - boolean - optional - default: true +## @env DD_ENABLE_METADATA_COLLECTION - boolean - optional - default: true +## Metadata collection should always be enabled, except if you are running several +## agents/dsd instances per host. In that case, only one Agent should have it on. +## WARNING: disabling it on every Agent leads to display and billing issues. +# +# enable_metadata_collection: true + +## @param enable_gohai - boolean - optional - default: true +## @env DD_ENABLE_GOHAI - boolean - optional - default: true +## Enable the gohai collection of systems data. +# +# enable_gohai: true + +## @param enable_signing_metadata_collection - boolean - optional - default: true +## @env DD_ENABLE_SIGNING_METADATA_COLLECTION - boolean - optional - default: true +## Enable the Linux package signing metadata collection. 
+# +# enable_signing_metadata_collection: true + +## @param server_timeout - integer - optional - default: 30 +## @env DD_SERVER_TIMEOUT - integer - optional - default: 30 +## IPC api server timeout in seconds. +# +# server_timeout: 30 + +## @param procfs_path - string - optional +## @env DD_PROCFS_PATH - string - optional +## Some environments may have the procfs file system mounted in a miscellaneous +## location. The procfs_path configuration parameter provides a mechanism to +## override the standard default location: '/proc' - this setting trickles down to +## integrations and affect their behavior if they rely on the psutil python package. +# +# procfs_path: +{{ if .Python }} +## @param disable_py3_validation - boolean - optional - default: false +## @env DD_DISABLE_PY3_VALIDATION - boolean - optional - default: false +## Disable Python3 validation of python checks. +# +# disable_py3_validation: false +# +## @param python3_linter_timeout - integer - optional - default: 120 +## @env DD_PYTHON3_LINTER_TIMEOUT - integer - optional - default: 120 +## Timeout in seconds for validation of compatibility with python 3 when running python 2. +# +# python3_linter_timeout: 120 + +## @param memtrack_enabled - boolean - optional - default: true +## @env DD_MEMTRACK_ENABLED - boolean - optional - default: true +## Enables tracking of memory allocations made from the python runtime loader. +# +# memtrack_enabled: true + +## @param tracemalloc_debug - boolean - optional - default: false +## @env DD_TRACEMALLOC_DEBUG - boolean - optional - default: false +## Enables debugging with tracemalloc for python checks. +## Please note that this option is only available when python_version is set to "3". +## Additionally when this option becomes effective the number of check runners is +## overridden to 1. 
+# +# tracemalloc_debug: false + +## @param tracemalloc_include - string - optional +## @env DD_TRACEMALLOC_INCLUDE - string - optional +## Comma-separated list of Python checks to enable tracemalloc for when `tracemalloc_debug` is true. +## By default, all Python checks are enabled. +# +# tracemalloc_include: + +## @param tracemalloc_exclude - string - optional +## @env DD_TRACEMALLOC_EXCLUDE - string - optional +## Comma-separated list of Python checks to disable tracemalloc for when `tracemalloc_debug` is true. +## By default, all Python checks are enabled. This setting takes precedence over `tracemalloc_include`. +# +# tracemalloc_exclude: + +## @param windows_use_pythonpath - boolean - optional +## @env DD_WINDOWS_USE_PYTHONPATH - boolean - optional +## Whether to honour the value of the PYTHONPATH env var when set on Windows. +## Disabled by default, so we only load Python libraries bundled with the Agent. +# +# windows_use_pythonpath: false +{{ end }} +## @param secret_backend_command - string - optional +## @env DD_SECRET_BACKEND_COMMAND - string - optional +## `secret_backend_command` is the path to the script to execute to fetch secrets. +## The executable must have specific rights that differ on Windows and Linux. +## +## For more information see: https://github.com/DataDog/datadog-agent/blob/main/docs/agent/secrets.md +# +# secret_backend_command: + +## @param secret_backend_arguments - list of strings - optional +## @env DD_SECRET_BACKEND_ARGUMENTS - space separated list of strings - optional +## If secret_backend_command is set, specify here a list of arguments to give to the command at each run. 
+# +# secret_backend_arguments: +# - +# - + +## @param secret_backend_output_max_size - integer - optional - default: 1048576 +## @env DD_SECRET_BACKEND_OUTPUT_MAX_SIZE - integer - optional - default: 1048576 +## The size in bytes of the buffer used to store the command answer (applies to both stdout and stderr) +# +# secret_backend_output_max_size: 1048576 + +## @param secret_backend_timeout - integer - optional - default: 30 +## @env DD_SECRET_BACKEND_TIMEOUT - integer - optional - default: 30 +## The timeout to execute the command in seconds +# +# secret_backend_timeout: 30 + +## @param secret_backend_skip_checks - boolean - optional - default: false +## @env DD_SECRET_BACKEND_SKIP_CHECKS - boolean - optional - default: false +## Disable fetching secrets for check configurations +# +# secret_backend_skip_checks: false +# + +## @param secret_backend_remove_trailing_line_break - boolean - optional - default: false +## @env DD_SECRET_BACKEND_REMOVE_TRAILING_LINE_BREAK - boolean - optional - default: false +## Remove trailing line breaks from secrets returned by the secret_backend_command. Some secret management tools automatically +## add a line break when exporting secrets through files. +# +# secret_backend_remove_trailing_line_break: false + + +{{- if .InternalProfiling -}} +## @param profiling - custom object - optional +## Enter specific configurations for internal profiling. +## +## Please note that: +## 1. This does *not* enable profiling for user applications. +## 2. This only enables internal profiling of the agent go runtime. +## 3. To enable profiling for user apps please refer to +## https://docs.datadoghq.com/tracing/profiling/ +## 4. Enabling this feature will incur billing charges and other +## unexpected side-effects (ie. agent profiles showing with your +## services). +## +## Uncomment this parameter and the one below to enable profiling. 
+# +# internal_profiling: +# + ## @param enabled - boolean - optional - default: false + ## @env DD_INTERNAL_PROFILING_ENABLED - boolean - optional - default: false + ## Enable internal profiling for the Agent process. + # + # enabled: false + +{{ end }} + + +{{ end -}} + +{{- if .LogsAgent }} + +################################## +## Log collection Configuration ## +################################## + +## @param logs_enabled - boolean - optional - default: false +## @env DD_LOGS_ENABLED - boolean - optional - default: false +## Enable Datadog Agent log collection by setting logs_enabled to true. +# +# logs_enabled: false + +## @param logs_config - custom object - optional +## Enter specific configurations for your Log collection. +## Uncomment this parameter and the one below to enable them. +## See https://docs.datadoghq.com/agent/logs/ +# +# logs_config: + + ## @param container_collect_all - boolean - optional - default: false + ## @env DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL - boolean - optional - default: false + ## Enable container log collection for all the containers (see ac_exclude to filter out containers) + # + # container_collect_all: false + + ## @param logs_dd_url - string - optional + ## @env DD_LOGS_CONFIG_LOGS_DD_URL - string - optional + ## Define the endpoint and port to hit when using a proxy for logs. The logs are forwarded in TCP + ## therefore the proxy must be able to handle TCP connections. + # + # logs_dd_url: : + + ## @param logs_no_ssl - boolean - optional - default: false + ## @env DD_LOGS_CONFIG_LOGS_NO_SSL - optional - default: false + ## Disable the SSL encryption. This parameter should only be used when logs are + ## forwarded locally to a proxy. It is highly recommended to then handle the SSL encryption + ## on the proxy side. 
+ # + # logs_no_ssl: false + + ## @param processing_rules - list of custom objects - optional + ## @env DD_LOGS_CONFIG_PROCESSING_RULES - list of custom objects - optional + ## Global processing rules that are applied to all logs. The available rules are + ## "exclude_at_match", "include_at_match" and "mask_sequences". More information in Datadog documentation: + ## https://docs.datadoghq.com/agent/logs/advanced_log_collection/#global-processing-rules + # + # processing_rules: + # - type: + # name: + # pattern: + + ## @param force_use_http - boolean - optional - default: false + ## @env DD_LOGS_CONFIG_FORCE_USE_HTTP - boolean - optional - default: false + ## By default, the Agent sends logs in HTTPS batches to port 443 if HTTPS connectivity can + ## be established at Agent startup, and falls back to TCP otherwise. Set this parameter to `true` to + ## always send logs with HTTPS (recommended). + ## Warning: force_use_http means HTTP over TCP, not HTTP over HTTPS. Please use logs_no_ssl for HTTP over HTTPS. + # + # force_use_http: true + + ## @param force_use_tcp - boolean - optional - default: false + ## @env DD_LOGS_CONFIG_FORCE_USE_TCP - boolean - optional - default: false + ## By default, logs are sent through HTTPS if possible, set this parameter + ## to `true` to always send logs via TCP. If `force_use_http` is set to `true`, this parameter + ## is ignored. + # + # force_use_tcp: true + + ## @param use_compression - boolean - optional - default: true + ## @env DD_LOGS_CONFIG_USE_COMPRESSION - boolean - optional - default: true + ## This parameter is available when sending logs with HTTPS. If enabled, the Agent + ## compresses logs before sending them. + # + # use_compression: true + + ## @param compression_level - integer - optional - default: 6 + ## @env DD_LOGS_CONFIG_COMPRESSION_LEVEL - integer - optional - default: 6 + ## The compression_level parameter accepts values from 0 (no compression) + ## to 9 (maximum compression but higher resource usage). 
Only takes effect if + ## `use_compression` is set to `true`. + # + # compression_level: 6 + + ## @param batch_wait - integer - optional - default: 5 + ## @env DD_LOGS_CONFIG_BATCH_WAIT - integer - optional - default: 5 + ## The maximum time (in seconds) the Datadog Agent waits to fill each batch of logs before sending. + # + # batch_wait: 5 + + ## @param open_files_limit - integer - optional - default: 500 + ## @env DD_LOGS_CONFIG_OPEN_FILES_LIMIT - integer - optional - default: 500 + ## The maximum number of files that can be tailed in parallel. + ## Note: the default for Mac OS is 200. The default for + ## all other systems is 500. + # + # open_files_limit: 500 + + ## @param file_wildcard_selection_mode - string - optional - default: `by_name` + ## @env DD_LOGS_CONFIG_FILE_WILDCARD_SELECTION_MODE - string - optional - default: `by_name` + ## The strategy used to prioritize wildcard matches if they exceed the open file limit. + ## + ## Choices are `by_name` and `by_modification_time`. + ## + ## `by_name` means that each log source is considered and the matching files are ordered + ## in reverse name order. While there are less than `logs_config.open_files_limit` files + ## being tailed, this process repeats, collecting from each configured source. + ## + ## `by_modification_time` takes all log sources and first adds any log sources that + ## point to a specific file. Next, it finds matches for all wildcard sources. + ## This resulting list is ordered by which files have been most recently modified + ## and the top `logs_config.open_files_limit` most recently modified files are + ## chosen for tailing. + ## + ## WARNING: `by_modification_time` is less performant than `by_name` and will trigger + ## more disk I/O at the configured wildcard log paths. 
+ # + # file_wildcard_selection_mode: by_name + + ## @param max_message_size_bytes - integer - optional - default: 256000 + ## @env DD_LOGS_CONFIG_MAX_MESSAGE_SIZE_BYTES - integer - optional - default : 256000 + ## The maximum size of single log message in bytes. If maxMessageSizeBytes exceeds + ## the documented API limit of 1MB - any payloads larger than 1MB will be dropped by the intake. + # https://docs.datadoghq.com/api/latest/logs/ + # + # max_message_size_bytes: 256000 + + ## @param integrations_logs_files_max_size - integer - optional - default: 10 + ## @env DD_LOGS_CONFIG_INTEGRATIONS_LOGS_FILES_MAX_SIZE - integer - optional - default: 10 + ## The max size in MB that an integration logs file is allowed to use + # + # integrations_logs_files_max_size + + ## @param integrations_logs_total_usage - integer - optional - default: 100 + ## @env DD_LOGS_CONFIG_INTEGRATIONS_LOGS_TOTAL_USAGE - integer - optional - default: 100 + ## The total combined usage all integrations logs files can use + # + # integrations_logs_total_usage + +{{ end -}} +{{- if .TraceAgent }} + +#################################### +## Trace Collection Configuration ## +#################################### + +## @param apm_config - custom object - optional +## Enter specific configurations for your trace collection. +## Uncomment this parameter and the one below to enable them. +## See https://docs.datadoghq.com/agent/apm/ +# +# apm_config: + + ## @param enabled - boolean - optional - default: true + ## @env DD_APM_ENABLED - boolean - optional - default: true + ## Set to true to enable the APM Agent. + # + # enabled: true + + ## @param env - string - optional - default: none + ## @env DD_APM_ENV - string - optional - default: none + ## The environment tag that Traces should be tagged with. + ## If not set the value will be inherited, in order, from the top level + ## "env" config option if set and then from the 'env:' tag if present in the + ## 'tags' top level config option. 
+ # + # env: none + + ## @param receiver_port - integer - optional - default: 8126 + ## @env DD_APM_RECEIVER_PORT - integer - optional - default: 8126 + ## The port that the trace receiver should listen on. + ## Set to 0 to disable the HTTP receiver. + # + # receiver_port: 8126 + +{{- if (eq .OS "windows")}} + ## Please note that UDS receiver is not available in Windows. + ## Enabling this setting may result in unexpected behavior. + ## @param receiver_socket - string - optional - default: "" + ## @env DD_APM_RECEIVER_SOCKET - string - optional - default: "" + ## Accept traces through Unix Domain Sockets. + ## Set to "" to disable the UDS receiver. + # + # receiver_socket: "" +{{else}} + ## @param receiver_socket - string - optional - default: unix:///var/run/datadog/apm.socket + ## @env DD_APM_RECEIVER_SOCKET - string - optional - default: unix:///var/run/datadog/apm.socket + ## Accept traces through Unix Domain Sockets. + ## Set to "" to disable the UDS receiver. + # + # receiver_socket: /var/run/datadog/apm.socket +{{ end }} + + ## @param apm_non_local_traffic - boolean - optional - default: false + ## @env DD_APM_NON_LOCAL_TRAFFIC - boolean - optional - default: false + ## Set to true so the Trace Agent listens for non local traffic, + ## i.e if Traces are being sent to this Agent from another host/container + # + # apm_non_local_traffic: false + + ## @param apm_dd_url - string - optional + ## @env DD_APM_DD_URL - string - optional + ## Define the endpoint and port to hit when using a proxy for APM. The traces are forwarded in TCP + ## therefore the proxy must be able to handle TCP connections. + # + # apm_dd_url: : + + ## DEPRECATED - please use `target_traces_per_second` instead. + ## @param max_traces_per_second - integer - optional - default: 10 + ## @env DD_APM_MAX_TPS - integer - optional - default: 10 + ## The target traces per second to sample. Sampling rates to apply are adjusted given + ## the received traffic and communicated to tracers. 
This configures head base sampling. + ## As of 7.35.0 sampling cannot be disabled and setting 'max_traces_per_second' to 0 no longer + ## disables sampling, but instead sends no traces to the intake. To avoid rate limiting, set this + ## value sufficiently high for your traffic pattern. + # + # max_traces_per_second: 10 + + ## @param target_traces_per_second - integer - optional - default: 10 + ## @env DD_APM_TARGET_TPS - integer - optional - default: 10 + ## The target traces per second to sample. Sampling rates to apply are adjusted given + ## the received traffic and communicated to tracers. This configures head-based sampling. + ## As of 7.35.0 sampling cannot be disabled and setting 'max_traces_per_second' to 0 no longer + ## disables sampling, but instead sends no traces to the intake. To avoid rate limiting, set this + ## value sufficiently high for your traffic pattern. + # + # target_traces_per_second: 10 + + ## @param errors_per_second - integer - optional - default: 10 + ## @env DD_APM_ERROR_TPS - integer - optional - default: 10 + ## The target error trace chunks to receive per second. The TPS is spread + ## to catch all combinations of service, name, resource, http.status, and error.type. + ## Set to 0 to disable the errors sampler. + # + # errors_per_second: 10 + + ## @param max_events_per_second - integer - optional - default: 200 + ## @env DD_APM_MAX_EPS - integer - optional - default: 200 + ## Maximum number of APM events per second to sample. + # + # max_events_per_second: 200 + + ## @param max_memory - integer - optional - default: 500000000 + ## @env DD_APM_MAX_MEMORY - integer - optional - default: 500000000 + ## This value is what the Agent aims to use in terms of memory. If surpassed, the API + ## rate limits incoming requests to aim and stay below this value. + ## Note: The Agent process is killed if it uses more than 150% of `max_memory`. + ## Set the `max_memory` parameter to `0` to disable the memory limitation. 
+ # + # max_memory: 500000000 + + ## @param max_cpu_percent - integer - optional - default: 50 + ## @env DD_APM_MAX_CPU_PERCENT - integer - optional - default: 50 + ## The CPU percentage that the Agent aims to use. If surpassed, the API rate limits + ## incoming requests to aim and stay below this value. Examples: 50 = half a core, 200 = two cores. + ## Set `max_cpu_percent` to `0` to disable rate limiting based on CPU usage. + # + # max_cpu_percent: 50 + + ## @param obfuscation - object - optional + ## Defines obfuscation rules for sensitive data. Disabled by default. + ## See https://docs.datadoghq.com/tracing/setup_overview/configure_data_security/#agent-trace-obfuscation + # + # obfuscation: + # credit_cards: + ## @param DD_APM_OBFUSCATION_CREDIT_CARDS_ENABLED - boolean - optional + ## Enables obfuscation rules for credit cards. Enabled by default. + # enabled: true + ## @param DD_APM_OBFUSCATION_CREDIT_CARDS_LUHN - boolean - optional + ## Enables a Luhn checksum check in order to eliminate false negatives. Disabled by default. + # luhn: false + # + # elasticsearch: + ## @param DD_APM_OBFUSCATION_ELASTICSEARCH_ENABLED - boolean - optional + ## Enables obfuscation rules for spans of type "elasticsearch". Enabled by default. + # enabled: true + ## @param DD_APM_OBFUSCATION_ELASTICSEARCH_KEEP_VALUES - object - optional + ## List of keys that should not be obfuscated. + # keep_values: + # - client_id + ## @param DD_APM_OBFUSCATION_ELASTICSEARCH_OBFUSCATE_SQL_VALUES - boolean - optional + ## The set of keys for which their values will be passed through SQL obfuscation + # obfuscate_sql_values: + # - val1 + # + # opensearch: + ## @param DD_APM_OBFUSCATION_OPENSEARCH_ENABLED - boolean - optional + ## Enables obfuscation rules for spans of type "opensearch". Enabled by default. + # enabled: true + ## @param DD_APM_OBFUSCATION_OPENSEARCH_KEEP_VALUES - object - optional + ## List of keys that should not be obfuscated. 
+ # keep_values: + # - client_id + ## @param DD_APM_OBFUSCATION_OPENSEARCH_OBFUSCATE_SQL_VALUES - boolean - optional + ## The set of keys for which their values will be passed through SQL obfuscation + # obfuscate_sql_values: + # - val1 + # + # http: + ## @param DD_APM_OBFUSCATION_HTTP_REMOVE_QUERY_STRING - boolean - optional + ## Enables obfuscation of query strings in URLs + # remove_query_string: false + ## @param DD_APM_OBFUSCATION_HTTP_REMOVE_PATHS_WITH_DIGITS - boolean - optional + ## If enabled, path segments in URLs containing digits are replaced by "?" + # remove_path_with_digits: false + # + # memcached: + ## @param DD_APM_OBFUSCATION_MEMCACHED_ENABLED - boolean - optional + ## Enables obfuscation rules for spans of type "memcached". Enabled by default. + # enabled: true + ## @param DD_APM_OBFUSCATION_MEMCACHED_KEEP_COMMAND - boolean - optional + ## If enabled, the full command for the query will be kept, including any lookup + ## keys that could be present. The value for storage commands will still be + ## redacted if Memcached obfuscation is enabled. + # keep_command: false + # + # mongodb: + ## @param DD_APM_OBFUSCATION_MONGODB_ENABLED - boolean - optional + ## Enables obfuscation rules for spans of type "mongodb". Enabled by default. + # enabled: true + ## @param DD_APM_OBFUSCATION_MONGODB_KEEP_VALUES - object - optional + ## List of keys that should not be obfuscated. + # keep_values: + # - document_id + ## @param DD_APM_OBFUSCATION_MONGODB_OBFUSCATE_SQL_VALUES - object - optional + ## The set of keys for which their values will be passed through SQL obfuscation + # obfuscate_sql_values: + # - val1 + # + # redis: + ## @param DD_APM_OBFUSCATION_REDIS_ENABLED - boolean - optional + ## Enables obfuscation rules for spans of type "redis". Enabled by default. + # enabled: true + ## @param DD_APM_OBFUSCATION_REDIS_REMOVE_ALL_ARGS - boolean - optional + ## When true, replaces all arguments of a redis command with a single "?". Disabled by default. 
+ # remove_all_args: false + # + ## @param DD_APM_OBFUSCATION_REMOVE_STACK_TRACES - boolean - optional + ## Enables removing stack traces to replace them with "?". Disabled by default. + # remove_stack_traces: false + # + # sql_exec_plan: + ## @param DD_APM_SQL_EXEC_PLAN_ENABLED - boolean - optional + ## Enables obfuscation rules for JSON query execution plans. Disabled by default. + # enabled: false + ## @param DD_APM_SQL_EXEC_PLAN_KEEP_VALUES - object - optional + ## List of keys that should not be obfuscated. + # keep_values: + # - id1 + ## @param DD_APM_SQL_EXEC_PLAN_OBFUSCATE_SQL_VALUES - boolean - optional + ## The set of keys for which their values will be passed through SQL obfuscation + # obfuscate_sql_values: + # - val1 + # + # sql_exec_plan_normalize: + ## @param DD_APM_SQL_EXEC_PLAN_NORMALIZE_ENABLED - boolean - optional + ## Enables obfuscation rules for JSON query execution plans, including cost and row estimates. + ## Produces a normalized execution plan. Disabled by default. + # enabled: false + ## @param DD_APM_SQL_EXEC_PLAN_NORMALIZE_KEEP_VALUES - object - optional + ## List of keys that should not be obfuscated. + # keep_values: + # - id1 + ## @param DD_APM_SQL_EXEC_PLAN_NORMALIZE_OBFUSCATE_SQL_VALUES - boolean - optional + ## The set of keys for which their values will be passed through SQL obfuscation + # obfuscate_sql_values: + # - val1 + + ## @param filter_tags - object - optional + ## @env DD_APM_FILTER_TAGS_REQUIRE - object - optional + ## @env DD_APM_FILTER_TAGS_REJECT - object - optional + ## Defines rules by which to filter traces based on tags. + ## * require - list of key or key/value strings - traces must have those tags in order to be sent to Datadog + ## * reject - list of key or key/value strings - traces with these tags are dropped by the Agent + ## Please note that: + ## 1. Rules take into account the intersection of tags defined. + ## 2. 
When `filter_tags` and `filter_tags_regex` are used at the same time, all rules are united for filtering. + ## In cases where rules in `filter_tags` and `filter_tags_regex` match the same key, the rule from `filter_tags` + ## takes precedence over the rule from `filter_tags_regex`. + ## + ## For example, in the case of the following configuration: + ## filter_tags: + ## require: ["foo:bar"] + ## filter_tags_regex: + ## require: ["foo:^bar[0-9]{1}$"] + ## With these rules, traces with a tag `foo:bar1` will be dropped, and those with a `foo:bar` tag will be kept + # + # filter_tags: + # require: [] + # reject: [] + + ## @param filter_tags_regex - object - optional + ## Defines rules by which to filter traces based on tags with regex pattern for tag values. + ## * require - list of key or key/value regex pattern strings - traces must have those tags in order to be sent to Datadog + ## * reject - list of key or key/value regex pattern strings - traces with these tags are dropped by the Agent + ## Note: Rules take into account the intersection of tags defined. + ## Using regexp patterns for tag filtering can have performance implications, and is slower than typical tag filtering + ## without regexp. However, this regexp is only run on the root span of a trace, so should not have a critical impact + ## on overall performance. + ## More detailed information can be found in the description of the `filter_tags` parameter above + # + # filter_tags_regex: + # require: [] # e.g. [":"] + # reject: [] # e.g. [":"] + + ## @param replace_tags - list of objects - optional + ## @env DD_APM_REPLACE_TAGS - list of objects - optional + ## Defines a set of rules to replace or remove certain resources, tags containing + ## potentially sensitive information. + ## Each rule has to contain: + ## * name - string - The tag name to replace, for resources use "resource.name". 
+ ## * pattern - string - The pattern to match the desired content to replace + ## * repl - string - what to inline if the pattern is matched + ## + ## See https://docs.datadoghq.com/tracing/setup_overview/configure_data_security/#replace-rules-for-tag-filtering + ## + # + # replace_tags: + # - name: "" + # pattern: "" + # repl: "" + + ## @param ignore_resources - list of strings - optional + ## @env DD_APM_IGNORE_RESOURCES - comma separated list of strings - optional + ## An exclusion list of regular expressions can be provided to disable certain traces based on their resource name + ## all entries must be surrounded by double quotes and separated by commas. + # + # ignore_resources: ["(GET|POST) /healthcheck"] + + ## @param log_file - string - optional + ## @env DD_APM_LOG_FILE - string - optional + ## The full path to the file where APM-agent logs are written. + # + # log_file: + + ## @param connection_limit - integer - default: 2000 + ## @env DD_APM_CONNECTION_LIMIT - integer - default: 2000 + ## The APM connection limit for the Agent. + ## See https://docs.datadoghq.com/tracing/troubleshooting/agent_rate_limits/#max-connection-limit + # + # connection_limit: 2000 + + ## @param compute_stats_by_span_kind - bool - default: false + ## @env DD_APM_COMPUTE_STATS_BY_SPAN_KIND - bool - default: false + ## [BETA] Enables an additional stats computation check on spans to see they have an eligible `span.kind` (server, consumer, client, producer). + ## If enabled, a span with an eligible `span.kind` will have stats computed. If disabled, only top-level and measured spans will have stats computed. + ## NOTE: For stats computed from OTel traces, only top-level spans are considered when this option is off. + ## If you are sending OTel traces and want stats on non-top-level spans, this flag will need to be enabled. 
+ ## If you are sending OTel traces and do not want stats computed by span kind, you need to disable this flag and remove the "enable_otlp_compute_top_level_by_span_kind" APM feature if present. + # compute_stats_by_span_kind: false + + ## @param peer_service_aggregation - bool - default: false + ## @env DD_APM_PEER_SERVICE_AGGREGATION - bool - default: false + ## DEPRECATED - please use `peer_tags_aggregation` instead. + # peer_service_aggregation: false + + ## @param peer_tags_aggregation - bool - default: false + ## @env DD_APM_PEER_TAGS_AGGREGATION - bool - default: false + ## [BETA] Enables aggregation of peer related tags (e.g., `peer.service`, `db.instance`, etc.) in the Agent. + ## If disabled, aggregated trace stats will not include these tags as dimensions on trace metrics. + ## For the best experience with peer tags, Datadog also recommends enabling `compute_stats_by_span_kind`. + ## If you are using an OTel tracer, it's best to have both enabled because client/producer spans with relevant peer tags + ## may not be marked by the Agent as top-level spans. + ## If enabling both causes the Agent to consume too many resources, try disabling `compute_stats_by_span_kind` first. + ## A high cardinality of peer tags or APM resources can also contribute to higher CPU and memory consumption. + ## You can check for the cardinality of these fields by making trace search queries in the Datadog UI. + ## The default list of peer tags can be found in pkg/trace/stats/concentrator.go. + # peer_tags_aggregation: false + + ## @param peer_tags - list of strings - optional + ## @env DD_APM_PEER_TAGS - list of strings - optional + ## [BETA] Optional list of supplementary peer tags that go beyond the defaults. The Datadog backend validates all tags + ## and will drop ones that are unapproved. + # peer_tags: [] + + ## @param features - list of strings - optional + ## @env DD_APM_FEATURES - comma separated list of strings - optional + ## Configure additional beta APM features. 
+ ## The list of items available under apm_config.features is not guaranteed to persist across versions; + ## a feature may eventually be promoted to its own configuration option on the agent, or dropped entirely. + # + # features: ["error_rare_sample_tracer_drop","table_names","component2name","sql_cache","sqllexer","enable_otlp_compute_top_level_by_span_kind","enable_receive_resource_spans_v2"] + + ## @param additional_endpoints - object - optional + ## @env DD_APM_ADDITIONAL_ENDPOINTS - object - optional + ## Enables sending data to multiple endpoints and/or with multiple API keys via dual shipping. + ## See https://docs.datadoghq.com/agent/guide/dual-shipping + # + # additional_endpoints: + # "https://trace.agent.datadoghq.com": + # - apikey2 + # - apikey3 + # "https://trace.agent.datadoghq.eu": + # - apikey4 + + ## @param debug - custom object - optional + ## Specifies settings for the debug server of the trace agent. + # + # debug: + + ## @param port - integer - optional - default: 5012 + ## @env DD_APM_DEBUG_PORT - string - optional - default: 5012 + ## Port for the debug endpoints for the trace Agent. Set it to 0 to disable the server. + # + # port: 5012 + + ## @param instrumentation_enabled - boolean - default: false + ## @env DD_APM_INSTRUMENTATION_ENABLED - boolean - default: false + ## Enables Single Step Instrumentation in the cluster (in beta) + # + # instrumentation_enabled: false + + ## @param instrumentation_enabled_namespaces - list of strings - optional + ## @env DD_APM_INSTRUMENTATION_ENABLED_NAMESPACES - space separated list of strings - optional + ## Enables Single Step Instrumentation in specific namespaces, while Single Step Instrumentation is off in the whole cluster (in beta) + ## Can only be set if DD_APM_INSTRUMENTATION_ENABLED is false. Cannot be set together with DD_APM_INSTRUMENTATION_DISABLED_NAMESPACES. 
+ # + # instrumentation_enabled_namespaces: + # - ns1 + # - apps + + ## @param instrumentation_disabled_namespaces - list of strings - optional + ## @env DD_APM_INSTRUMENTATION_DISABLED_NAMESPACES - space separated list of strings - optional + ## Disables Single Step Instrumentation in specific namespaces, while Single Step Instrumentation is enabled in the whole cluster (in beta) + ## Can only be set if DD_APM_INSTRUMENTATION_ENABLED is true. Cannot be set together with DD_APM_INSTRUMENTATION_ENABLED_NAMESPACES. + # + # instrumentation_disabled_namespaces: + # - ns2 + # - system-ns + + ## @param trace_buffer - integer - optional - default: 0 + ## @env DD_APM_TRACE_BUFFER - integer - optional - default: 0 + ## + ## WARNING: Do not use this config. It is here for debugging and + ## as a temporary fix in certain load scenarios. Setting this + ## results in a performance deterioration and an increase in memory + ## usage when the Trace Agent is under load. This config may be + ## removed in a future version. + ## + ## Specifies the number of trace payloads to buffer after decoding. + ## Traces can be buffered when receiving traces faster than the + ## processor can process them. 
+ ## + # + # trace_buffer: 0 + + ## @param probabilistic_sampler - object - optional + ## Enables and configures the Probabilistic Sampler, compatible with the + ## OTel Probabilistic Sampler Processor ( https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/probabilisticsamplerprocessor#probabilistic-sampling-processor ) + ## + #probabilistic_sampler: + ## @env DD_APM_PROBABILISTIC_SAMPLER_ENABLED - boolean - optional - default: false + ## Enables or disables the probabilistic sampler + # enabled: false + # + ## @env DD_APM_PROBABILISTIC_SAMPLER_SAMPLING_PERCENTAGE - float - optional - default: 0 + ## Samples this percentage (0-100) of traffic + # sampling_percentage: 0 + # + ## @env DD_APM_PROBABILISTIC_SAMPLER_HASH_SEED - integer - optional - default: 0 + ## hash_seed: A seed used for the hash algorithm. This must match other agents and OTel + ## collectors using the probabilistic sampler to ensure consistent sampling. + # hash_seed: 0 + + + {{- if .InternalProfiling -}} + ## @param profiling - custom object - optional + ## Enter specific configurations for internal profiling. + ## + ## Please note that: + ## 1. This does *not* enable profiling for user applications. + ## 2. This only enables internal profiling of the agent go runtime. + ## 3. To enable profiling for user apps please refer to + ## https://docs.datadoghq.com/tracing/profiling/ + ## 4. Enabling this feature will incur in billing charges and other + ## unexpected side-effects (ie. agent profiles showing with your + ## services). + ## + ## Uncomment this parameter and the one below to enable profiling. + # + # internal_profiling: + # + ## @param enabled - boolean - optional - default: false + ## @env DD_INTERNAL_PROFILING_ENABLED - boolean - optional - default: false + ## Enable internal profiling for the trace-agent process. 
+ # + # enabled: false + + {{ end -}} +{{ end -}} +{{- if .ProcessAgent }} + +###################################### +## Process Collection Configuration ## +###################################### + +## @param process_config - custom object - optional +## Enter specific configurations for your Process data collection. +## Uncomment this parameter and the one below to enable them. +## See https://docs.datadoghq.com/graphing/infrastructure/process/ +# +# process_config: + + {{- if (eq .OS "linux")}} + ## @param run_in_core_agent - custom object - optional + ## Controls whether the process Agent or core Agent collects process and/or container information (Linux only). + # run_in_core_agent: + ## @param enabled - boolean - optional - default: false + ## Enables process/container collection on the core Agent instead of the process Agent. + # enabled: false + {{ end }} + + ## @param process_collection - custom object - optional + ## Specifies settings for collecting processes. + # process_collection: + ## @param enabled - boolean - optional - default: false + ## Enables collection of information about running processes. + # enabled: false + + ## @param container_collection - custom object - optional + ## Specifies settings for collecting containers. + # container_collection: + ## @param enabled - boolean - optional - default: true + ## Enables collection of information about running containers. + # enabled: true + + ## Deprecated - use `process_collection.enabled` and `container_collection.enabled` instead + ## @param enabled - string - optional - default: "false" + ## @env DD_PROCESS_CONFIG_ENABLED - string - optional - default: "false" + ## A string indicating the enabled state of the Process Agent: + ## * "false" : The Agent collects only containers information. + ## * "true" : The Agent collects containers and processes information. + ## * "disabled" : The Agent process collection is disabled. 
+ # + # enabled: "true" + + ## @param expvar_port - string - optional - default: 6062 + ## @env DD_PROCESS_CONFIG_EXPVAR_PORT - string - optional - default: 6062 + ## Port for the debug endpoints for the process Agent. + # + # expvar_port: 6062 + + ## @param cmd_port - string - optional - default: 6162 + ## Port for configuring runtime settings for the process Agent. + # + # cmd_port: 6162 + + ## @param log_file - string - optional + ## @env DD_PROCESS_CONFIG_LOG_FILE - string - optional + ## The full path to the file where process Agent logs are written. + # + # log_file: + + ## @param intervals - custom object - optional - default: 10s for normal checks and 2s for others. + ## @env DD_PROCESS_CONFIG_INTERVALS_CONTAINER - integer - optional - default: 10 + ## @env DD_PROCESS_CONFIG_INTERVALS_CONTAINER_REALTIME - integer - optional - default: 2 + ## @env DD_PROCESS_CONFIG_INTERVALS_PROCESS - integer - optional - default: 10 + ## @env DD_PROCESS_CONFIG_INTERVALS_PROCESS_REALTIME - integer - optional - default: 2 + ## The interval, in seconds, at which the Agent runs each check. If you want consistent + ## behavior between real-time, set the `container_realtime` and `process_realtime` intervals to 10. + # + # intervals: + # container: 10 + # container_realtime: 2 + # process: 10 + # process_realtime: 2 + + ## @param process_discovery - custom object - optional + ## Specifies custom settings for the `process_discovery` object. + # process_discovery: + ## @param enabled - boolean - optional - default: true + ## Toggles the `process_discovery` check. If enabled, this check gathers information about running integrations. + # enabled: true + + ## @param interval - duration - optional - default: 4h - minimum: 10m + ## An interval in hours that specifies how often the process discovery check should run. 
+ # interval: 4h + + + ## @param blacklist_patterns - list of strings - optional + ## @env DD_PROCESS_CONFIG_BLACKLIST_PATTERNS - space separated list of strings - optional + ## A list of regex patterns that exclude processes if matched. + # + # blacklist_patterns: + # - + + ## @param queue_size - integer - optional - default: 256 + ## @env DD_PROCESS_CONFIG_QUEUE_SIZE - integer - optional - default: 256 + ## The number of check results to buffer in memory when a POST fails. + # + # queue_size: 256 + + ## @param process_queue_bytes - integer - optional - default: 60000000 + ## @env DD_PROCESS_CONFIG_PROCESS_QUEUE_BYTES - integer - optional - default: 60000000 + ## The amount of data (in bytes) to buffer in memory when a POST fails. + # + # process_queue_bytes: 60000000 + + ## @param rt_queue_size - integer - optional - default: 5 + ## @env DD_PROCESS_CONFIG_RT_QUEUE_SIZE - integer - optional - default: 5 + ## The number of realtime check results to buffer in memory when a POST fails. + # + # rt_queue_size: 5 + + ## @param max_per_message - integer - optional - default: 100 + ## @env DD_PROCESS_CONFIG_MAX_PER_MESSAGE - integer - optional - default: 100 + ## The maximum number of processes or containers per message. + # + # max_per_message: 100 + + ## @param dd_agent_bin - string - optional + ## @env DD_PROCESS_CONFIG_DD_AGENT_BIN - string - optional + ## Overrides the path to the Agent bin used for getting the hostname. Defaults are: + ## * Windows: \embedded\\agent.exe + ## * Unix: /opt/datadog-agent/bin/agent/agent + # + # dd_agent_bin: + + ## @param dd_agent_env - string - optional - default: "" + ## @env DD_PROCESS_CONFIG_DD_AGENT_ENV - string - optional - default: "" + ## Overrides of the environment we pass to fetch the hostname. + # + # dd_agent_env: "" + + ## @param scrub_args - boolean - optional - default: true + ## @env DD_PROCESS_CONFIG_SCRUB_ARGS - boolean - optional - default: true + ## Hide sensitive data on the Live Processes page. 
+ # + # scrub_args: true + + ## @param custom_sensitive_words - list of strings - optional + ## @env DD_PROCESS_CONFIG_CUSTOM_SENSITIVE_WORDS - space separated list of strings - optional + ## Define your own list of sensitive data to be merged with the default one. + ## Read more on Datadog documentation: + ## https://docs.datadoghq.com/graphing/infrastructure/process/#process-arguments-scrubbing + # + # custom_sensitive_words: + # - 'personal_key' + # - '*token' + # - 'sql*' + # - '*pass*d*' + + ## @param disable_realtime_checks - boolean - optional - default: false + ## @env DD_PROCESS_CONFIG_DISABLE_REALTIME - boolean - optional - default: false + ## Disable realtime process and container checks + # + # disable_realtime_checks: false + +{{- if .InternalProfiling -}} + ## @param profiling - custom object - optional + ## Enter specific configurations for internal profiling. + ## + ## Please note that: + ## 1. This does *not* enable profiling for user applications. + ## 2. This only enables internal profiling of the agent go runtime. + ## 3. To enable profiling for user apps please refer to + ## https://docs.datadoghq.com/tracing/profiling/ + ## 4. Enabling this feature will incur in billing charges and other + ## unexpected side-effects (ie. agent profiles showing with your + ## services). + ## + ## Uncomment this parameter and the one below to enable profiling. + # + # internal_profiling: + # + ## @param enabled - boolean - optional - default: false + ## @env DD_INTERNAL_PROFILING_ENABLED - boolean - optional - default: false + ## Enable internal profiling for the Process Agent process. + # + # enabled: false + +{{ end }} + +{{- if .NetworkPath }} +{{- if (ne .OS "darwin")}} + +######################################## +## Network Path Configuration ## +######################################## + +# network_path: + ## @param connections_monitoring - custom object - optional + ## Specific configurations for monitoring network connections via Network Path. 
+ # + # connections_monitoring: + + ## @param enabled - bool - optional - default: false + ## @env DD_NETWORK_PATH_CONNECTIONS_MONITORING_ENABLED - bool - optional - default: false + ## [Beta] Enables monitoring network connections via Network Path. + # + # enabled: false + + ## @param collector - custom object - optional + ## Configuration related to Network Path Collector. + # + # collector: + + ## @param workers - integer - optional - default: 4 + ## @env DD_WORKERS - integer - optional - default: 4 + ## The `workers` refers to the number of concurrent workers available for network path execution. + # + # workers: 4 + +{{ end -}} +{{ end -}} +{{ end -}} +{{- if .Compliance }} +############################################# +## Security Agent Compliance Configuration ## +############################################# + +## @param compliance_config - custom object - optional +## Enter specific configuration for continuous compliance checks. +# compliance_config: + + ## @param enabled - boolean - optional - default: false + ## @env DD_COMPLIANCE_CONFIG_ENABLED - boolean - optional - default: false + ## Set to true to enable Cloud Security Posture Management (CSPM). 
+ #
+ # enabled: false
+
+ ## @param dir - string - optional - default: /etc/datadog-agent/compliance.d
+ ## @env DD_COMPLIANCE_CONFIG_DIR - string - optional - default: /etc/datadog-agent/compliance.d
+ ## Directory path for compliance checks configuration containing enabled benchmarks
+ #
+ # dir: /etc/datadog-agent/compliance.d
+
+ ## @param check_interval - duration - optional - default: 20m
+ ## @env DD_COMPLIANCE_CONFIG_CHECK_INTERVAL - duration - optional - default: 20m
+ ## Check interval (see https://golang.org/pkg/time/#ParseDuration for available options)
+ # check_interval: 20m
+
+ ## @param check_max_events_per_run - integer - optional - default: 100
+ ## @env DD_COMPLIANCE_CONFIG_CHECK_MAX_EVENTS_PER_RUN - integer - optional - default: 100
+ ##
+ # check_max_events_per_run: 100
+{{ end -}}
+
+{{- if .SBOM }}
+## @param sbom - custom object - optional
+## Enter specific configuration for the Cloud Security Management Vulnerability Management feature
+# sbom:
+ ## @param enabled - boolean - optional - default: false
+ ## set to true to enable Cloud Security Management Vulnerability Management
+ # enabled: false
+
+ ## uncomment the sections below to enable where the vulnerability scanning is done
+
+ ## @param enabled - boolean - optional - default: false
+ ## set to true to enable Infrastructure Vulnerabilities
+ # host:
+ # enabled: false
+{{- if (eq .OS "linux")}}
+
+
+ # container_image:
+ # enabled: false
+{{ end -}}
+{{ end -}}
+{{- if .SystemProbe }}
+
+##################################
+## System Probe Configuration ##
+##################################
+
+## @param system_probe_config - custom object - optional
+## Enter specific configurations for your System Probe data collection.
+## Uncomment this parameter and the one below to enable them.
+#
+# system_probe_config:
+{{- if (eq .OS "windows")}}
+ ## @param sysprobe_socket - string - optional - default: localhost:3333
+ ## @env DD_SYSTEM_PROBE_CONFIG_SYSPROBE_SOCKET - string - optional - default: localhost:3333
+ ## The TCP address where system probes are accessed.
+ #
+ # sysprobe_socket: localhost:3333
+{{else}}
+ ## @param sysprobe_socket - string - optional - default: /opt/datadog-agent/run/sysprobe.sock
+ ## @env DD_SYSTEM_PROBE_CONFIG_SYSPROBE_SOCKET - string - optional - default: /opt/datadog-agent/run/sysprobe.sock
+ ## The full path to the location of the unix socket where system probes are accessed.
+ #
+ # sysprobe_socket: /opt/datadog-agent/run/sysprobe.sock
+{{ end }}
+ ## @param log_file - string - optional - default: /var/log/datadog/system-probe.log
+ ## @env DD_SYSTEM_PROBE_CONFIG_LOG_FILE - string - optional - default: /var/log/datadog/system-probe.log
+ ## The full path to the file where system-probe logs are written.
+ #
+ # log_file: /var/log/datadog/system-probe.log
+
+ ## @param language_detection - custom object - optional
+ ## Enter specific configurations for language detection
+ ## Uncomment this parameter and the one below to enable them.
+ #
+ # language_detection:
+
+ ## @param enabled - bool - optional - default: false
+ ## @env DD_SYSTEM_PROBE_CONFIG_LANGUAGE_DETECTION_ENABLED - bool - optional - default: false
+ ## [Beta] Enables language detection via binary analysis in the system probe.
+ #
+ # enabled: false
+
+ ## @param health_port - integer - optional - default: 0
+ ## @env DD_SYSTEM_PROBE_HEALTH_PORT - integer - optional - default: 0
+ ## The Agent can expose its health check on a dedicated HTTP port.
+ ## This is useful for orchestrators that support HTTP probes.
+ ## Default is 0 (disabled). Set a valid port number (example: 5558) to enable.
+ #
+ # health_port: 0
+
+{{- if .InternalProfiling -}}
+ ## @param profiling - custom object - optional
+ ## Enter specific configurations for internal profiling.
+ ## + ## Please note that: + ## 1. This does *not* enable profiling for user applications. + ## 2. This only enables internal profiling of the agent go runtime. + ## 3. To enable profiling for user apps please refer to + ## https://docs.datadoghq.com/tracing/profiling/ + ## 4. Enabling this feature will incur in billing charges and other + ## unexpected side-effects (ie. agent profiles showing with your + ## services). + ## + ## Uncomment this parameter and the one below to enable profiling. + # + # internal_profiling: + # + ## @param enabled - boolean - optional - default: false + ## @env DD_INTERNAL_PROFILING_ENABLED - boolean - optional - default: false + ## Enable internal profiling for the System Probe process. + # + # enabled: false + + ## @param memory_controller - custom object - optional + ## Cgroup memory controller for internal memory profiling. + ## + ## memory_controller: + # + ## @param enabled - boolean - optional - default: false + ## Enable cgroup memory controller. + # + # enabled: false + # + ## @param thresholds - map of strings - optional + ## Thresholds and the respective active actions to trigger when + ## memory usage is above the specified threshold. + ## Threshold can be either an absolute value - such as 500MB or 2GB - + ## or a percentage of the cgroup allocated memory such as 50%. + ## The action can be: + ## - gc: to trigger the Go garbage collector + ## - profile: to generate a system-probe memory profile in /tmp + ## - log: to simply log that the threshold was reached + # + # thresholds: + # 500MB: gc + # 50%: profile + + ## @param pressure_levels - map of strings - optional + ## Pressure levels and the respective active actions to trigger when + ## memory usage reaches the specified level. + ## The pressure level is 'low', 'medium' or 'critical'. + ## The actions are the same for thresholds (see above). 
+ #
+ # pressure_levels:
+ # medium: gc
+{{ end }}
+
+{{- if .NetworkModule }}
+
+########################################
+## System Probe Network Configuration ##
+########################################
+
+# network_config:
+{{- if (eq .OS "windows")}}
+ ## Please note that enabling the Network Module of the System
+ ## Probe will result in a kernel driver being loaded.
+{{ end }}
+ ## @param enabled - boolean - optional - default: false
+ ## Set to true to enable the Network Module of the System Probe
+ #
+ # enabled: false
+
+{{ end -}}
+
+{{- if .UniversalServiceMonitoringModule }}
+
+#############################################################
+## System Probe Universal Service monitoring Configuration ##
+#############################################################
+
+# service_monitoring_config:
+{{- if (eq .OS "windows")}}
+ ## Please note that enabling the Universal Service Monitoring
+ ## Module of the System Probe will result in a kernel driver
+ ## being loaded.
+{{ end }}
+ ## @param enabled - boolean - optional - default: false
+ ## Set to true to enable the Universal Service Monitoring Module of the System Probe
+ #
+ # enabled: false
+
+{{ end -}}
+
+{{- if .PingModule }}
+
+#####################################
+## System Probe Ping Configuration ##
+#####################################
+
+# ping:
+ ## @param enabled - boolean - optional - default: false
+ ## Set to true to enable the Ping Module of the System Probe
+ #
+ # enabled: false
+
+{{ end -}}
+
+
+{{- if .TracerouteModule }}
+
+###########################################
+## System Probe Traceroute Configuration ##
+###########################################
+
+# traceroute:
+ ## @param enabled - boolean - optional - default: false
+ ## Set to true to enable the Traceroute Module of the System Probe
+ #
+ # enabled: false
+
+{{ end -}}
+
+
+{{- if .SecurityModule }}
+
+##########################################
+## Security Agent Runtime Configuration ##
+## ##
+## Settings to send 
logs to Datadog are ##
+## fetched from section `logs_config` ##
+##########################################
+
+# runtime_security_config:
+ ## @param enabled - boolean - optional - default: false
+ ## @env DD_RUNTIME_SECURITY_CONFIG_ENABLED - boolean - optional - default: false
+ ## Set to true to enable Cloud Workload Security (CWS).
+ #
+ # enabled: false
+
+ ## @param fim_enabled - boolean - optional - default: false
+ ## Set to true to enable the File Integrity Monitoring (FIM) feature of Cloud Workload Security (CWS).
+ #
+ # fim_enabled: false
+
+{{- if (eq .OS "windows")}}
+ ## @param socket - string - optional - default: localhost:3334
+ ## @env DD_SYSTEM_PROBE_CONFIG_SYSPROBE_SOCKET - string - optional - default: localhost:3334
+ ## The TCP address where the security runtime module is accessed.
+ #
+ # socket: localhost:3334
+{{else}}
+ ## @param socket - string - optional - default: /opt/datadog-agent/run/runtime-security.sock
+ ## @env DD_RUNTIME_SECURITY_CONFIG_SOCKET - string - optional - default: /opt/datadog-agent/run/runtime-security.sock
+ ## The full path to the location of the unix socket where security runtime module is accessed.
+ # + # socket: /opt/datadog-agent/run/runtime-security.sock +{{ end }} + ## @param policies - custom object - optional + ## Policy files + # policies: +{{- if (eq .OS "windows")}} + ## @param dir - string - default: %ProgramData%\Datadog\runtime-security.d + ## @env DD_RUNTIME_SECURITY_CONFIG_POLICIES_DIR - string - default: /etc/datadog-agent/runtime-security.d + ## Path from where the policy files are loaded + # + # dir: c:\ProgramData\Datadog\runtime-security.d +{{else}} + ## @param dir - string - default: /etc/datadog-agent/runtime-security.d + ## @env DD_RUNTIME_SECURITY_CONFIG_POLICIES_DIR - string - default: /etc/datadog-agent/runtime-security.d + ## Path from where the policy files will be loaded + # + # dir: /etc/datadog-agent/runtime-security.d +{{ end }} + ## @param syscall_monitor - custom object - optional + ## Syscall monitoring + # + # syscall_monitor: + + ## @param enabled - boolean - optional - default: false + ## @env DD_RUNTIME_SECURITY_CONFIG_SYSCALL_MONITOR_ENABLED - boolean - optional - default: false + ## Set to true to enable the Syscall monitoring (recommended for troubleshooting only). + # + # enabled: false + + ## @param custom_sensitive_words - list of strings - optional + ## @env DD_RUNTIME_SECURITY_CONFIG_CUSTOM_SENSITIVE_WORDS - space separated list of strings - optional + ## Define your own list of sensitive data to be merged with the default one. + ## Read more on Datadog documentation: + ## https://docs.datadoghq.com/graphing/infrastructure/process/#process-arguments-scrubbing + # + # custom_sensitive_words: + # - 'personal_key' + # - '*token' + # - 'sql*' + # - '*pass*d*' + + ## @param envs_with_value - list of strings - optional + ## @env DD_RUNTIME_SECURITY_CONFIG_ENVS_WITH_VALUE - space separated list of strings - optional + ## Define your own list of non-sensitive environment variable names whose value will not be + ## concealed by the runtime security module. 
+ ## Default: LD_PRELOAD, LD_LIBRARY_PATH, PATH, HISTSIZE, HISTFILESIZE, GLIBC_TUNABLES + # + # envs_with_value: + # - LD_PRELOAD + # - LD_LIBRARY_PATH + # - PATH + # - HISTSIZE + # - HISTFILESIZE + + ## @param activity_dump - custom object - optional + ## Activity dump section configures if/how the Agent sends activity dumps to Datadog + # + # activity_dump: + + ## @param enabled - boolean - optional - default: false + ## @env DD_RUNTIME_SECURITY_CONFIG_ACTIVITY_DUMP_ENABLED - boolean - optional - default: false + ## Set to true to activate the security profiles feature. + # + # enabled: false + + ## @param traced_cgroups_count - integer - optional - default: 5 + ## @env DD_RUNTIME_SECURITY_CONFIG_ACTIVITY_DUMP_TRACED_CGROUPS_COUNT - integer - optional - default: 5 + ## Defines the number of concurrent cgroups to be traced. + # + # traced_cgroups_count: 5 + + ## @param dump_duration - duration - optional - default: 30m + ## @env DD_RUNTIME_SECURITY_CONFIG_ACTIVITY_DUMP_DUMP_DURATION - duration - optional - default: 30m + ## Defines the duration of cgroups learning phase. Minimum value is 10m. + # + # dump_duration: 30m + + ## @param network - custom object - optional + ## Network section is used to configure Cloud Workload Security (CWS) network features. + # + # network: + + ## @param enabled - boolean - optional - default: true + ## @env DD_RUNTIME_SECURITY_CONFIG_NETWORK_ENABLED - boolean - optional - default: true + ## Set to true to activate the CWS network detections. + # + # enabled: true + +{{- if (eq .OS "windows")}} + +##################################################### +## Datadog Agent Windows Crash Detection module +##################################################### + +# windows_crash_detection: + ## @param enabled - boolean - optional - default: false + ## Enables the system probe module which supports the Windows crash detection check. 
+ # + # enabled: false +{{ end }} + +{{ end -}} +{{ end -}} + +{{- if .SecurityAgent -}} +#################################### +## Runtime Security configuration ## +#################################### + +# runtime_security_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable Cloud Workload Security (CWS). + # + # enabled: false + +{{- if (eq .OS "windows")}} + ## @param socket - string - optional - default: localhost:3334 + ## The local address and port where the security runtime module is accessed + # + # socket: localhost:3334 +{{else}} + ## @param socket - string - optional - default: /opt/datadog-agent/run/runtime-security.sock + ## The full path to the location of the unix socket where security runtime module is accessed. + # + # socket: /opt/datadog-agent/run/runtime-security.sock +{{ end }} + +########################################## +## Compliance monitoring configuration ## +########################################## + +# compliance_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable Cloud Security Posture Management (CSPM). + # + # enabled: false +{{ end -}} +{{- if .Dogstatsd }} + +############################# +## DogStatsD Configuration ## +############################# + +## @param use_dogstatsd - boolean - optional - default: true +## @env DD_USE_DOGSTATSD - boolean - optional - default: true +## Set this option to false to disable the Agent DogStatsD server. +# +# use_dogstatsd: true + +## @param dogstatsd_port - integer - optional - default: 8125 +## @env DD_DOGSTATSD_PORT - integer - optional - default: 8125 +## Override the Agent DogStatsD port. +## Note: Make sure your client is sending to the same UDP port. +# +# dogstatsd_port: 8125 + +## @param bind_host - string - optional - default: localhost +## @env DD_BIND_HOST - string - optional - default: localhost +## The host to listen on for Dogstatsd and traces. 
This is ignored by APM when
+## `apm_config.apm_non_local_traffic` is enabled and ignored by DogStatsD when `dogstatsd_non_local_traffic`
+## is enabled. The trace-agent uses this host to send metrics to.
+## The `localhost` default value is invalid in IPv6 environments where dogstatsd listens on "::1".
+## To solve this problem, ensure Dogstatsd is listening on IPv4 by setting this value to "127.0.0.1".
+#
+# bind_host: localhost
+
+{{- if (eq .OS "windows")}}
+## Please note that UDS receiver is not available in Windows.
+## Enabling this setting may result in unexpected behavior.
+## @param dogstatsd_socket - string - optional - default: ""
+## @env DD_DOGSTATSD_SOCKET - string - optional - default: ""
+## Listen for Dogstatsd metrics on a Unix Socket (*nix only).
+## Set to "" to disable this feature.
+#
+# dogstatsd_socket: ""
+{{else}}
+## @param dogstatsd_socket - string - optional - default: "/var/run/datadog/dsd.socket"
+## @env DD_DOGSTATSD_SOCKET - string - optional - default: "/var/run/datadog/dsd.socket"
+## Listen for Dogstatsd metrics on a Unix Socket (*nix only).
+## Set to "" to disable this feature.
+#
+# dogstatsd_socket: "/var/run/datadog/dsd.socket"
+{{ end }}
+
+## @param dogstatsd_origin_detection - boolean - optional - default: false
+## @env DD_DOGSTATSD_ORIGIN_DETECTION - boolean - optional - default: false
+## When using Unix Socket, DogStatsD can tag metrics with container metadata.
+## If running DogStatsD in a container, host PID mode (e.g. with --pid=host) is required.
+#
+# dogstatsd_origin_detection: false
+
+## @param dogstatsd_origin_detection_client - boolean - optional - default: false
+## @env DD_DOGSTATSD_ORIGIN_DETECTION_CLIENT - boolean - optional - default: false
+## Whether the Agent should use a client-provided container ID to enrich the metrics, events and service checks with container tags.
+## Note: This requires using a client compatible with DogStatsD protocol version 1.2.
+# +# dogstatsd_origin_detection_client: false + +## @param dogstatsd_buffer_size - integer - optional - default: 8192 +## @env DD_DOGSTATSD_BUFFER_SIZE - integer - optional - default: 8192 +## The buffer size use to receive statsd packets, in bytes. +# +# dogstatsd_buffer_size: 8192 + +## @param dogstatsd_non_local_traffic - boolean - optional - default: false +## @env DD_DOGSTATSD_NON_LOCAL_TRAFFIC - boolean - optional - default: false +## Set to true to make DogStatsD listen to non local UDP traffic. +# +# dogstatsd_non_local_traffic: false + +## @param dogstatsd_stats_enable - boolean - optional - default: false +## @env DD_DOGSTATSD_STATS_ENABLE - boolean - optional - default: false +## Publish DogStatsD's internal stats as Go expvars. +# +# dogstatsd_stats_enable: false + +## @param dogstatsd_logging_enabled - boolean - optional - default: true +## Set to true to write DogstatsD metrics received by the Agent to dogstats_stats log files. +## Requires `dogstatsd_stats_enable: true`. +# +# dogstatsd_logging_enabled: true + +## @param dogstatsd_log_file_max_size - custom - optional - default: 10MB +## Maximum size of dogstatsd log file. Use either a size (for example, 10MB) or +## provide value in bytes (for example, 10485760.) +# +# dogstatsd_log_file_max_size: 10MB + +## @param dogstatsd_queue_size - integer - optional - default: 1024 +## @env DD_DOGSTATSD_QUEUE_SIZE - integer - optional - default: 1024 +## Configure the internal queue size of the Dogstatsd server. +## Reducing the size of this queue will reduce the maximum memory usage of the +## Dogstatsd server but as a trade-off, it could increase the number of packet drops. +# +# dogstatsd_queue_size: 1024 + +## @param dogstatsd_stats_buffer - integer - optional - default: 10 +## @env DD_DOGSTATSD_STATS_BUFFER - integer - optional - default: 10 +## Set how many items should be in the DogStatsD's stats circular buffer. 
+#
+# dogstatsd_stats_buffer: 10
+
+## @param dogstatsd_stats_port - integer - optional - default: 5000
+## @env DD_DOGSTATSD_STATS_PORT - integer - optional - default: 5000
+## The port for the go_expvar server.
+#
+# dogstatsd_stats_port: 5000
+
+## @param dogstatsd_so_rcvbuf - integer - optional - default: 0
+## @env DD_DOGSTATSD_SO_RCVBUF - integer - optional - default: 0
+## The number of bytes allocated to DogStatsD's socket receive buffer (POSIX system only).
+## By default, the system sets this value. If you need to increase the size of this buffer
+## but keep the OS default value the same, you can set DogStatsD's receive buffer size here.
+## The maximum accepted value might change depending on the OS.
+#
+# dogstatsd_so_rcvbuf: 0
+
+## @param dogstatsd_metrics_stats_enable - boolean - optional - default: false
+## @env DD_DOGSTATSD_METRICS_STATS_ENABLE - boolean - optional - default: false
+## Set this parameter to true to have DogStatsD collect basic statistics (count/last seen)
+## about the metrics it processed. Use the Agent command "dogstatsd-stats" to visualize
+## those statistics.
+#
+# dogstatsd_metrics_stats_enable: false
+
+## @param dogstatsd_tags - list of key:value elements - optional
+## @env DD_DOGSTATSD_TAGS - list of key:value elements - optional
+## Additional tags to append to all metrics, events and service checks received by
+## this DogStatsD server.
+#
+# dogstatsd_tags:
+# - :
+#
+## @param dogstatsd_mapper_profiles - list of custom object - optional
+## @env DD_DOGSTATSD_MAPPER_PROFILES - list of custom object - optional
+## The profiles will be used to convert parts of metrics names into tags.
+## If a profile prefix is matched, other profiles won't be tried even if that profile's matching rules don't match.
+## The profiles and matching rules are processed in the order defined in this configuration. 
+## +## For each profile, following fields are available: +## name (required): profile name +## prefix (required): mapping only applies to metrics with the prefix. If set to `*`, it will match everything. +## mappings: mapping rules, see below. +## For each mapping, following fields are available: +## match (required): pattern for matching the incoming metric name e.g. `test.job.duration.*` +## match_type (optional): pattern type can be `wildcard` (default) or `regex` e.g. `test\.job\.(\w+)\.(.*)` +## name (required): the metric name the metric should be mapped to e.g. `test.job.duration` +## tags (optional): list of key:value pair of tag key and tag value +## The value can use $1, $2, etc, that will be replaced by the corresponding element capture by `match` pattern +## This alternative syntax can also be used: ${1}, ${2}, etc +# +# dogstatsd_mapper_profiles: +# - name: # e.g. "airflow", "consul", "some_database" +# prefix: # e.g. "airflow.", "consul.", "some_database." +# mappings: +# - match: # e.g. `test.job.duration.*` to match `test.job.duration.my_job_name` +# match_type: # e.g. `wildcard` or `regex` +# name: # e.g. `test.job.duration` +# tags: +# : # e.g. `job_name: "$1"`, $1 is replaced by value capture by * +# - match: 'test.worker.*.*.start_time' # to match `test.worker...start_time` +# name: 'test.worker.start_time' +# tags: +# worker_type: '$1' +# worker_name: '$2' +# - match: 'test\.task\.duration\.(\w+)\.(.*)' # no need to escape in yaml context using single quote +# match_type: regex +# name: 'test.task' +# tags: +# task_type: '$1' +# task_name: '$2' + +## @param dogstatsd_mapper_cache_size - integer - optional - default: 1000 +## @env DD_DOGSTATSD_MAPPER_CACHE_SIZE - integer - optional - default: 1000 +## Size of the cache (max number of mapping results) used by Dogstatsd mapping feature. 
+#
+# dogstatsd_mapper_cache_size: 1000
+
+## @param dogstatsd_entity_id_precedence - boolean - optional - default: false
+## @env DD_DOGSTATSD_ENTITY_ID_PRECEDENCE - boolean - optional - default: false
+## Disable enriching Dogstatsd metrics with tags from "origin detection" when Entity-ID is set.
+#
+# dogstatsd_entity_id_precedence: false
+
+
+## @param dogstatsd_no_aggregation_pipeline - boolean - optional - default: true
+## @env DD_DOGSTATSD_NO_AGGREGATION_PIPELINE - boolean - optional - default: true
+## Enable the no-aggregation pipeline in DogStatsD: a pipeline receiving metrics
+## with timestamp and forwarding them to the intake without extra processing except
+## for tagging.
+#
+# dogstatsd_no_aggregation_pipeline: true
+
+## @param dogstatsd_no_aggregation_pipeline_batch_size - integer - optional - default: 2048
+## @env DD_DOGSTATSD_NO_AGGREGATION_PIPELINE_BATCH_SIZE - integer - optional - default: 2048
+## How many metrics maximum in payloads sent by the no-aggregation pipeline to the intake.
+#
+# dogstatsd_no_aggregation_pipeline_batch_size: 2048
+
+## @param statsd_forward_host - string - optional - default: ""
+## @env DD_STATSD_FORWARD_HOST - string - optional - default: ""
+## Forward every packet received by the DogStatsD server to another statsd server.
+## WARNING: Make sure that forwarded packets are regular statsd packets and not "DogStatsD" packets,
+## as your other statsd server might not be able to handle them.
+#
+# statsd_forward_host: ""
+
+## @param statsd_forward_port - integer - optional - default: 0
+## @env DD_STATSD_FORWARD_PORT - integer - optional - default: 0
+## Port of the "statsd_forward_host" to forward StatsD packet to.
+#
+# statsd_forward_port: 0
+
+## @param statsd_metric_namespace - string - optional - default: ""
+## @env DD_STATSD_METRIC_NAMESPACE - string - optional - default: ""
+## Set a namespace for all StatsD metrics coming from this host. 
+## Each metric received is prefixed with the namespace before it's sent to Datadog. +# +# statsd_metric_namespace: "" + +{{ end -}} +{{- if .Metadata }} + +## @param metadata_providers - list of custom object - optional +## @env DD_METADATA_PROVIDERS - list of custom object - optional +## Metadata providers, add or remove from the list to enable or disable collection. +## Intervals are expressed in seconds. You can also set a provider's interval to 0 +## to disable it. +# +# metadata_providers: +# - name: k8s +# interval: 60 + +{{ end -}} +{{- if .JMX }} + +####################### +## JMX Configuration ## +####################### + +## @param jmx_custom_jars - list of strings - optional +## @env DD_JMX_CUSTOM_JARS - space separated list of strings - optional +## If you only run Autodiscovery tests, jmxfetch might fail to pick up custom_jar_paths +## set in the check templates. If that is the case, force custom jars here. +# +# jmx_custom_jars: +# - /jmx-jars/jboss-cli-client.jar + +## @param jmx_use_cgroup_memory_limit - boolean - optional - default: false +## @env DD_JMX_USE_CGROUP_MEMORY_LIMIT - boolean - optional - default: false +## When running in a memory cgroup, openjdk 8u131 and higher can automatically adjust +## its heap memory usage in accordance to the cgroup/container's memory limit. +## The Agent set a Xmx of 200MB if none is configured. +## Note: OpenJDK version < 8u131 or >= 10 as well as other JVMs might fail +## to start if this option is set. +# +# jmx_use_cgroup_memory_limit: false + +## @param jmx_use_container_support - boolean - optional - default: false +## @env DD_JMX_USE_CONTAINER_SUPPORT - boolean - optional - default: false +## When running in a container, openjdk 10 and higher can automatically detect +## container specific configuration instead of querying the operating system +## to adjust resources allotted to the JVM. +## Note: openjdk versions prior to 10 and other JVMs might fail to start if +## this option is set. 
+# +# jmx_use_container_support: false + +## @param jmx_max_ram_percentage - float - optional - default: 25.0 +## @env DD_JMX_MAX_RAM_PERCENTAGE - float - optional - default: 25.0 +## When running in a container with jmx_use_container_support enabled, the JVM can +## automatically declare the maximum heap size based off of a percentage of +## total container allocated memory. This option is overwritten if +## you use -Xmx to manually define the size of the heap. This option applies +## to containers with a total memory limit greater than ~250mb. If +## jmx_use_container_support is disabled this option has no effect. +# +# jmx_max_ram_percentage: 25.0 + +## @param jmx_log_file - string - optional +## @env DD_JMX_LOG_FILE - string - optional +## Path of the log file where JMXFetch logs are written. +# +# jmx_log_file: + +## @param jmx_max_restarts - integer - optional - default: 3 +## @env DD_JMX_MAX_RESTARTS - integer - optional - default: 3 +## Number of JMX restarts allowed in the restart-interval before giving up. +# +# jmx_max_restarts: 3 + +## @param jmx_restart_interval - integer - optional - default: 5 +## @env DD_JMX_RESTART_INTERVAL - integer - optional - default: 5 +## Duration of the restart interval in seconds. +# +# jmx_restart_interval: 5 + +## @param jmx_check_period - integer - optional - default: 15000 +## @env DD_JMX_CHECK_PERIOD - integer - optional - default: 15000 +## Duration of the period for check collections in milliseconds. +# +# jmx_check_period: 15000 + +## @param jmx_thread_pool_size - integer - optional - default: 3 +## @env DD_JMX_THREAD_POOL_SIZE - integer - optional - default: 3 +## JMXFetch collects multiples instances concurrently. Defines the maximum level of concurrency: +## * Higher concurrency increases CPU utilization during metric collection. +## * Lower concurrency results in lower CPU usage but may increase the total collection time. +## A value of 1 processes instances serially. 
+#
+# jmx_thread_pool_size: 3
+
+## @param jmx_collection_timeout - integer - optional - default: 60
+## @env DD_JMX_COLLECTION_TIMEOUT - integer - optional - default: 60
+## Defines the maximum waiting period in seconds before timing out on metric collection.
+#
+# jmx_collection_timeout: 60
+
+## @param jmx_reconnection_thread_pool_size - integer - optional - default: 3
+## @env DD_JMX_RECONNECTION_THREAD_POOL_SIZE - integer - optional - default: 3
+## JMXFetch reconnects to multiples instances concurrently. Defines the maximum level of concurrency:
+## * Higher concurrency increases CPU utilization during reconnection.
+## * Lower concurrency results in lower CPU usage but may increase the total reconnection time
+## A value of 1 processes instance reconnections serially.
+#
+# jmx_reconnection_thread_pool_size: 3
+
+## @param jmx_reconnection_timeout - integer - optional - default: 60
+## @env DD_JMX_RECONNECTION_TIMEOUT - integer - optional - default: 60
+## Determines the maximum waiting period in seconds before timing out on instance reconnection.
+#
+# jmx_reconnection_timeout: 60
+
+## @param jmx_statsd_telemetry_enabled - boolean - optional - default: false
+## @env DD_JMX_STATSD_TELEMETRY_ENABLED - boolean - optional - default: false
+## Specifies whether the JMXFetch statsd client telemetry is enabled.
+#
+# jmx_statsd_telemetry_enabled: false
+
+## @param jmx_telemetry_enabled - boolean - optional - default: false
+## @env DD_JMX_TELEMETRY_ENABLED - boolean - optional - default: false
+## Specifies whether additional JMXFetch telemetry is enabled.
+#
+# jmx_telemetry_enabled: false
+
+{{ end -}}
+{{- if .Logging }}
+
+###########################
+## Logging Configuration ##
+###########################
+
+## @param log_level - string - optional - default: info
+## @env DD_LOG_LEVEL - string - optional - default: info
+## Minimum log level of the Datadog Agent.
+## Valid log levels are: trace, debug, info, warn, error, critical, and off. 
+## Note: When using the 'off' log level, quotes are mandatory. +# +# log_level: 'info' + +## @param log_file - string - optional +## @env DD_LOG_FILE - string - optional +## Path of the log file for the Datadog Agent. +## See https://docs.datadoghq.com/agent/guide/agent-log-files/ +# +# log_file: + +## @param log_format_json - boolean - optional - default: false +## @env DD_LOG_FORMAT_JSON - boolean - optional - default: false +## Set to 'true' to output Agent logs in JSON format. +# +# log_format_json: false + +## @param log_to_console - boolean - optional - default: true +## @env DD_LOG_TO_CONSOLE - boolean - optional - default: true +## Set to 'false' to disable Agent logging to stdout. +# +# log_to_console: true + +## @param disable_file_logging - boolean - optional - default: false +## @env DD_DISABLE_FILE_LOGGING - boolean - optional - default: false +## Set to 'true' to disable logging to the log file. +# +# disable_file_logging: false + +## @param log_file_max_size - custom - optional - default: 10MB +## @env DD_LOG_FILE_MAX_SIZE - custom - optional - default: 10MB +## Maximum size of one log file. Use either a size (e.g. 10MB) or +## provide value in bytes: 10485760 +# +# log_file_max_size: 10MB + +## @param log_file_max_rolls - integer - optional - default: 1 +## @env DD_LOG_FILE_MAX_ROLLS - integer - optional - default: 1 +## Maximum amount of "old" log files to keep. +## Set to 0 to not limit the number of files to create. +# +# log_file_max_rolls: 1 + +## @param log_to_syslog - boolean - optional - default: false +## @env DD_LOG_TO_SYSLOG - boolean - optional - default: false +## Set to 'true' to enable logging to syslog. +## Note: Even if this option is set to 'false', the service launcher of your environment +## may redirect the Agent process' stdout/stderr to syslog. In that case, if you wish +## to disable logging to syslog entirely, set 'log_to_console' to 'false' as well. 
+# +# log_to_syslog: false + +## @param syslog_uri - string - optional +## @env DD_SYSLOG_URI - string - optional +## Define a custom remote syslog uri if needed. If 'syslog_uri' is left undefined/empty, +## a local domain socket connection is attempted. +# +# syslog_uri: + +## @param syslog_rfc - boolean - optional - default: false +## @env DD_SYSLOG_RFC - boolean - optional - default: false +## Set to 'true' to output in an RFC 5424-compliant format for Agent logs. +# +# syslog_rfc: false + +## @param syslog_pem - string - optional +## @env DD_SYSLOG_PEM - string - optional +## If TLS enabled, you must specify a path to a PEM certificate here. +# +# syslog_pem: + +## @param syslog_key - string - optional +## @env DD_SYSLOG_KEY - string - optional +## If TLS enabled, you must specify a path to a private key here. +# +# syslog_key: + +## @param syslog_tls_verify - boolean - optional - default: true +## @env DD_SYSLOG_TLS_VERIFY - boolean - optional - default: true +## If TLS enabled, you may enforce TLS verification here. +# +# syslog_tls_verify: true + +## @param log_format_rfc3339 - boolean - optional - default false +## @env DD_LOG_FORMAT_RFC3339 - boolean - optional - default false +## If enabled the Agent will log using the RFC3339 format for the log time. +# +# log_format_rfc3339: false + +## @param log_all_goroutines_when_unhealthy - boolean - optional - default false +## @env DD_LOG_ALL_GOROUTINES_WHEN_UNHEALTHY - boolean - optional - default false +## If enabled, when the health probe of an internal component fails, the stack traces +## of all the goroutines are logged. 
+#
+# log_all_goroutines_when_unhealthy: false
+
+{{ end -}}
+{{- if .Autoconfig }}
+
+##############################
+## Autoconfig Configuration ##
+##############################
+
+## @param autoconf_template_dir - string - optional - default: /datadog/check_configs
+## @env DD_AUTOCONF_TEMPLATE_DIR - string - optional - default: /datadog/check_configs
+## Directory containing configuration templates for Autoconfig.
+#
+# autoconf_template_dir: /datadog/check_configs
+
+## @param autoconf_config_files_poll - boolean - optional - default: false
+## @env DD_AUTOCONF_CONFIG_FILES_POLL - boolean - optional - default: false
+## Should the Agent check for new/updated integration configuration files on disk.
+## WARNING: Only files containing checks configuration are supported (logs configuration are not supported).
+#
+# autoconf_config_files_poll: false
+
+## @param autoconf_config_files_poll_interval - integer - optional - default: 60
+## @env DD_AUTOCONF_CONFIG_FILES_POLL_INTERVAL - integer - optional - default: 60
+## How frequently should the Agent check for new/updated integration configuration files (in seconds).
+## This value must be >= 1 (i.e. 1 second).
+## WARNING: Only files containing checks configuration are supported (logs configuration are not supported).
+#
+# autoconf_config_files_poll_interval: 60
+
+## @param config_providers - List of custom object - optional
+## @env DD_CONFIG_PROVIDERS - List of custom object - optional
+## The providers the Agent should call to collect checks configurations. Available providers are:
+## * kubelet - The kubelet provider handles templates embedded in pod annotations.
+## * docker - The Docker provider handles templates embedded in container labels.
+## * clusterchecks - The clustercheck provider retrieves cluster-level check configurations from the cluster-agent. 
+## * kube_services - The kube_services provider watches Kubernetes services for cluster-checks
+##
+## See https://docs.datadoghq.com/guides/autodiscovery/ to learn more
+#
+# config_providers:
+# - name: kubelet
+# polling: true
+# - name: docker
+# polling: true
+# - name: clusterchecks
+# grace_time_seconds: 60
{{ if .ClusterChecks }}
+# - name: kube_services
+# polling: true
{{ end -}}
+# - name: etcd
+# polling: true
+# template_dir: /datadog/check_configs
+# template_url: http://127.0.0.1
+# username:
+# password:
+# - name: consul
+# polling: true
+# template_dir: datadog/check_configs
+# template_url: http://127.0.0.1
+# ca_file:
+# ca_path:
+# cert_file:
+# key_file:
+# username:
+# password:
+# token:
+# - name: zookeeper
+# polling: true
+# template_dir: /datadog/check_configs
+# template_url: 127.0.0.1
+# username:
+# password:
+
+## @param extra_config_providers - list of strings - optional
+## @env DD_EXTRA_CONFIG_PROVIDERS - space separated list of strings - optional
+## Add additional config providers by name using their default settings, and polling enabled.
+## This list is available as an environment variable binding.
+#
+# extra_config_providers:
+# - clusterchecks
+
+## @param autoconfig_exclude_features - list of comma separated strings - optional
+## @env DD_AUTOCONFIG_EXCLUDE_FEATURES - list of space separated strings - optional
+## Exclude features automatically detected and enabled by environment autodiscovery.
+## Supported syntax is a list of `(:)`. Currently only the `name` attribute is supported.
+## When no attribute is present, it defaults to `name:` attribute. 
+# +# autoconfig_exclude_features: +# - cloudfoundry +# - containerd +# - cri +# - docker +# - ecsec2 +# - ecsfargate +# - eksfargate +# - kubernetes +# - orchestratorexplorer +# - podman + +## @param autoconfig_include_features - list of comma separated strings - optional +## @env DD_AUTOCONFIG_INCLUDE_FEATURES - list of space separated strings - optional +## Force activation of features (as if they were discovered by environment autodiscovery). +# +# autoconfig_include_features: +# - cloudfoundry +# - containerd +# - cri +# - docker +# - ecsec2 +# - ecsfargate +# - eksfargate +# - kubernetes +# - orchestratorexplorer +# - podman + +{{ end -}} +{{- if .Autodiscovery }} + +########################################### +## Container Autodiscovery Configuration ## +########################################### + +## @param container_cgroup_root - string - optional - default: /host/sys/fs/cgroup/ +## @env DD_CONTAINER_CGROUP_ROOT - string - optional - default: /host/sys/fs/cgroup/ +## Change the root directory to look at to get cgroup statistics. +## Default if environment variable "DOCKER_DD_AGENT" is set to "/host/sys/fs/cgroup" +## and "/sys/fs/cgroup" if not. +# +# container_cgroup_root: /host/sys/fs/cgroup/ + +## @param container_proc_root - string - optional - default: /host/proc +## @env DD_CONTAINER_PROC_ROOT - string - optional - default: /host/proc +## Change the root directory to look at to get proc statistics. +## Default if environment variable "DOCKER_DD_AGENT" is set "/host/proc" and "/proc" if not. 
+# +# container_proc_root: /host/proc + +## @param listeners - list of key:value elements - optional +## @env DD_LISTENERS - list of key:value elements - optional +## Choose "auto" if you want to let the Agent find any relevant listener on your host +## At the moment, the only auto listener supported is Docker +## If you have already set Docker anywhere in the listeners, the auto listener is ignored +# +# listeners: +# - name: auto +# - name: docker + +## @param extra_listeners - list of strings - optional +## @env DD_EXTRA_LISTENERS - space separated list of strings - optional +## You can also add additional listeners by name using their default settings. +## This list is available as an environment variable binding. +# +# extra_listeners: +# - kubelet + +## @param ac_exclude - list of comma separated strings - optional +## @env DD_AC_EXCLUDE - list of space separated strings - optional +## Exclude containers from metrics and AD based on their name or image. +## If a container matches an exclude rule, it won't be included unless it first matches an include rule. +## An excluded container won't get any individual container metric reported for it. +## See: https://docs.datadoghq.com/agent/guide/autodiscovery-management/ +# +# ac_exclude: [] + +## @param ac_include - list of comma separated strings - optional +## @env DD_AC_INCLUDE - list of space separated strings - optional +## Include containers from metrics and AD based on their name or image: +## See: https://docs.datadoghq.com/agent/guide/autodiscovery-management/ +# +# ac_include: [] + +## @param exclude_pause_container - boolean - optional - default: true +## @env DD_EXCLUDE_PAUSE_CONTAINER - boolean - optional - default: true +## Exclude default pause containers from orchestrators. +## By default the Agent doesn't monitor kubernetes/openshift pause container. +## They are still counted in the container count (just like excluded containers). 
+#
+# exclude_pause_container: true
+
+## @param docker_query_timeout - integer - optional - default: 5
+## @env DD_DOCKER_QUERY_TIMEOUT - integer - optional - default: 5
+## Set the default timeout value when connecting to the Docker daemon.
+#
+# docker_query_timeout: 5
+
+## @param ad_config_poll_interval - integer - optional - default: 10
+## @env DD_AD_CONFIG_POLL_INTERVAL - integer - optional - default: 10
+## The default interval in second to check for new autodiscovery configurations
+## on all registered configuration providers.
+#
+# ad_config_poll_interval: 10
+
+## @param cloud_foundry_garden - custom object - optional
+## Settings for Cloudfoundry application container autodiscovery.
+#
+# cloud_foundry_garden:
+
+ ## @param listen_network - string - optional - default: unix
+ ## @env DD_CLOUD_FOUNDRY_GARDEN_LISTEN_NETWORK - string - optional - default: unix
+ ## The network on which the garden API is listening. Possible values are `unix` or `tcp`
+ #
+ # listen_network: unix
+
+ ## @param listen_address - string - optional - default: /var/vcap/data/garden/garden.sock
+ ## @env DD_CLOUD_FOUNDRY_GARDEN_LISTEN_ADDRESS - string - optional - default: /var/vcap/data/garden/garden.sock
+ ## The address on which the garden API is listening.
+ #
+ # listen_address: /var/vcap/data/garden/garden.sock
+
+## @param podman_db_path - string - optional - default: ""
+## @env DD_PODMAN_DB_PATH - string - optional - default: ""
+## Path to the Podman DB from which the Datadog Agent collects container metrics.
+#
+# podman_db_path: ""
+
+{{ end -}}
+{{- if .ClusterAgent }}
+
+#################################
+## Cluster Agent Configuration ##
+#################################
+
+## @param cluster_agent - custom object - optional
+## Settings for the Cluster Agent.
+## See https://docs.datadoghq.com/agent/cluster_agent/
+#
+# cluster_agent:
+
+ ## @param enabled - boolean - optional - default: false
+ ## Set to true to enable the Cluster Agent. 
+ #
+ # enabled: false
+
+ ## @param auth_token - string - optional - default: ""
+ ## Auth token used to make requests to the Kubernetes API server.
+ #
+ # auth_token: ""
+
+ ## @param url - string - optional - default: ""
+ ## The Cluster Agent endpoint. There's no need to set it if "kubernetes_service_name" is set.
+ #
+ # url: ""
+
+ ## @param kubernetes_service_name - string - optional - default: "datadog-cluster-agent"
+ ## Name of the Kubernetes service for the Cluster Agent.
+ #
+ # kubernetes_service_name: "datadog-cluster-agent"
+
+ ## @param max_leader_connections - integer - optional - default: 100
+ ## Maximum number of connections between a follower and a leader.
+ #
+ # max_leader_connections: 100
+
+ ## @param client_reconnect_period_seconds - integer - optional - default: 1200
+ ## Set the refresh period for Agent to Cluster Agent connection (new connection is created, old connection is closed).
+ ## Set to 0 to disable periodic reconnection.
+ #
+ # client_reconnect_period_seconds: 1200
+
+ ## @param tagging_fallback - boolean - optional - default: false
+ ## Set to true to enable fallback to local metamapper when the connection with the Cluster Agent fails.
+ #
+ # tagging_fallback: false
+
+ ## @param server - custom object - optional
+ ## Sets the connection timeouts
+ #
+ # server:
+
+ ## @param read_timeout_seconds - integer - optional - default: 2
+ ## Read timeout in seconds.
+ #
+ # read_timeout_seconds: 2
+
+ ## @param write_timeout_seconds - integer - optional - default: 2
+ ## Write timeout in seconds.
+ #
+ # write_timeout_seconds: 2
+
+ ## @param idle_timeout_seconds - integer - optional - default: 60
+ ## Idle timeout in seconds.
+ #
+ # idle_timeout_seconds: 60
+
+{{ end -}}
+{{- if .ClusterChecks }}
+
+#################################
+## Cluster check Configuration ##
+#################################
+
+## @param cluster_checks - custom object - optional
+## Enter specific configurations for your cluster check. 
+## The cluster-agent is able to autodiscover cluster resources and dispatch checks on
+## the node-agents (provided the clustercheck config provider is enabled on them).
+## Uncomment this parameter and the one below to enable them.
+## See https://docs.datadoghq.com/agent/kubernetes/cluster/
+#
+# cluster_checks:
+
+ ## @param enabled - boolean - optional - default: false
+ ## @env DD_CLUSTER_CHECKS_ENABLED - boolean - optional - default: false
+ ## Set to true to enable the dispatching logic on the leader cluster-agent.
+ #
+ # enabled: false
+
+ ## @param node_expiration_timeout - integer - optional - default: 30
+ ## @env DD_CLUSTER_CHECKS_NODE_EXPIRATION_TIMEOUT - integer - optional - default: 30
+ ## Set "node_expiration_timeout" time in second after which Node-agents that have not
+ ## queried the cluster-agent are deleted, and their checks re-dispatched to other nodes.
+ #
+ # node_expiration_timeout: 30
+
+ ## @param warmup_duration - integer - optional - default: 30
+ ## @env DD_CLUSTER_CHECKS_WARMUP_DURATION - integer - optional - default: 30
+ ## Set the "warmup_duration" duration in second for the cluster-agent to wait for all
+ ## node-agents to report to it before dispatching configurations.
+ #
+ # warmup_duration: 30
+
+ ## @param cluster_tag_name - string - optional - default: cluster_name
+ ## @env DD_CLUSTER_CHECKS_CLUSTER_TAG_NAME - string - optional - default: cluster_name
+ ## If a cluster_name value is set or autodetected, a "<cluster_tag_name>:<cluster_name>" tag is added
+ ## to all cluster-check configurations sent to the node-agents.
+ ## Set a custom tag name here, or disable it by setting an empty name.
+ #
+ # cluster_tag_name: cluster_name
+
+ ## @param extra_tags - list of key:value elements - optional
+ ## @env DD_CLUSTER_CHECKS_EXTRA_TAGS - list of key:value elements - optional
+ ## Set a list of additional tags to be added to every cluster-check configuration. 
+ # + # extra_tags: + # - : + + ## @param advanced_dispatching_enabled - boolean - optional - default: false + ## @env DD_CLUSTER_CHECKS_ADVANCED_DISPATCHING_ENABLED - boolean - optional - default: false + ## If advanced_dispatching_enabled is true the leader cluster-agent collects stats + ## from the cluster level check runners to optimize the check dispatching logic. + # + # advanced_dispatching_enabled: false + + ## @param clc_runners_port - integer - optional - default: 5005 + ## @env DD_CLUSTER_CHECKS_CLC_RUNNERS_PORT - integer - optional - default: 5005 + ## Set the "clc_runners_port" used by the cluster-agent client to reach cluster level + ## check runners and collect their stats. + # + # clc_runners_port: 5005 + +{{ end -}} +{{- if .AdmissionController }} + +######################################## +## Admission controller Configuration ## +######################################## + +## @param admission_controller - custom object - optional +## Enter specific configurations for your admission controller. +## The Datadog admission controller is a component of the Datadog Cluster Agent. +## It has two main functionalities: +## Inject environment variables (DD_AGENT_HOST and DD_ENTITY_ID) to configure DogStatsD and APM tracer libraries into your application containers. +## Inject Datadog reserved tags (env, service, version) from application labels into the container environment variables. +## Uncomment this parameter and the one below to enable it. +## See https://docs.datadoghq.com/agent/cluster_agent/admission_controller/ +# +# admission_controller: + + ## @param enabled - boolean - optional - default: false + ## @env DD_ADMISSION_CONTROLLER_ENABLED - boolean - optional - default: false + ## Set to true to enable the admission controller in the cluster-agent. + # + # enabled: false + + ## @param validation - custom object - optional + ## The admission controller's validation configuration. 
+ #
+ # validation:
+
+ ## @param enabled - boolean - optional - default: true
+ ## @env DD_ADMISSION_CONTROLLER_VALIDATION_ENABLED - boolean - optional - default: true
+ ## Set to true to enable validation webhooks controller in the cluster-agent.
+ #
+ # enabled: true
+
+ ## @param mutation - custom object - optional
+ ## The admission controller's mutation configuration.
+ #
+ # mutation:
+
+ ## @param enabled - boolean - optional - default: true
+ ## @env DD_ADMISSION_CONTROLLER_MUTATION_ENABLED - boolean - optional - default: true
+ ## Set to true to enable mutation webhooks controller in the cluster-agent.
+ #
+ # enabled: true
+
+ ## @param mutate_unlabelled - boolean - optional - default: false
+ ## @env DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED - boolean - optional - default: false
+ ## Enable injecting config without having the pod label admission.datadoghq.com/enabled="true".
+ #
+ # mutate_unlabelled: false
+
+ ## @param port - integer - optional - default: 8000
+ ## @env DD_ADMISSION_CONTROLLER_PORT - integer - optional - default: 8000
+ ## The admission controller server port.
+ #
+ # port: 8000
+
+ ## @param timeout_seconds - integer - optional - default: 10
+ ## @env DD_ADMISSION_CONTROLLER_TIMEOUT_SECONDS - integer - optional - default: 10
+ ## The admission controller server timeout in seconds.
+ #
+ # timeout_seconds: 10
+
+ ## @param service_name - string - optional - default: datadog-admission-controller
+ ## @env DD_ADMISSION_CONTROLLER_SERVICE_NAME - string - optional - default: datadog-admission-controller
+ ## The name of the Kubernetes service that exposes the admission controller.
+ #
+ # service_name: datadog-admission-controller
+
+ ## @param webhook_name - string - optional - default: datadog-webhook
+ ## @env DD_ADMISSION_CONTROLLER_WEBHOOK_NAME - string - optional - default: datadog-webhook
+ ## The name of the Kubernetes webhook object. 
+ #
+ # webhook_name: datadog-webhook
+
+ ## @param pod_owners_cache_validity - integer - optional - default: 10
+ ## @env DD_ADMISSION_CONTROLLER_POD_OWNERS_CACHE_VALIDITY - integer - optional - default: 10
+ ## The in-memory cache TTL for pod owners in minutes.
+ #
+ # pod_owners_cache_validity: 10
+
+ ## @param namespace_selector_fallback - boolean - optional - default: false
+ ## @env DD_ADMISSION_CONTROLLER_NAMESPACE_SELECTOR_FALLBACK - boolean - optional - default: false
+ ## Use namespace selectors instead of object selectors to watch objects.
+ ## For Kubernetes versions from 1.10 to 1.14 (inclusive)
+ #
+ # namespace_selector_fallback: false
+
+ ## @param certificate - custom object - optional
+ ## The webhook's certificate configuration.
+ #
+ # certificate:
+
+ ## @param validity_bound - integer - optional - default: 8760
+ ## @env DD_ADMISSION_CONTROLLER_CERTIFICATE_VALIDITY_BOUND - integer - optional - default: 8760
+ ## The certificate's validity bound in hours, default 1 year (365*24).
+ #
+ # validity_bound: 8760
+
+ ## @param expiration_threshold - integer - optional - default: 720
+ ## @env DD_ADMISSION_CONTROLLER_CERTIFICATE_EXPIRATION_THRESHOLD - integer - optional - default: 720
+ ## The certificate's refresh threshold in hours, default 1 month (30*24).
+ #
+ # expiration_threshold: 720
+
+ ## @param secret_name - string - optional - default: webhook-certificate
+ ## @env DD_ADMISSION_CONTROLLER_CERTIFICATE_SECRET_NAME - string - optional - default: webhook-certificate
+ ## Name of the Secret object containing the webhook certificate.
+ #
+ # secret_name: webhook-certificate
+
+ ## @param inject_config - custom object - optional
+ ## Configuration injection parameters. 
+ # + # inject_config: + + ## @param enabled - boolean - optional - default: true + ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_ENABLED - boolean - optional - default: true + ## Enable configuration injection (configure DogStatsD and APM tracer libraries). + # + # enabled: true + + ## @param endpoint - string - optional - default: /injectconfig + ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_ENDPOINT - string - optional - default: /injectconfig + ## Admission controller's endpoint responsible for handling configuration injection requests. + # + # endpoint: /injectconfig + + ## @param mode - string - optional - default: hostip + ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_MODE - string - optional - default: hostip + ## The kind of configuration to be injected, it can be "hostip", "service", or "socket". + # + # mode: hostip + + ## @param local_service_name - string - optional - default: datadog + ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_LOCAL_SERVICE_NAME - string - optional - default: datadog + ## Configure the local service name that exposes the Datadog Agent. Only applicable in "service" mode. + # + # local_service_name: datadog + + ## @param socket_path - string - optional - default: /var/run/datadog + ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_SOCKET_PATH - string - optional - default: /var/run/datadog + ## Configure Datadog Agent's socket path. Only applicable in "socket" mode. + # + # socket_path: /var/run/datadog + + ## @param trace_agent_socket - string - optional - default: unix:///var/run/datadog/apm.socket + ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_TRACE_AGENT_SOCKET - string - optional - default: unix:///var/run/datadog/apm.socket + ## Configure Trace Agent's socket path in the app container (DD_TRACE_AGENT_URL). + ## Only applicable in "socket" mode. 
+ # + # trace_agent_socket: unix:///var/run/datadog/apm.socket + + ## @param type_socket_volumes - boolean - optional - default: false + ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_TYPE_SOCKET_VOLUMES - boolean - optional - default: false + ## When enabled, injected volumes are of type "Socket". This means that + ## injected pods will not start until the Agent creates the dogstatsd and + ## trace-agent sockets. This ensures no lost traces or dogstatsd metrics but + ## can cause the pod to wait if the agent has issues creating the sockets. + # + # type_socket_volumes: false + + ## @param inject_tags - custom object - optional + ## Tags injection parameters. + # + # inject_tags: + + ## @param enabled - boolean - optional - default: true + ## @env DD_ADMISSION_CONTROLLER_INJECT_TAGS_ENABLED - boolean - optional - default: true + ## Enable standard tags injection. + # + # enabled: true + + ## @param endpoint - string - optional - default: /injecttags + ## @env DD_ADMISSION_CONTROLLER_INJECT_TAGS_ENDPOINT - string - optional - default: /injecttags + ## Admission controller's endpoint responsible for handling tags injection requests. + # + # endpoint: /injecttags + + ## @param failure_policy - string - optional - default: Ignore + ## @env DD_ADMISSION_CONTROLLER_FAILURE_POLICY - string - optional - default: Ignore + ## Set the failure policy for dynamic admission control. + ## The default of Ignore means that pods will still be admitted even if the webhook is unavailable to inject them. + ## Setting to Fail will require the admission controller to be present and pods to be injected before they are allowed to run. + # + # failure_policy: Ignore + + ## @param reinvocation_policy - string - optional - default: IfNeeded + ## @env DD_ADMISSION_CONTROLLER_REINVOCATION_POLICY - string - optional - default: IfNeeded + ## Set the reinvocation policy for dynamic admission control. 
+ ## See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#reinvocation-policy + # + # reinvocation_policy: IfNeeded + + ## @param add_aks_selectors - boolean - optional - default: false + ## @env DD_ADMISSION_CONTROLLER_ADD_AKS_SELECTORS - boolean - optional - default: false + ## Adds in the admission controller webhook the selectors that are required in AKS. + ## See https://docs.microsoft.com/en-us/azure/aks/faq#can-i-use-admission-controller-webhooks-on-aks + # + # add_aks_selectors: false + + ## @param auto_instrumentation - custom object - optional + ## Library injection parameters. + # + # auto_instrumentation: + + ## @param init_resources - custom object - optional + ## CPU and Memory resources of the init containers. + # + # init_resources: + + ## @param cpu - string - optional + ## @env DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_RESOURCES_CPU - string - optional + ## Configures the CPU request and limit for the init containers. + # + # cpu: + + ## @param memory - string - optional + ## @env DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_RESOURCES_MEMORY - string - optional + ## Configures the memory request and limit for the init containers. + # + # memory: + + ## @param init_security_context - json - optional + ## @env DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_SECURITY_CONTEXT - json - optional + ## Security context for the init containers in JSON format. Follows the Kubernetes security context spec, + ## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#securitycontext-v1-core, + ## ignores unknown properties. 
+ #
+ # init_security_context: '{"privileged": false}'
+ #
+ # DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_SECURITY_CONTEXT='{"privileged": false}'
+{{ end -}}
+{{- if .DockerTagging }}
+
+#########################
+## Container detection ##
+#########################
+
+## @param container_cgroup_prefix - string - optional - default: /docker/
+## @env DD_CONTAINER_CGROUP_PREFIX - string - optional - default: /docker/
+## On hosts with mixed workloads, non-containerized processes can
+## mistakenly be detected as containerized. Use this parameter to
+## tune the detection logic to your system and avoid false-positives.
+#
+# container_cgroup_prefix: "/docker/"
+
+###########################
+## Docker tag extraction ##
+###########################
+
+## @param docker_labels_as_tags - map - optional
+## @env DD_DOCKER_LABELS_AS_TAGS - json - optional
+## The Agent can extract container label values and set them as metric tags values associated to a <CONTAINER_IDENTIFIER>.
+## If you prefix your tag name with `+`, it will only be added to high cardinality metrics (Docker check).
+#
+# docker_labels_as_tags:
+#   <LABEL_NAME>: <TAG_KEY>
+#   <LABEL_NAME>: <TAG_KEY>
+
+#
+# DD_DOCKER_LABELS_AS_TAGS='{"LABEL_NAME":"tag_key"}'
+
+## @param docker_env_as_tags - map - optional
+## @env DD_DOCKER_ENV_AS_TAGS - json - optional
+## The Agent can extract environment variables values and set them as metric tags values associated to a <CONTAINER_IDENTIFIER>.
+## If you prefix your tag name with `+`, it will only be added to high cardinality metrics (Docker check).
+#
+# docker_env_as_tags:
+#   <ENVVAR_NAME>: <TAG_KEY>
+#
+# DD_DOCKER_ENV_AS_TAGS='{"ENVVAR_NAME": "tag_key"}'
+
+{{ end -}}
+{{- if .KubernetesTagging }}
+
+###############################
+## Kubernetes tag extraction ##
+###############################
+
+## @param kubernetes_pod_labels_as_tags - map - optional
+## @env DD_KUBERNETES_POD_LABELS_AS_TAGS - json - optional
+## The Agent can extract pod labels values and set them as metric tags values associated to a <POD_IDENTIFIER>.
+## If you prefix your tag name with +, it will only be added to high cardinality metrics.
+#
+# kubernetes_pod_labels_as_tags:
+#   <POD_LABEL>: <TAG_KEY>
+#   <POD_LABEL>: <TAG_KEY>
+
+#
+# DD_KUBERNETES_POD_LABELS_AS_TAGS='{"LABEL_NAME":"tag_key"}'
+
+## @param kubernetes_pod_annotations_as_tags - map - optional
+## @env DD_KUBERNETES_POD_ANNOTATIONS_AS_TAGS - json - optional
+## The Agent can extract annotations values and set them as metric tags values associated to a <POD_IDENTIFIER>.
+## If you prefix your tag name with +, it will only be added to high cardinality metrics.
+#
+# kubernetes_pod_annotations_as_tags:
+#   <POD_ANNOTATION>: <TAG_KEY>
+#   <POD_ANNOTATION>: <TAG_KEY>
+
+#
+# DD_KUBERNETES_POD_ANNOTATIONS_AS_TAGS='{"ANNOTATION_NAME":"tag_key"}'
+
+## @param kubernetes_namespace_labels_as_tags - map - optional
+## @env DD_KUBERNETES_NAMESPACE_LABELS_AS_TAGS - json - optional
+## The Agent can extract namespace label values and set them as metric tags values associated to a <NAMESPACE_IDENTIFIER>.
+## If you prefix your tag name with +, it will only be added to high cardinality metrics.
+#
+# kubernetes_namespace_labels_as_tags:
+#   <NAMESPACE_LABEL>: <TAG_KEY>
+#   <NAMESPACE_LABEL>: <TAG_KEY>
+
+#
+# DD_KUBERNETES_NAMESPACE_LABELS_AS_TAGS='{"<NAMESPACE_LABEL>": "<TAG_KEY>"}'
+
+## @param container_env_as_tags - map - optional
+## @env DD_CONTAINER_ENV_AS_TAGS - map - optional
+## The Agent can extract environment variable values and set them as metric tags values associated to a <CONTAINER_IDENTIFIER>.
+## Requires the container runtime socket to be reachable. (Supported container runtimes: Containerd, Docker)
+#
+# container_env_as_tags:
+#   <ENV_VARIABLE_NAME>: <TAG_KEY>
+
+## @param container_labels_as_tags - map - optional
+## @env DD_CONTAINER_LABELS_AS_TAGS - map - optional
+## The Agent can extract container label values and set them as metric tags values associated to a <CONTAINER_IDENTIFIER>.
+## If you prefix your tag name with `+`, it will only be added to high cardinality metrics. (Supported container
+## runtimes: Containerd, Docker).
+# +# container_labels_as_tags: +# : +# : + + +{{ end -}} +{{- if .ECS }} + +################################### +## ECS integration Configuration ## +################################### + +## @param ecs_agent_container_name - string - optional - default: ecs-agent +## @env DD_ECS_AGENT_CONTAINER_NAME - string - optional - default: ecs-agent +## The ECS Agent container should be autodetected when running with the +## default (ecs-agent) name. If not, change the container name here: +# +# ecs_agent_container_name: ecs-agent + +## @param ecs_agent_url - string - optional - default: http://localhost:51678 +## @env DD_ECS_AGENT_URL - string - optional - default: http://localhost:51678 +## The ECS Agent container should be autodetected when running with the +## default (ecs-agent) name. If not, change the container name the +## Agent should look for with ecs_agent_container_name, or force a fixed url here: +# +# ecs_agent_url: http://localhost:51678 + +## @param ecs_collect_resource_tags_ec2 - boolean - optional - default: false +## @env DD_ECS_COLLECT_RESOURCE_TAGS_EC2 - boolean - optional - default: false +## The Agent can collect resource tags from the metadata API exposed by the +## ECS Agent for tasks scheduled with the EC2 launch type. +# +# ecs_collect_resource_tags_ec2: false + +## @param ecs_resource_tags_replace_colon - boolean - optional - default: false +## @env DD_ECS_RESOURCE_TAGS_REPLACE_COLON - boolean - optional - default: false +## The Agent replaces colon `:` characters in the ECS resource tag keys by underscores `_`. +# +# ecs_resource_tags_replace_colon: false + +## @param ecs_metadata_timeout - integer - optional - default: 500 +## @env DD_ECS_METADATA_TIMEOUT - integer - optional - default: 500 +## Timeout in milliseconds on calls to the AWS ECS metadata endpoints. 
+# +# ecs_metadata_timeout: 500 + +## @param ecs_task_collection_enabled - boolean - optional - default: false +## @env DD_ECS_TASK_COLLECTION_ENABLED - boolean - optional - default: false +## The Agent can collect detailed task information from the metadata API exposed by the ECS Agent, +## which is used for the orchestrator ECS check. +# +# ecs_task_collection_enabled: false + +{{ end -}} +{{- if .CRI }} + +################################### +## CRI integration Configuration ## +################################### + +## @param cri_socket_path - string - optional - default: "" +## @env DD_CRI_SOCKET_PATH - string - optional - default: "" +## To activate the CRI check, indicate the path of the CRI socket you're using +## and mount it in the container if needed. +## If left empty, the CRI check is disabled. +## see: https://docs.datadoghq.com/integrations/cri/ +# +# cri_socket_path: "" + +## @param cri_connection_timeout - integer - optional - default: 1 +## @env DD_CRI_CONNECTION_TIMEOUT - integer - optional - default: 1 +## Configure the initial connection timeout in seconds. +# +# cri_connection_timeout: 1 + +## @param cri_query_timeout - integer - optional - default: 5 +## @env DD_CRI_QUERY_TIMEOUT - integer - optional - default: 5 +## Configure the timeout in seconds for querying the CRI. +# +# cri_query_timeout: 5 + +{{ end -}} +{{- if .Containerd}} + +########################################## +## Containerd integration Configuration ## +########################################## + +## @param cri_socket_path - string - optional - default: /var/run/containerd/containerd.sock +## @env DD_CRI_SOCKET_PATH - string - optional - default: /var/run/containerd/containerd.sock +## To activate the Containerd check, indicate the path of the Containerd socket you're using +## and mount it in the container if needed. 
+## see: https://docs.datadoghq.com/integrations/containerd/ +# +# cri_socket_path: /var/run/containerd/containerd.sock + +## @param cri_query_timeout - integer - optional - default: 5 +## @env DD_CRI_QUERY_TIMEOUT - integer - optional - default: 5 +## Configure the timeout in seconds for querying the Containerd API. +# +# cri_query_timeout: 5 + +## Deprecated - use `containerd_namespaces` instead +## @param containerd_namespace - list of strings - optional - default: [] +## @env DD_CONTAINERD_NAMESPACE - space separated list of strings - optional - default: [] +## Activating the Containerd check also activates the CRI check, as it contains an additional subset of useful metrics. +## Defaults to [] which configures the agent to report metrics and events from all the containerd namespaces. +## To watch specific namespaces, list them here. +## https://github.com/containerd/cri/blob/release/1.2/pkg/constants/constants.go#L22-L23 +# +# containerd_namespace: +# - k8s.io + +## @param containerd_namespaces - list of strings - optional - default: [] +## @env DD_CONTAINERD_NAMESPACES - space separated list of strings - optional - default: [] +## Activating the Containerd check also activates the CRI check, as it contains an additional subset of useful metrics. +## Defaults to [] which configures the agent to report metrics and events from all the containerd namespaces. +## containerd_namespaces acts as an alias for containerd_namespace. When both containerd_namespaces and containerd_namespace +## are configured, the Agent merges the two lists. +# +# containerd_namespaces: +# - k8s.io +# +## @param containerd_exclude_namespaces - list of strings - optional - default: ["moby"] +## @env DD_CONTAINERD_EXCLUDE_NAMESPACES - space separated list of strings - optional - default: ["moby"] +## When containerd_namespaces is set to [], containerd_exclude_namespaces +## allows the exclusion of containers from specific namespaces. 
By default it
+## excludes "moby", to prevent Docker containers from being detected as
+## containerd containers.
+#
+# containerd_exclude_namespaces:
+#   - moby
+
+{{ end -}}
+{{- if .Kubelet }}
+
+###################################################
+## Kubernetes kubelet connectivity Configuration ##
+###################################################
+
+## @param kubernetes_kubelet_host - string - optional
+## @env DD_KUBERNETES_KUBELET_HOST - string - optional
+## The kubelet host should be autodetected when running inside a pod.
+## If you run into connectivity issues, set the host here according to your cluster setup.
+#
+# kubernetes_kubelet_host:
+
+## @param kubernetes_http_kubelet_port - integer - optional - default: 10255
+## @env DD_KUBERNETES_HTTP_KUBELET_PORT - integer - optional - default: 10255
+## The kubelet http port should be autodetected when running inside a pod.
+## If you run into connectivity issues, set the http port here according to your cluster setup.
+#
+# kubernetes_http_kubelet_port: 10255
+
+## @param kubernetes_https_kubelet_port - integer - optional - default: 10250
+## @env DD_KUBERNETES_HTTPS_KUBELET_PORT - integer - optional - default: 10250
+## The kubelet https port should be autodetected when running inside a pod.
+## If you run into connectivity issues, set the https port here according to your cluster setup.
+#
+# kubernetes_https_kubelet_port: 10250
+
+## @param kubelet_tls_verify - boolean - optional - default: true
+## @env DD_KUBELET_TLS_VERIFY - boolean - optional - default: true
+## Set to false if you don't want the Agent to verify the kubelet's certificate when using HTTPS.
+#
+# kubelet_tls_verify: true
+
+## @param kubelet_client_ca - string - optional - default: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+## @env DD_KUBELET_CLIENT_CA - string - optional - default: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+## Kubelet client CA file path.
+# +# kubelet_client_ca: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + +## @param kubelet_auth_token_path - string - optional +## @env DD_KUBELET_AUTH_TOKEN_PATH - string - optional +## If authentication is needed, the Agent uses the pod's service account's +## credentials. If you want to use a different account, or are running the Agent +## on the host, set a custom token file path here. +# +# kubelet_auth_token_path: + +## @param kubelet_client_crt - string - optional +## @env DD_KUBELET_CLIENT_CRT - string - optional +## Set a custom Client CRT file path. +# +# kubelet_client_crt: + +## @param kubelet_client_key - string - optional +## @env DD_KUBELET_CLIENT_KEY - string - optional +## Set a custom Client key file path. +# +# kubelet_client_key: + +## @param kubelet_wait_on_missing_container - integer - optional - default: 0 +## @env DD_KUBELET_WAIT_ON_MISSING_CONTAINER - integer - optional - default: 0 +## On some kubelet versions, containers can take up to a second to +## register in the podlist. This option allows to wait for up to a given +## number of seconds (in 250ms chunks) when a container does not exist in the podlist. +# +# kubelet_wait_on_missing_container: 0 + +## @param kubelet_cache_pods_duration - integer - optional - default: 5 +## @env DD_KUBELET_CACHE_PODS_DURATION - integer - optional - default: 5 +## Polling frequency in seconds of the Agent to the kubelet "/pods" endpoint. +# +# kubelet_cache_pods_duration: 5 + +## @param kubernetes_pod_expiration_duration - integer - optional - default: 900 +## @env DD_KUBERNETES_POD_EXPIRATION_DURATION - integer - optional - default: 900 +## Set the time in second after which the Agent ignores the pods that have exited. +## Set the duration to 0 to disable this filtering. 
+# +# kubernetes_pod_expiration_duration: 900 + +## @param kubelet_listener_polling_interval - integer - optional - default: 5 +## @env DD_KUBELET_LISTENER_POLLING_INTERVAL - integer - optional - default: 5 +## Polling frequency in seconds at which autodiscovery will query the pod watcher to detect new pods/containers. +## Note that kubelet_cache_pods_duration needs to be lower than this setting, or autodiscovery will only poll more frequently the same cached data (kubelet_cache_pods_duration controls the cache refresh frequency). +# +# kubelet_listener_polling_interval: 5 + +{{ end -}} +{{- if .KubeApiServer }} + +#################################################### +## Kubernetes apiserver integration Configuration ## +#################################################### + +## @param kubernetes_kubeconfig_path - string - optional - default: "" +## @env DD_KUBERNETES_KUBECONFIG_PATH - string - optional - default: "" +## When running in a pod, the Agent automatically uses the pod's service account +## to authenticate with the API server. +## Provide the path to a custom KubeConfig file if you wish to install the Agent out of a pod +## or customize connection parameters. +## See https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/ +# +# kubernetes_kubeconfig_path: "" + +## @param kubernetes_apiserver_ca_path - string - optional - default: "" +## @env DD_KUBERNETES_APISERVER_CA_PATH - string - optional - default: "" +## When running in a pod, the Agent automatically uses the pod's service account CA. +## Use this option to keep using the InCluster config but overriding the default CA Path. +## This parameter has no effect if `kubernetes_kubeconfig_path` is set. 
+#
+# kubernetes_apiserver_ca_path: ""
+
+## @param kubernetes_apiserver_tls_verify - boolean - optional - default: true
+## @env DD_KUBERNETES_APISERVER_TLS_VERIFY - boolean - optional - default: true
+## When running in a pod, the Agent automatically uses the pod's service account CA.
+## Use this option to keep using the InCluster config but deactivating TLS verification (in case APIServer CA is not ServiceAccount CA)
+## This parameter has no effect if `kubernetes_kubeconfig_path` is set.
+#
+# kubernetes_apiserver_tls_verify: true
+
+## @param kubernetes_apiserver_use_protobuf - boolean - optional - default: false
+## @env DD_KUBERNETES_APISERVER_USE_PROTOBUF - boolean - optional - default: false
+## By default, communication with the apiserver is in json format. Setting the following
+## option to true allows communication in the binary protobuf format.
+#
+# kubernetes_apiserver_use_protobuf: false
+
+## @param kubernetes_collect_metadata_tags - boolean - optional - default: true
+## @env DD_KUBERNETES_COLLECT_METADATA_TAGS - boolean - optional - default: true
+## Set this to false to disable tag collection for the Agent.
+## Note: In order to collect Kubernetes service names, the Agent needs certain rights.
+## See https://github.com/DataDog/datadog-agent/blob/main/Dockerfiles/agent/README.md#kubernetes
+#
+# kubernetes_collect_metadata_tags: true
+
+## @param kubernetes_metadata_tag_update_freq - integer - optional - default: 60
+## @env DD_KUBERNETES_METADATA_TAG_UPDATE_FREQ - integer - optional - default: 60
+## Set how often in seconds the Agent refreshes the internal mapping of services to ContainerIDs.
+#
+# kubernetes_metadata_tag_update_freq: 60
+
+## @param kubernetes_apiserver_client_timeout - integer - optional - default: 10
+## @env DD_KUBERNETES_APISERVER_CLIENT_TIMEOUT - integer - optional - default: 10
+## Set the timeout for the Agent when connecting to the Kubernetes API server.
+# +# kubernetes_apiserver_client_timeout: 10 + +## @param collect_kubernetes_events - boolean - optional - default: false +## @env DD_COLLECT_KUBERNETES_EVENTS - boolean - optional - default: false +## Set `collect_kubernetes_events` to true to enable collection of kubernetes +## events to be sent to Datadog. +## Note: leader election must be enabled below to collect events. +## Only the leader Agent collects events. +## See https://github.com/DataDog/datadog-agent/blob/main/Dockerfiles/agent/README.md#event-collection +# +# collect_kubernetes_events: false + +## @param kubernetes_event_collection_timeout - integer - optional - default: 100 +## @env DD_KUBERNETES_EVENT_COLLECTION_TIMEOUT - integer - optional - default: 100 +## Set the timeout between two successful event collections in milliseconds. +# +# kubernetes_event_collection_timeout: 100 + +## @param leader_election - boolean - optional - default: false +## @env DD_LEADER_ELECTION - boolean - optional - default: false +## Set the parameter to true to enable leader election on this node. +## See https://github.com/DataDog/datadog-agent/blob/main/Dockerfiles/agent/README.md#leader-election +# +# leader_election: false + +## @param leader_lease_duration - integer - optional - default: 60 +## @env DD_LEADER_LEASE_DURATION - integer - optional - default: 60 +## Set the leader election lease in seconds. +# +# leader_lease_duration: 60 + +## @param kubernetes_node_labels_as_tags - map - optional +## @env DD_KUBERNETES_NODE_LABELS_AS_TAGS - json - optional +## Configure node labels that should be collected and their name as host tags. 
+## Note: Some of these labels are redundant with metadata collected by cloud provider crawlers (AWS, GCE, Azure)
+#
+# kubernetes_node_labels_as_tags:
+#   kubernetes.io/hostname: nodename
+#   beta.kubernetes.io/os: os
+#
+# DD_KUBERNETES_NODE_LABELS_AS_TAGS='{"NODE_LABEL": "TAG_KEY"}'
+
+## @param kubernetes_node_annotations_as_tags - map - optional
+## @env DD_KUBERNETES_NODE_ANNOTATIONS_AS_TAGS - json - optional
+## Configure node annotations that should be collected and their name as host tags.
+#
+# kubernetes_node_annotations_as_tags:
+#   cluster.k8s.io/machine: machine
+#
+# DD_KUBERNETES_NODE_ANNOTATIONS_AS_TAGS='{"NODE_ANNOTATION": "TAG_KEY"}'
+
+## @param kubernetes_node_annotations_as_host_aliases - list - optional
+## @env DD_KUBERNETES_NODE_ANNOTATIONS_AS_HOST_ALIASES - list - optional
+## Configure node annotations that should be collected and used as host aliases.
+#
+# kubernetes_node_annotations_as_host_aliases:
+#   - cluster.k8s.io/machine
+#
+# DD_KUBERNETES_NODE_ANNOTATIONS_AS_HOST_ALIASES='["cluster.k8s.io/machine"]'
+
+## @param cluster_name - string - optional
+## @env DD_CLUSTER_NAME - string - optional
+## Set a custom kubernetes cluster identifier to avoid host alias collisions.
+## The cluster name can be up to 40 characters with the following restrictions:
+## * Lowercase letters, numbers, and hyphens only.
+## * Must start with a letter.
+## * Must end with a number or a letter.
+##
+## These are the same rules as the ones enforced by GKE:
+## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name
+#
+# cluster_name:
+
+## @param disable_cluster_name_tag_key - boolean - optional - default: false
+## @env DD_DISABLE_CLUSTER_NAME_TAG_KEY - boolean - optional - default: false
+## Disable using the 'cluster_name' tag key to submit orchestrator cluster name tag.
+## The Agent will continue sending the cluster name tag with 'kube|ecs_cluster_name' key +## regardless of the value of this parameter. +# +# disable_cluster_name_tag_key: false + +## @param kubernetes_ad_tags_disabled -- list of strings - optional +## @env DD_KUBERNETES_AD_TAGS_DISABLED -- list of strings - optional +## Can only be set to a single valid value: [ "kube_service" ] +## in order to not attach the kube_service tag on ready pods +# +# kubernetes_ad_tags_disabled: +# - kube_service + +{{ end -}} +{{- if .PrometheusScrape }} +## @param prometheus_scrape - custom object - optional +## This section configures the Autodiscovery based on the Prometheus annotations +# +# prometheus: + + ## @param enabled - boolean - optional - default: false + ## Enables the prometheus config provider + # + # enabled: false + + ## @param service_endpoints - boolean - optional - default: false + ## Enables Service Endpoints checks in the prometheus config provider + # + # service_endpoints: false + + ## @param checks - custom object - optional + ## Defines any extra prometheus/openmetrics check configurations to be handled by the prometheus config provider + # + # checks: {} + + ## @param version - integer - optional - default: 1 + ## Version of the openmetrics check to be scheduled by the Prometheus auto-discovery + # + # version: 1 + +{{ end -}} +{{- if .CloudFoundryBBS }} +####################################################### +## Cloud Foundry BBS Configuration for Autodiscovery ## +####################################################### + +## @param cloud_foundry_bbs - custom object - optional +## This section configures how the Cluster Agent accesses BBS API to gather information +## necessary for autodiscovery on BBS-based Cloud Foundry deployments. 
+# +# cloud_foundry_bbs: + + ## @param url - string - optional - default: https://bbs.service.cf.internal:8889 + ## @env DD_CLOUD_FOUNDRY_BBS_URL - string - optional - default: https://bbs.service.cf.internal:8889 + ## URL of the BBS API. + # + # url: https://bbs.service.cf.internal:8889 + + ## @param poll_interval - integer - optional - default: 15 + ## @env DD_CLOUD_FOUNDRY_BBS_POLL_INTERVAL - integer - optional - default: 15 + ## Refresh rate of BBS API, in seconds. Values lower than 10 might influence + ## performance of other operations in the cluster. + # + # poll_interval: 15 + + ## @param ca_file - string - optional - default: "" + ## @env DD_CLOUD_FOUNDRY_BBS_CA_FILE - string - optional - default: "" + ## PEM-encoded CA certificate used when connecting to the BBS API. + # + # ca_file: "" + + ## @param cert_file - string - optional - default: "" + ## @env DD_CLOUD_FOUNDRY_BBS_CERT_FILE - string - optional - default: "" + ## PEM-encoded client certificate used when connecting to the BBS API. + # + # cert_file: "" + + ## @param key_file - string - optional - default: "" + ## @env DD_CLOUD_FOUNDRY_BBS_KEY_FILE - string - optional - default: "" + ## PEM-encoded client key used when connecting to the BBS API. 
+ #
+ # key_file: ""
+
+ ## @param env_include - list of strings - optional - default: []
+ ## @env DD_CLOUD_FOUNDRY_BBS_ENV_INCLUDE - list of strings - optional
+ ## List of regular expressions to allow a set of environment variables to be included as container tags
+ #
+ # env_include: []
+ ## @param env_exclude - list of strings - optional - default: []
+ ## @env DD_CLOUD_FOUNDRY_BBS_ENV_EXCLUDE - list of strings - optional
+ ## List of regular expressions to forbid a set of environment variables to be included as container tags
+ #
+ # env_exclude: []
+
+{{ end -}}
+{{- if .CloudFoundryCC }}
+####################################################################
+## Cloud Foundry Cloud Controller Configuration for Autodiscovery ##
+####################################################################
+
+## @param cloud_foundry_cc - custom object - optional
+## This section configures how the Cluster Agent accesses CC API to gather information
+## necessary for autodiscovery on Cloud Foundry deployments.
+#
+# cloud_foundry_cc:
+
+ ## @param url - string - optional - default: https://cloud-controller-ng.service.cf.internal:9024
+ ## @env DD_CLOUD_FOUNDRY_CC_URL - string - optional - default: https://cloud-controller-ng.service.cf.internal:9024
+ ## URL of the CC API.
+ #
+ # url: https://cloud-controller-ng.service.cf.internal:9024
+
+ ## @param client_id - string - optional
+ ## @env DD_CLOUD_FOUNDRY_CC_CLIENT_ID
+ ## Client ID for oauth with UAA to get a token to access the CC API.
+ #
+ # client_id:
+
+ ## @param client_secret - string - optional
+ ## @env DD_CLOUD_FOUNDRY_CC_CLIENT_SECRET
+ ## Client secret for oauth with UAA to get a token to access the CC API.
+ #
+ # client_secret:
+
+ ## @param skip_ssl_validation - boolean - optional - default: false
+ ## @env DD_CLOUD_FOUNDRY_CC_SKIP_SSL_VALIDATION
+ ## Whether or not to skip SSL validation when interacting with CC API.
+ # + # skip_ssl_validation: false + + ## @param poll_interval - integer - optional - default: 60 + ## @env DD_CLOUD_FOUNDRY_CC_POLL_INTERVAL + ## Refresh rate of CC API, in seconds. Values lower than 10 might influence + ## performance of other operations in the cluster. + # + # poll_interval: 60 + + ## @param apps_batch_size - integer - optional - default: 5000 + ## @env DD_CLOUD_FOUNDRY_CC_APPS_BATCH_SIZE + ## Number of apps per page to collect when calling the list apps endpoint of the CC API. Max 5000. + # + # apps_batch_size: 5000 + +{{ end -}} +{{- if .SNMP }} + +################################### +## Network Devices Configuration ## +################################### + +## @param network_devices - custom object - optional +## Configuration related to Network Devices Monitoring +# +# network_devices: + + ## @param namespace - string - optional - default: default + ## Namespace can be used to disambiguate devices with the same IP. + ## Changing namespace will cause devices being recreated in NDM app. + ## It should contain less than 100 characters and should not contain any of + ## `<`, `>`, `\n`, `\t`, `\r` characters. + ## This field is used by NDM features (SNMP check, SNMP Traps listener, etc). + # + # namespace: default + + ## @param autodiscovery - custom object - optional + ## Creates and schedules a listener to automatically discover your SNMP devices. + ## Discovered devices can then be monitored with the SNMP integration by using + ## the auto_conf.yaml file provided by default. + # + # autodiscovery: + + ## @param workers - integer - optional - default: 2 + ## The number of concurrent tasks used to discover SNMP devices. Increasing this value + ## discovers devices faster but at the cost of increased resource consumption. + # + # workers: 2 + + ## @param discovery_interval - integer - optional - default: 3600 + ## How often to discover new SNMP devices, in seconds. 
Decreasing this value + ## discovers devices faster (within the limit of the time taken to scan subnets) + ## but at the cost of increased resource consumption. + # + # discovery_interval: 3600 + + ## @param discovery_allowed_failures - integer - optional - default: 3 + ## The number of failed requests to a given SNMP device before removing it from the list of monitored + ## devices. + ## If a device shuts down, the Agent stops monitoring it after `discovery_interval * discovery_allowed_failures` seconds. + # + # discovery_allowed_failures: 3 + + ## @param loader - string - optional - default: python + ## Check loader to use. Available loaders: + ## - core: (recommended) Uses new corecheck SNMP integration + ## - python: Uses legacy python SNMP integration + # + # loader: core + + ## @param min_collection_interval - number - optional - default: 15 + ## This changes the collection interval for the check instances created + ## from discovered SNMP devices. + ## For more information, see: + ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval + # + # min_collection_interval: 15 + + ## @param use_device_id_as_hostname - boolean - optional - default: false + ## Use `device:` (device_id is composed of `:`) as `hostname` + ## for metrics and service checks (meaning that metrics and services checks will have + ## `host:device:` as tag). + ## This option is needed for custom tags. + # + # use_device_id_as_hostname: true + + ## @param collect_topology - boolean - optional - default: true + ## Enable the collection of topology (LLDP/CDP) data + # + # collect_topology: true + + ## @param ping - custom object - optional + ## Configure ICMP pings for all hosts in SNMP autodiscovery + ## Devices will be pinged with these settings each time the SNMP + ## check is run. + ## + ## By default, Datadog tries to use an unprivileged UDP socket to send ICMP + ## pings, but some Linux systems require using a raw socket. 
+ ## + ## If `linux.use_raw_socket` is set, you must enable the `ping` module + ## of system-probe for elevated privileges. See + ## system-probe.yaml.example for details. + # + # ping: + # enabled: true # Disabled by default + # timeout: 3000 # Timeout in milliseconds + # count: 2 # Number of ping packets to send per check run + # interval: 10 # Time between sending pings (up to `count` packets) in milliseconds + # linux: # Linux-specific configuration + # use_raw_socket: true # Send pings in a privileged fashion using a raw socket. + # # This may be required if your system doesn't support + # # sending pings in an unprivileged fashion (using a UDP socket). + # # If `use_raw_socket` is set to true, you MUST also enable + # # system-probe which has elevated privileges. To enable it, see system-probe.yaml.example. + + ## @param configs - list - required + ## The actual list of configurations used to discover SNMP devices in various subnets. + ## Example: + ## configs: + ## - network_address: 10.0.0.0/24 + ## snmp_version: 1 + ## community_string: public + ## - network_address: 10.0.1.0/28 + ## community_string: public + ## ignored_ip_addresses: + ## - 10.0.1.0 + ## - 10.0.1.1 + # + # configs: + ## @param network_address - string - required + ## The subnet in CIDR format to scan for SNMP devices. + ## All unignored IP addresses in the CIDR range are scanned. + ## For optimal discovery time, be sure to use the smallest network mask + ## possible as is appropriate for your network topology. + ## Ex: 10.0.1.0/24 + # + # - network_address: + + ## @param ignored_ip_addresses - list of strings - optional + ## A list of IP addresses to ignore when scanning the network. + # + # ignored_ip_addresses: + # - + # - + + ## @param port - integer - optional - default: 161 + ## The UDP port to use when connecting to SNMP devices. + # + # port: 161 + + ## @param snmp_version - integer - optional - default: + ## Set the version of the SNMP protocol. 
Available options are: `1`, `2` or `3`. + ## If unset, the Agent tries to guess the correct version based on other configuration + ## parameters, for example: if `user` is set, the Agent uses SNMP v3. + # + # snmp_version: + + ## @param timeout - integer - optional - default: 5 + ## The number of seconds before timing out. + # + # timeout: 5 + + ## @param retries - integer - optional - default: 3 + ## The number of retries before failure. + # + # retries: 3 + + ## @param community_string - string - optional + ## Required for SNMP v1 & v2. + ## Enclose the community string with single quote like below (to avoid special characters being interpreted). + ## Ex: 'public' + # + # community_string: '' + + ## @param user - string - optional + ## The username to connect to your SNMP devices. + ## SNMPv3 only. + # + # user: + + ## @param authKey - string - optional + ## The passphrase to use with your Authentication type. + ## SNMPv3 only. + # + # authKey: + + ## @param authProtocol - string - optional + ## The authentication protocol to use when connecting to your SNMP devices. + ## Available options are: MD5, SHA, SHA224, SHA256, SHA384, SHA512 + ## Defaults to MD5 when `authentication_key` is specified. + ## SNMPv3 only. + # + # authProtocol: + + ## @param privKey - string - optional + ## The passphrase to use with your privacy protocol. + ## SNMPv3 only. + # + # privKey: + + ## @param privProtocol - string - optional + ## The privacy protocol to use when connecting to your SNMP devices. + ## Available options are: DES, AES (128 bits), AES192, AES192C, AES256, AES256C + ## Defaults to DES when `privacy_key` is specified. + ## SNMPv3 only. + # + # privProtocol: + + ## @param context_name - string - optional + ## The name of your context (optional SNMP v3-only parameter). + # + # context_name: + + ## @param tags - list of strings - optional + ## A list of tags to attach to every metric and service check of all devices discovered in the subnet. 
+ ## + ## Learn more about tagging at https://docs.datadoghq.com/tagging + # + # tags: + # - : + # - : + + ## @param ad_identifier - string - optional - default: snmp + ## A unique identifier to attach to devices from that subnetwork. + ## When configuring the SNMP integration in snmp.d/auto_conf.yaml, + ## specify the corresponding ad_identifier at the top of the file. + # + # ad_identifier: snmp + + ## @param loader - string - optional - default: python + ## Check loader to use. Available loaders: + ## - core: will use corecheck SNMP integration + ## - python: will use python SNMP integration + # + # loader: core + + ## @param min_collection_interval - number - optional - default: 15 + ## This changes the collection interval for the check instances created from + ## discovered SNMP devices. It applies to each specific config from `snmp_listener.configs` + ## and has precedence over `snmp_listener.min_collection_interval`. + ## For more information, see: + ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval + # + # min_collection_interval: 15 + + ## @param use_device_id_as_hostname - boolean - optional - default: false + ## Use `device:` (device_id is composed of `:`) as `hostname` + ## for metrics and service checks (meaning that metrics and services checks will have + ## `host:device:` as tag). + ## This option is needed for custom tags. + # + # use_device_id_as_hostname: true + + ## @param oid_batch_size - integer - optional - default: 5 + ## The number of OIDs handled by each batch. 
+ # + # oid_batch_size: 5 + + ## @param interface_configs - map - optional + ## This option is used to override interface inbound/outbound speed and add interface tags + ## Example: + ## interface_configs: + ## "10.0.0.1": # target device IP address + ## - match_field: "name" # (required) the field to match, can be `name` (interface name) or `index` (ifIndex) + ## match_value: "eth0" # (required) the value to match + ## in_speed: 50 # (optional) inbound speed value in bits per sec, no value or 0 means no override + ## out_speed: 25 # (optional) outbound speed value in bits per sec, no value or 0 means no override + ## tags: # (optional) interface level tags + ## - "testTagKey:testTagValue" + ## - "tagKey2:tagValue2" + # + # interface_config: + # "10.0.0.1": + # - match_field: name + # match_value: eth0 + # in_speed: 50 + # out_speed: 25 + # - match_field: index + # match_value: '10' + # in_speed: 50 + # out_speed: 25 + # "10.0.0.2": + # - match_field: name + # match_value: eth3 + # in_speed: 50 + # out_speed: 25 + # "10.0.0.3": + # - match_field: name + # match_value: eth4 + # tags: + # - "monitored:true" + # - "customKey:customValue" + + ## @param ping - custom object - optional + ## Configure ICMP pings for all hosts in SNMP autodiscovery + ## Devices will be pinged with these settings each time the SNMP + ## check is run. + ## + ## By default, Datadog tries to use an unprivileged UDP socket to send ICMP + ## pings, but some linux systems require using a raw socket. + ## + ## If `linux.use_raw_socket` is set, you must enable the `ping` module + ## of system-probe for elevated privileges. See + ## system-probe.yaml.example for details. 
+ # + # ping: + # enabled: true # Disabled by default + # timeout: 3000 # Timeout in milliseconds + # count: 2 # Number of ping packets to send per check run + # interval: 10 # Time between sending pings (up to `count` packets) in milliseconds + # linux: # Linux-specific configuration + # use_raw_socket: true # Send pings in a privileged fashion using a raw socket. + # # This may be required if your system doesn't support + # # sending pings in an unprivileged fashion (using a UDP socket). + # # If `use_raw_socket` is set to true, you MUST also enable + # # system-probe which has elevated privileges. To enable it, see system-probe.yaml.example. + + + ## @param snmp_traps - custom object - optional + ## This section configures SNMP traps collection. + ## Traps are forwarded as logs and can be found in the logs explorer with a source:snmp-traps query + # + # snmp_traps: + + ## @param enabled - boolean - optional - default: false + ## Set to true to enable collection of traps. + # + # enabled: false + + ## @param port - integer - optional - default: 9162 + ## @env DD_SNMP_TRAPS_CONFIG_PORT - integer - optional - default: 9162 + ## The UDP port to use when listening for incoming trap packets. + ## Because the Datadog Agent does not run as root, the port cannot be below 1024. + ## However, if you run `sudo setcap 'cap_net_bind_service=+ep' /opt/datadog-agent/bin/agent/agent`, + ## the Datadog Agent can listen on ports below 1024. + # + # port: 9162 + + ## @param community_strings - list of strings - required + ## A list of known SNMP community strings that devices can use to send traps to the Agent. + ## Traps with an unknown community string are ignored. + ## Enclose the community string with single quote like below (to avoid special characters being interpreted). + ## Must be non-empty. + # + # community_strings: + # - '' + # - '' + + ## @param users - list of custom objects - optional + ## List of SNMPv3 users that can be used to listen for traps. 
+ ## Each user can contain: + ## * user - string - The username used by devices when sending Traps to the Agent. + ## * authKey - string - (Optional) The passphrase to use with the given user and authProtocol + ## * authProtocol - string - (Optional) The authentication protocol to use when listening for traps from this user. + ## Available options are: MD5, SHA, SHA224, SHA256, SHA384, SHA512. + ## Defaults to MD5 when authKey is set. + ## * privKey - string - (Optional) The passphrase to use with the given user privacy protocol. + ## * privProtocol - string - (Optional) The privacy protocol to use when listening for traps from this user. + ## Available options are: DES, AES (128 bits), AES192, AES192C, AES256, AES256C. + ## Defaults to DES when privKey is set. + # + # users: + # - user: + # authKey: + # authProtocol: + # privKey: + # privProtocol: + + ## @param bind_host - string - optional + ## The hostname to listen on for incoming trap packets. + ## Binds to 0.0.0.0 by default (accepting all packets). + # + # bind_host: 0.0.0.0 + + ## stop_timeout - float - optional - default: 5.0 + ## The maximum number of seconds to wait for the trap server to stop when the Agent shuts down. + # + # stop_timeout: 5.0 + + ## @param netflow - custom object - optional + ## This section configures NDM NetFlow (and sFlow, IPFIX) collection. + # + # netflow: + + ## @param enabled - boolean - optional - default: false + ## Set to true to enable collection of NetFlow traffic. + # + # enabled: false + + ## @param listeners - custom object - optional + ## This section configures one or more listeners ports that will receive flow traffic. + ## Each listener have the following options: + ## * flow_type - string - The flow type correspond to the incoming flow protocol. + ## Choices are: netflow5, netflow9, ipfix, sflow5 + ## * port - string - (Optional) The port used to receive incoming flow traffic. 
+ ## Default port differ by flow type: netflow5(2055), netflow9(2055), ipfix(4739), sflow5(6343) + ## * bind_host - string - (Optional) The hostname to listen on for incoming netflow packets. + ## Binds to 0.0.0.0 by default (accepting all packets). + ## * workers - string - (Optional) Number of workers to use for this listener. + ## Defaults to 1. + ## * mapping - (Optional) List of NetflowV9/IPFIX fields to additionally collect. + ## Defaults to None. + ## * field - integer - The Netflow field type ID to collect. + ## * destination - string - Name of the collected field, is queryable under @ in Datadog. + ## Default fields can be overridden, for example, `destination.port` overrides + ## the default destination port collected. + ## * type - string - The field type. + ## Available options are: string, integer, hex. + ## Defaults to hex. + ## * endianness - string - (Optional) If type is integer, endianness can be set using this parameter. + ## Available options are: big, little. + ## Defaults to big. + # + # listeners: + # - flow_type: netflow9 + # port: 2055 + # mapping: + # - field: 1234 + # destination: transport_rtp_ssrc + # type: integer + # - flow_type: netflow5 + # port: 2056 + # - flow_type: ipfix + # port: 4739 + # - flow_type: sflow5 + # port: 6343 + + ## stop_timeout - integer - optional - default: 5 + ## The maximum number of seconds to wait for the NetFlow listeners to stop when the Agent shuts down. + # + # stop_timeout: 5 + + ## @param reverse_dns_enrichment_enabled - boolean - optional - default: false + ## Set to true to enable reverse DNS enrichment of private source and destination IP addresses in NetFlow records. + # reverse_dns_enrichment_enabled: false + +## @param reverse_dns_enrichment - custom object - optional +## This section configures the reverse DNS enrichment component that can be used by other components in the Datadog Agent. 
+# reverse_dns_enrichment: + + ## @param workers - integer - optional - default: 10 + ## The number of concurrent workers used to perform reverse DNS lookups. + # workers: 10 + + ## @param chan_size - integer - optional - default: 5000 + ## The size of the channel used to send reverse DNS lookup requests to the workers. + # chan_size: 5000 + + ## @param cache - custom object - optional + ## This section configures the cache used by the reverse DNS enrichment component. + # cache: + + ## @param enabled - boolean - optional - default: true + ## Set to true to enable reverse DNS enrichment caching. + # + # enabled: true + + ## @param entry_ttl - duration - optional - default: 24h + ## The amount of time that a cache entry remains valid before it is expired and removed from the cache. + # entry_ttl: 24h + + ## @param clean_interval - duration - optional - default: 2h + ## An interval that specifies how often expired entries are removed from the cache to free space. + # clean_interval: 2h + + ## @param persist_interval - duration - optional - default: 2h + ## An interval that specifies how often the cache is persisted to disk so the cache can be reloaded when the Agent is upgraded or restarted. + # persist_interval: 2h + + ## @param max_retries - integer - optional - default: 10 + ## The maximum number of retries to perform when a DNS lookup operation fails, after which the hostname "" is returned and cached for the IP address. + # max_retries: 10 + + ## @param max_size - integer - optional - default: 1000000 + ## The maximum size in entries of the cache, above which additional entries will not be cached. + # + # max_size: 1000000 + + ## @param rate_limiter - custom object - optional + ## This section configures the rate limiter used by the reverse DNS enrichment component. + # rate_limiter: + + ## @param enabled - boolean - optional - default: true + ## Set to true to enable the reverse DNS enrichment rate limiter. 
+ # + # enabled: true + + ## @param limit_per_sec - integer - optional - default: 1000 + ## The maximum number of reverse DNS lookups allowed per second by the rate limiter. + # limit_per_sec: 1000 + + ## @param limit_throttled_per_sec - integer - optional - default: 1 + ## The maximum number of reverse DNS lookups allowed per second when the rate limiter is throttled due to errors exceeding the threshold. + # limit_throttled_per_sec: 1 + + ## @param throttle_error_threshold - integer - optional - default: 10 + ## The number of consecutive errors that will trigger the rate limiter to throttle down to limit_throttled_per_sec. + # throttle_error_threshold: 10 + + ## @param recovery_intervals - integer - optional - default: 5 + ## The number of intervals over which to increase the rate limit back to limit_per_sec when lookups are again successful after being throttled due to errors. + # recovery_intervals: 5 + + ## @param recovery_interval - duration - optional - default: 5s + ## The interval between incrementally increasing the rate limit back to limit_per_sec when lookups are again successful after being throttled due to errors. + ## The rate limit will be increased by (limit_per_sec - limit_throttled_per_sec) / recovery_intervals every recovery_interval, until it reaches + ## limit_per_sec. For example, with limit_per_sec=1000, limit_throttled_per_sec=1, recovery_intervals=5, recovery_interval=5s, the limit will + ## be increased by 200 every 5 seconds until reaching 1000. + # recovery_interval: 5s + +{{end -}} +{{- if .OTLP }} +################################### +## OpenTelemetry Configuration ## +################################### + +## @param otlp_config - custom object - optional +## This section configures OTLP ingest in the Datadog Agent. +# +# otlp_config: + + ## @param receiver - custom object - optional + ## The receiver configuration. It follows the OpenTelemetry Collector's OTLP Receiver Configuration. 
+ ## This template lists the most commonly used settings; see the OpenTelemetry Collector documentation + ## for a full list of available settings: + ## https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/config.md + # + # receiver: + + ## @param protocols - custom object - optional + ## Configuration for the supported protocols. + # + # protocols: + + ## @param grpc - custom object - optional + ## Configuration for OTLP/gRPC listener. + ## Setting this as an empty section enables the OTLP/gRPC listener with default options. + # + # grpc: + + ## @param endpoint - string - optional - default: 0.0.0.0:4317 + ## @env DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT - string - optional - default: 0.0.0.0:4317 + ## The OTLP/gRPC listener endpoint. + # + # endpoint: 0.0.0.0:4317 + + ## @param transport - string - optional - default: tcp + ## @env DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_TRANSPORT - string - optional - default: tcp + ## The OTLP/gRPC listener transport protocol. + ## Known protocols are "tcp", "udp", "ip", "unix", "unixgram", and "unixpacket". + # + # transport: tcp + + ## @param max_recv_msg_size_mib - number - optional - default: 4 + ## @env DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_MAX_RECV_MSG_SIZE_MIB - number - optional - default: 4 + ## The maximum size (in MiB) of messages accepted by the OTLP/gRPC endpoint. + # + # max_recv_msg_size_mib: 4 + + ## @param http - custom object - optional + ## Configuration for OTLP/HTTP listener. + ## Setting this as an empty section enables the OTLP/HTTP listener with default options. + # + # http: + + ## @param endpoint - string - optional - default: 0.0.0.0:4318 + ## @env DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_HTTP_ENDPOINT - string - optional - default: 0.0.0.0:4318 + ## The OTLP/HTTP listener endpoint. + # + # endpoint: 0.0.0.0:4318 + + ## @param metrics - custom object - optional + ## Metrics-specific configuration for OTLP ingest in the Datadog Agent. 
+ # + # metrics: + + ## @param enabled - boolean - optional - default: true + ## @env DD_OTLP_CONFIG_METRICS_ENABLED - boolean - optional - default: true + ## Set to false to disable metrics support in the OTLP ingest endpoint. + ## To enable the OTLP ingest, the otlp_config.receiver section must be set. + # + # enabled: true + + ## @param resource_attributes_as_tags - boolean - optional - default: false + ## @env DD_OTLP_CONFIG_METRICS_RESOURCE_ATTRIBUTES_AS_TAGS - boolean - optional - default: false + ## Set to true to add resource attributes of a metric to its metric tags. Please note that any of + ## the subset of resource attributes in this list https://docs.datadoghq.com/opentelemetry/guide/semantic_mapping/ + ## are converted to Datadog conventions and set to to metric tags whether this option is enabled or not. + # + # resource_attributes_as_tags: false + + ## Deprecated - use `instrumentation_scope_metadata_as_tags` instead in favor of + ## https://github.com/open-telemetry/opentelemetry-proto/releases/tag/v0.15.0 + ## Both must not be set at the same time. + ## @param instrumentation_library_metadata_as_tags - boolean - optional - default: false + ## @env DD_OTLP_CONFIG_METRICS_INSTRUMENTATION_LIBRARY_METADATA_AS_TAGS - boolean - optional - default: false + ## Set to true to add metadata about the instrumentation library that created a metric. + # + # instrumentation_library_metadata_as_tags: false + + ## @param instrumentation_scope_metadata_as_tags - boolean - optional - default: false + ## @env DD_OTLP_CONFIG_METRICS_INSTRUMENTATION_SCOPE_METADATA_AS_TAGS - boolean - optional - default: false + ## Set to true to add metadata about the instrumentation scope that created a metric. 
+ # + # instrumentation_scope_metadata_as_tags: false + + ## @param tag_cardinality - string - optional - default: low + ## @env DD_OTLP_CONFIG_METRICS_TAG_CARDINALITY - string - optional - default: low + ## Configure the level of granularity of tags to send for OTLP metrics. Choices are: + ## * low: add tags about low-cardinality objects (clusters, hosts, deployments, container images, ...) + ## * orchestrator: add tags about pod, (in Kubernetes), or task (in ECS or Mesos) -level of cardinality + ## * high: add tags about high-cardinality objects (individual containers, user IDs in requests, ...) + ## WARNING: sending container tags for checks metrics may create more metrics + ## (one per container instead of one per host). This may impact your custom metrics billing. + # + # tag_cardinality: low + + ## @param delta_ttl - int - optional - default: 3600 + ## @env DD_OTLP_CONFIG_METRICS_DELTA_TTL - int - optional - default: 3600 + ## The amount of time (in seconds) that values are kept in memory for + ## calculating deltas for cumulative monotonic metrics. + # + # delta_ttl: 3600 + + ## @param histograms - custom object - optional + ## Configuration for OTLP Histograms. + ## See https://docs.datadoghq.com/metrics/otlp/?tab=histogram for details. + # + # histograms: + + ## @param mode - string - optional - default: distributions + ## @env DD_OTLP_CONFIG_METRICS_HISTOGRAMS_MODE - string - optional - default: distributions + ## How to report histograms. Valid values are: + ## + ## - `distributions` to report metrics as Datadog distributions (recommended). + ## - `nobuckets` to not report bucket metrics, + ## - `counters` to report one metric per histogram bucket. + # + # mode: distributions + + ## Deprecated - use `send_aggregation_metrics` instead. This flag will override `send_aggregation_metrics` if both are set. 
+ ## @param send_count_sum_metrics - boolean - optional - default: false + ## @env DD_OTLP_CONFIG_METRICS_HISTOGRAMS_SEND_COUNT_SUM_METRICS - boolean - optional - default: false + ## Whether to report sum, count, min, and max as separate histogram metrics. + # + # send_count_sum_metrics: false + + ## @param send_aggregation_metrics - boolean - optional - default: false + ## @env DD_OTLP_CONFIG_METRICS_HISTOGRAMS_SEND_AGGREGATION_METRICS - boolean - optional - default: false + ## Whether to report sum, count, min, and max as separate histogram metrics. + # + # send_aggregation_metrics: false + + ## @param sums - custom object - optional + ## Configuration for OTLP Sums. + ## See https://docs.datadoghq.com/metrics/otlp/?tab=sum for details. + # + # sums: + + ## @param cumulative_monotonic_mode - string - optional - default: to_delta + ## @env DD_OTLP_CONFIG_METRICS_SUMS_CUMULATIVE_MONOTONIC_MODE - string - optional - default: to_delta + ## How to report cumulative monotonic sums. Valid values are: + ## + ## - `to_delta` to calculate delta for sum in the client side and report as Datadog counts. + ## - `raw_value` to report the raw value as a Datadog gauge. + # + # cumulative_monotonic_mode: to_delta + + ## @param initial_cumulative_monotonic_value - string - optional - default: auto + ## How to report the initial value for cumulative monotonic sums. Valid values are: + ## + ## - `auto` reports the initial value if its start timestamp is set and it happens after the process was started. + ## - `drop` always drops the initial value. + ## - `keep` always reports the initial value. + # + # initial_cumulative_monotonic_value: auto + + ## @param summaries - custom object - optional + ## Configuration for OTLP Summaries. + ## See https://docs.datadoghq.com/metrics/otlp/?tab=summary for more details. 
+ # + # summaries: + + ## @param mode - string - optional - default: gauges + ## @env DD_OTLP_CONFIG_METRICS_SUMMARIES_MODE - string - optional - default: gauges + ## How to report summaries. Valid values are: + ## + ## - `noquantiles` to not report quantile metrics. + ## - `gauges` to report one gauge metric per quantile. + # + # mode: gauges + + ## @param traces - custom object - optional + ## Traces-specific configuration for OTLP ingest in the Datadog Agent. + # + # traces: + + ## @param enabled - boolean - optional - default: true + ## @env DD_OTLP_CONFIG_TRACES_ENABLED - boolean - optional - default: true + ## Set to false to disable traces support in the OTLP ingest endpoint. + ## To enable the OTLP ingest, the otlp_config.receiver section must be set. + # + # enabled: true + + ## @param span_name_as_resource_name - boolean - optional - default: false + ## @env DD_OTLP_CONFIG_TRACES_SPAN_NAME_AS_RESOURCE_NAME - boolean - optional - default: false + ## If set to true the OpenTelemetry span name will used in the Datadog resource name. + ## If set to false the resource name will be filled with the instrumentation library name + span kind. + # + # span_name_as_resource_name: false + + ## @param span_name_remappings - map - optional + ## @env DD_OTLP_CONFIG_TRACES_SPAN_NAME_REMAPPINGS - json - optional + ## Defines a map of span names and preferred names to map to. This can be used to automatically map Datadog Span + ## Operation Names to an updated value. + ## span_name_remappings: + ## "io.opentelemetry.javaagent.spring.client": "spring.client" + ## "instrumentation:express.server": "express" + ## "go.opentelemetry.io_contrib_instrumentation_net_http_otelhttp.client": "http.client" + # + # span_name_remappings: + # : + + ## @param probabilistic_sampler - custom object - optional + ## Probabilistic sampler controlling the rate of ingestion. Using this sampler works consistently + ## in a distributed system where the sampling rate is shared. 
Exceptions are made for errors and + ## rare traces (if enabled via apm_config.enable_rare_sampler). + # + # probabilistic_sampler: + ## @param sampling_percentage - number - optional - default: 100 + ## @env DD_OTLP_CONFIG_TRACES_PROBABILISTIC_SAMPLER_SAMPLING_PERCENTAGE - number - optional - default: 100 + ## Percentage of traces to ingest (0 100]. Invalid values (<= 0 || > 100) are disconsidered and the default is used. + ## If incoming spans have a sampling.priority set by the user, it will be followed and the sampling percentage will + ## be overridden. + # + # sampling_percentage: 100 + + ## @param logs - custom object - optional + ## Logs-specific configuration for OTLP ingest in the Datadog Agent. + # + # logs: + + ## @param enabled - boolean - optional - default: false + ## @env DD_OTLP_CONFIG_LOGS_ENABLED - boolean - optional - default: false + ## Set to true to enable logs support in the OTLP ingest endpoint. + ## To enable the OTLP ingest, the otlp_config.receiver section must be set. + # + # enabled: true + +## @param debug - custom object - optional + ## Debug-specific configuration for OTLP ingest in the Datadog Agent. + ## This template lists the most commonly used settings; see the OpenTelemetry Collector documentation + ## for a full list of available settings: + ## https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/debugexporter#getting-started + # + # debug: + ## @param verbosity - string - optional - default: normal + ## @env DD_OTLP_CONFIG_DEBUG_VERBOSITY - string - optional - default: normal + ## Verbosity of debug logs when Datadog Agent receives otlp traces/metrics. + ## Valid values are basic, normal, detailed, none. 
+ # + # verbosity: normal +{{- if (eq .OS "windows")}} +##################################################### +## Datadog Agent Manager System Tray Configuration ## +##################################################### + +## @param system_tray - custom object - optional +## This section configures the Datadog Agent Manager System Tray +# +# system_tray: + ## @param log_file - string - optional - default: %ProgramData%\Datadog\logs\ddtray.log + ## @env DD_TRAY_LOG_FILE - string - optional + ## The full path to the file where Datadog Agent Manager System Tray logs are written. + # + # log_file: +{{end -}} +{{end -}} + + +{{- if .APMInjection -}} +############################################## +## Datadog APM Auto-injection Configuration ## +############################################## + +## @param injection_controller_config - custom object +## This section configures the Datadog APM Auto Injection controller. +## Uncomment this parameter and the one below to enable them. +# +# injection_controller_config: + + ## @param enabled - boolean - optional - default: false + ## Set to true to enable the APM Auto-injection. + ## Please note that enabling this service will result in a kernel driver being loaded. + # + # enabled: false + + ## @param log_file - string - optional - default: c:\programdata\datadog\logs\apm-inject.log + ## The full path to the file where injection controller logs are written. + # + # log_file: c:\programdata\datadog\logs\apm-inject.log + + ## @param log_level - string - optional - default: info + ## Minimum log level of the injection controller. + ## Valid log levels are: debug, info, warn, and error. + # + # log_level: 'info' + + ## @param log_to_console - boolean - optional - default: true + ## Set to 'false' to disable injection controller logging to stdout. + # + # log_to_console: true + + ## @param socket_port - integer - optional - default: 3030 + ## The port used for the injection controller communications API (served on localhost). 
+ # + # socket_port: 3030 + + # internal_profiling: + # + ## @param enabled - boolean - optional - default: false + ## Enable internal profiling for the injection controller process. + # + # enabled: false + +## @param service_configs - list of custom objects +## This section configures the services which will be automatically injected with APM +## configurations, as well as the APM configurations which will be injected. +# +# service_configs: + + ## @param service configuration - custom object + ## In order to configure APM auto-injection for a service or set of services, an injection condition + ## and APM configuration must be provided. + ## + ## Example: + ## - conditions: + ## command_line_regex: executable_name.exe + ## configuration: + ## service_language: dotnet + ## dd_env: staging + ## dd_service: exampleService + ## dd_version: 1.2.3 + ## + ## To learn about all the available service matching conditions & configuration options, visit + ## https://docs.datadoghq.com/tracing/trace_collection/library_injection_local + +{{end}} \ No newline at end of file diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index 37ad3669869..c55582db437 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -1,11 +1,164 @@ #!/bin/sh -export DD_HOSTNAME=$(hostname) +DD_CONF_DIR="/etc/datadog-agent" -if [ "$APP_ENV" = "production" ]; then - datadog-agent run & - /opt/datadog-agent/embedded/bin/trace-agent --config=/etc/datadog-agent/datadog.yaml & - /opt/datadog-agent/embedded/bin/process-agent --config=/etc/datadog-agent/datadog.yaml & +# START OF CODE EXTRACTED FROM https://github.com/DataDog/heroku-buildpack-datadog/blob/master/extra/datadog.sh +# This code was extracted in order to maintain functionality when switching from +# Heroku Slugs to Docker container deployments. 
+export DATADOG_CONF="$DD_CONF_DIR/datadog.yaml" +export INTEGRATIONS_CONF="$DD_CONF_DIR/conf.d" +export POSTGRES_CONF="$INTEGRATIONS_CONF/postgres.d" +export REDIS_CONF="$INTEGRATIONS_CONF/redisdb.d" + +# Get the lower case for the log level +DD_LOG_LEVEL_LOWER=$(echo "$DD_LOG_LEVEL" | tr '[:upper:]' '[:lower:]') + +# Update the Datadog conf yaml to disable cloud provider metadata +sed -i -e"s|^.*cloud_provider_metadata:.*$|cloud_provider_metadata: []|" "$DATADOG_CONF" + +DYNOHOST="$(hostname )" +DYNOTYPE=${DYNO%%.*} +DYNO_TAGS="dyno:$DYNO dynotype:$DYNOTYPE" + +export DD_HOST_ALIASES="$DYNOHOST" + +if [ -n "$HEROKU_APP_NAME" ]; then + DYNO_TAGS="$DYNO_TAGS appname:$HEROKU_APP_NAME" +fi + +if [ -z "$DD_API_KEY" ]; then + echo "DD_API_KEY environment variable not set. Run: heroku config:add DD_API_KEY=" + DISABLE_DATADOG_AGENT=1 +fi + +if [ -z "$DD_HOSTNAME" ]; then + if [ "$DD_DYNO_HOST" == "true" ]; then + # Set the hostname to dyno name and ensure rfc1123 compliance. + HAN="$(echo "$HEROKU_APP_NAME" | sed -e 's/[^a-zA-Z0-9-]/-/g' -e 's/^-//g')" + if [ "$HAN" != "$HEROKU_APP_NAME" ]; then + if [ "$DD_LOG_LEVEL_LOWER" == "debug" ]; then + echo "WARNING: The appname \"$HEROKU_APP_NAME\" contains invalid characters. Using \"$HAN\" instead." + fi + fi + + D="$(echo "$DYNO" | sed -e 's/[^a-zA-Z0-9.-]/-/g' -e 's/^-//g')" + export DD_HOSTNAME="$HAN.$D" + else + # Set the hostname to the dyno host + DD_HOSTNAME="$(echo "$DYNOHOST" | sed -e 's/[^a-zA-Z0-9-]/-/g' -e 's/^-//g')" + export DD_HOSTNAME + fi +else + # Generate a warning about DD_HOSTNAME deprecation. + if [ "$DD_LOG_LEVEL_LOWER" == "debug" ]; then + echo "WARNING: DD_HOSTNAME has been set. Setting this environment variable may result in metrics errors. To remove it, run: heroku config:unset DD_HOSTNAME" + fi +fi + +# TODO: does this apply in Docker containers? +# Disable core checks (these read the host, not the dyno). 
+#if [ "$DD_DISABLE_HOST_METRICS" == "true" ]; then +# find "$DD_CONF_DIR"/conf.d -name "conf.yaml.default" -exec mv {} {}_disabled \; +#fi + +# Update the Postgres configuration from above using the Heroku application environment variable +if [ "$DD_ENABLE_HEROKU_POSTGRES" == "true" ]; then + # The default connection URL is set in DATABASE_URL, but can be configured by the user + if [[ -z ${DD_POSTGRES_URL_VAR} ]]; then + DD_POSTGRES_URL_VAR="DATABASE_URL" + fi + + # Use a comma separator instead of new line + IFS="," + + touch "$POSTGRES_CONF/conf.yaml" + echo -e "init_config: \ninstances: \n" > "$POSTGRES_CONF/conf.yaml" + + for PG_URL in $DD_POSTGRES_URL_VAR + do + if [ -n "${!PG_URL}" ]; then + POSTGREGEX='^postgres://([^:]+):([^@]+)@([^:]+):([^/]+)/(.*)$' + if [[ ${!PG_URL} =~ $POSTGREGEX ]]; then + echo -e " - host: ${BASH_REMATCH[3]}" >> "$POSTGRES_CONF/conf.yaml" + echo -e " username: ${BASH_REMATCH[1]}" >> "$POSTGRES_CONF/conf.yaml" + echo -e " password: ${BASH_REMATCH[2]}" >> "$POSTGRES_CONF/conf.yaml" + echo -e " port: ${BASH_REMATCH[4]}" >> "$POSTGRES_CONF/conf.yaml" + echo -e " dbname: ${BASH_REMATCH[5]}" >> "$POSTGRES_CONF/conf.yaml" + echo -e " ssl: require" >> "$POSTGRES_CONF/conf.yaml" + echo -e " disable_generic_tags: false" >> "$POSTGRES_CONF/conf.yaml" + if [ "$DD_ENABLE_DBM" == "true" ]; then + echo -e " dbm: true" >> "$POSTGRES_CONF/conf.yaml" + fi + fi + fi + done + unset IFS +fi + +# Update the Redis configuration from above using the Heroku application environment variable +if [ "$DD_ENABLE_HEROKU_REDIS" == "true" ]; then + + # The default connection URL is set in REDIS_URL, but can be configured by the user + if [[ -z ${DD_REDIS_URL_VAR} ]]; then + DD_REDIS_URL_VAR="REDIS_URL" + fi + + # Use a comma separator instead of new line + IFS="," + + touch "$REDIS_CONF/conf.yaml" + echo -e "init_config: \ninstances: \n" > "$REDIS_CONF/conf.yaml" + + for RD_URL in $DD_REDIS_URL_VAR + do + if [ -n "${!RD_URL}" ]; then + 
REDISREGEX='^redis(s?)://([^:]*):([^@]+)@([^:]+):([^/]+)/?(.*)$' + if [[ ${!RD_URL} =~ $REDISREGEX ]]; then + echo -e " - host: ${BASH_REMATCH[4]}" >> "$REDIS_CONF/conf.yaml" + echo -e " password: ${BASH_REMATCH[3]}" >> "$REDIS_CONF/conf.yaml" + echo -e " port: ${BASH_REMATCH[5]}" >> "$REDIS_CONF/conf.yaml" + if [[ ! -z ${BASH_REMATCH[1]} ]]; then + echo -e " ssl: true" >> "$REDIS_CONF/conf.yaml" + echo -e " ssl_cert_reqs: 0" >> "$REDIS_CONF/conf.yaml" + fi + if [[ ! -z ${BASH_REMATCH[2]} ]]; then + echo -e " username: ${BASH_REMATCH[2]}" >> "$REDIS_CONF/conf.yaml" + fi + if [[ ! -z ${BASH_REMATCH[6]} ]]; then + echo -e " db: ${BASH_REMATCH[6]}" >> "$REDIS_CONF/conf.yaml" + fi + fi + fi + done + unset IFS +fi + +# Convert comma delimited tags from env vars to yaml +if [ -n "$DD_TAGS" ]; then + DD_TAGS_NORMALIZED="$(sed "s/,[ ]\?/\ /g" <<< "$DD_TAGS")" + DD_TAGS="$DYNO_TAGS $DD_TAGS_NORMALIZED" +else + DD_TAGS="$DYNO_TAGS" +fi + +export DD_VERSION="$DD_VERSION" +export DD_TAGS="$DD_TAGS" +if [ "$DD_LOG_LEVEL_LOWER" == "debug" ]; then + echo "[DEBUG] normalized tags: $DD_TAGS" +fi + +export DD_HEROKU_DYNO="true" + +# END OF EXTRACTED CODE + +if [ -n "$DISABLE_DATADOG_AGENT" ]; then + echo "The Datadog Agent has been disabled. Unset the DISABLE_DATADOG_AGENT or set missing environment variables." 
+else + if [ "$APP_ENV" = "production" ]; then + datadog-agent run & + /opt/datadog-agent/embedded/bin/trace-agent --config=/etc/datadog-agent/datadog.yaml & + /opt/datadog-agent/embedded/bin/process-agent --config=/etc/datadog-agent/datadog.yaml & + fi fi # Check if a command is provided From 6b63749872283f314b7287986aa7d073572fced3 Mon Sep 17 00:00:00 2001 From: Timothee Legros Date: Thu, 17 Oct 2024 02:53:27 +0300 Subject: [PATCH 17/25] switch from default sh to bash --- scripts/datadog-entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index c55582db437..849e8d8957e 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/usr/bin/env bash DD_CONF_DIR="/etc/datadog-agent" From 89a771d1208efd7fd72d205b7a864d181cb9112f Mon Sep 17 00:00:00 2001 From: Timothee Legros Date: Thu, 17 Oct 2024 03:18:29 +0300 Subject: [PATCH 18/25] echo relevant DD env var --- scripts/datadog-entrypoint.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index 849e8d8957e..8c38de31217 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -55,6 +55,10 @@ else fi fi +if [ "$DD_LOG_LEVEL_LOWER" == "debug" ]; then + echo "[DEBUG] DD_HOSTNAME: $DD_HOSTNAME" +fi + # TODO: does this apply in Docker containers? # Disable core checks (these read the host, not the dyno). 
#if [ "$DD_DISABLE_HOST_METRICS" == "true" ]; then @@ -62,6 +66,9 @@ fi #fi # Update the Postgres configuration from above using the Heroku application environment variable +if [ "$DD_LOG_LEVEL_LOWER" == "debug" ]; then + echo "[DEBUG] DD_ENABLE_HEROKU_POSTGRES: $DD_ENABLE_HEROKU_POSTGRES" +fi if [ "$DD_ENABLE_HEROKU_POSTGRES" == "true" ]; then # The default connection URL is set in DATABASE_URL, but can be configured by the user if [[ -z ${DD_POSTGRES_URL_VAR} ]]; then @@ -96,6 +103,9 @@ if [ "$DD_ENABLE_HEROKU_POSTGRES" == "true" ]; then fi # Update the Redis configuration from above using the Heroku application environment variable +if [ "$DD_LOG_LEVEL_LOWER" == "debug" ]; then + echo "[DEBUG] DD_ENABLE_HEROKU_REDIS: $DD_ENABLE_HEROKU_REDIS" +fi if [ "$DD_ENABLE_HEROKU_REDIS" == "true" ]; then # The default connection URL is set in REDIS_URL, but can be configured by the user From b53c8f8ade773868b2a239184fdf5c37c0f74321 Mon Sep 17 00:00:00 2001 From: Timothee Legros Date: Thu, 17 Oct 2024 03:32:55 +0300 Subject: [PATCH 19/25] more env var echo --- scripts/datadog-entrypoint.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index 8c38de31217..31d3a6e2d6f 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -57,6 +57,9 @@ fi if [ "$DD_LOG_LEVEL_LOWER" == "debug" ]; then echo "[DEBUG] DD_HOSTNAME: $DD_HOSTNAME" + echo "[DEBUG] DATADOG_CONF: $DATADOG_CONF" + echo "[DEBUG] POSTGRES_CONF: $POSTGRES_CONF" + echo "[DEBUG] REDIS_CONF: REDIS_CONF" fi # TODO: does this apply in Docker containers? 
From 0fdb857623f8ac64692a7f15258dff3bccb377a4 Mon Sep 17 00:00:00 2001 From: Timothee Legros Date: Thu, 17 Oct 2024 03:52:12 +0300 Subject: [PATCH 20/25] enable datadog agent env var + more logs --- scripts/datadog-entrypoint.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index 31d3a6e2d6f..90756ee63b6 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -84,6 +84,7 @@ if [ "$DD_ENABLE_HEROKU_POSTGRES" == "true" ]; then touch "$POSTGRES_CONF/conf.yaml" echo -e "init_config: \ninstances: \n" > "$POSTGRES_CONF/conf.yaml" + echo "[DEBUG] Creating Datadog Postgres integration config..." for PG_URL in $DD_POSTGRES_URL_VAR do if [ -n "${!PG_URL}" ]; then @@ -167,7 +168,7 @@ export DD_HEROKU_DYNO="true" if [ -n "$DISABLE_DATADOG_AGENT" ]; then echo "The Datadog Agent has been disabled. Unset the DISABLE_DATADOG_AGENT or set missing environment variables." else - if [ "$APP_ENV" = "production" ]; then + if [ "$APP_ENV" = "production" ] || [ "$ENABLE_DATADOG_AGENT" = "true" ]; then datadog-agent run & /opt/datadog-agent/embedded/bin/trace-agent --config=/etc/datadog-agent/datadog.yaml & /opt/datadog-agent/embedded/bin/process-agent --config=/etc/datadog-agent/datadog.yaml & From 54d702bb9433a9b8ea8f5884f79c47d0da28994f Mon Sep 17 00:00:00 2001 From: Timothee Legros Date: Thu, 17 Oct 2024 05:14:48 +0300 Subject: [PATCH 21/25] remove hardcoded datadog.yaml + remove created run dir + update entrypoint script --- Dockerfile.datadog | 2 - datadog-config/datadog.yaml | 4553 --------------------------------- scripts/datadog-entrypoint.sh | 13 +- 3 files changed, 9 insertions(+), 4559 deletions(-) delete mode 100644 datadog-config/datadog.yaml diff --git a/Dockerfile.datadog b/Dockerfile.datadog index 186bd2d097d..9a50dc2ba87 100644 --- a/Dockerfile.datadog +++ b/Dockerfile.datadog @@ -25,7 +25,5 @@ RUN apt-get update && apt-get -y install --reinstall datadog-agent # 
Expose DogStatsD and trace-agent ports EXPOSE 8125/udp 8126/tcp -RUN mkdir -p /var/run/datadog - # output dir MUST match directory set to DD_CONF_DIR in datadog-entrypoint.sh COPY datadog-config/ /etc/datadog-agent/ \ No newline at end of file diff --git a/datadog-config/datadog.yaml b/datadog-config/datadog.yaml deleted file mode 100644 index af5c31ab8c1..00000000000 --- a/datadog-config/datadog.yaml +++ /dev/null @@ -1,4553 +0,0 @@ -{{ if .Common }} -######################### -## Basic Configuration ## -######################### - -## @param api_key - string - required -## @env DD_API_KEY - string - required -## The Datadog API key used by your Agent to submit metrics and events to Datadog. -## Create a new API key here: https://app.datadoghq.com/organization-settings/api-keys . -## Read more about API keys here: https://docs.datadoghq.com/account_management/api-app-keys/#api-keys . -#api_key: - -## @param app_key - string - optional -## The application key used to access Datadog's programatic API. -## Create a new application key here: https://app.datadoghq.com/organization-settings/application-keys . -## Read more about application keys here: https://docs.datadoghq.com/account_management/api-app-keys/#application-keys . -# -# app_key: - -## @param site - string - optional - default: datadoghq.com -## @env DD_SITE - string - optional - default: datadoghq.com -## The site of the Datadog intake to send Agent data to. -## Set to 'datadoghq.eu' to send data to the EU site. -## Set to 'us3.datadoghq.com' to send data to the US3 site. -## Set to 'us5.datadoghq.com' to send data to the US5 site. -## Set to 'ap1.datadoghq.com' to send data to the AP1 site. -## Set to 'ddog-gov.com' to send data to the US1-FED site. 
-# -# site: datadoghq.com - -## @param dd_url - string - optional - default: https://app.datadoghq.com -## @env DD_DD_URL - string - optional - default: https://app.datadoghq.com -## @env DD_URL - string - optional - default: https://app.datadoghq.com -## The host of the Datadog intake server to send metrics to, only set this option -## if you need the Agent to send metrics to a custom URL, it overrides the site -## setting defined in "site". It does not affect APM, Logs or Live Process intake which have their -## own "*_dd_url" settings. -## If DD_DD_URL and DD_URL are both set, DD_DD_URL is used in priority. -# -# dd_url: https://app.datadoghq.com - -## @param proxy - custom object - optional -## @env DD_PROXY_HTTP - string - optional -## @env DD_PROXY_HTTPS - string - optional -## @env DD_PROXY_NO_PROXY - space separated list of strings - optional -## If you need a proxy to connect to the Internet, provide it here (default: -## disabled). Refer to https://docs.datadoghq.com/agent/proxy/ to understand how to use these settings. -## For Logs proxy information, refer to https://docs.datadoghq.com/agent/proxy/#proxy-for-logs -# -# proxy: -# https: http://:@: -# http: http://:@: -# no_proxy: -# - -# - - -## @param skip_ssl_validation - boolean - optional - default: false -## @env DD_SKIP_SSL_VALIDATION - boolean - optional - default: false -## Setting this option to "true" tells the Agent to skip validation of SSL/TLS certificates. -# -# skip_ssl_validation: false - -## @param sslkeylogfile - string - optional - default: "" -## @env DD_SSLKEYLOGFILE - string - optional - default: "" -## sslkeylogfile specifies a destination for TLS master secrets -## in NSS key log format to allow external programs -## such as Wireshark to decrypt TLS connections. -## For more details, see https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format. -## Use of sslkeylogfile compromises security and should only be -## used for debugging. 
-# sslkeylogfile: "" - - -## @param min_tls_version - string - optional - default: "tlsv1.2" -## @env DD_MIN_TLS_VERSION - string - optional - default: "tlsv1.2" -## This option defines the minimum TLS version that will be used when -## submitting data to the Datadog intake specified in "site" or "dd_url". -## This parameter defaults to "tlsv1.2". -## Possible values are: tlsv1.0, tlsv1.1, tlsv1.2, tlsv1.3; values are case- -## insensitive. -# -# min_tls_version: "tlsv1.2" - -## @param hostname - string - optional - default: auto-detected -## @env DD_HOSTNAME - string - optional - default: auto-detected -## Force the hostname name. -# -# hostname: - -## @param hostname_file - string - optional -## @env DD_HOSTNAME_FILE - string - optional -## In some environments, auto-detection of the hostname is not adequate and -## environment variables cannot be used to set the value. In such cases, the -## file on the host can also be used provide an appropriate value. If -## 'hostname' value has been set to a non-empty value, this option is ignored. -# -# hostname_file: /var/lib/cloud/data/instance-id - -## @param hostname_fqdn - boolean - optional - default: false -## @env DD_HOSTNAME_FQDN - boolean - optional - default: false -## When the Agent relies on the OS to determine the hostname, make it use the -## FQDN instead of the short hostname. Recommended value: true -## More information at https://dtdg.co/flag-hostname-fqdn -# -# hostname_fqdn: false - -## @param hostname_trust_uts_namespace - boolean - optional - default: false -## @env DD_HOSTNAME_TRUST_UTS_NAMESPACE - boolean - optional - default: false -## By default the Agent does not trust the hostname value retrieved from non-root UTS namespace, -## as it's usually a generated name, unrelated to the host (e.g. when running in a container). -## When enabled, the Agent will trust the value retrieved from non-root UTS namespace instead of failing -## hostname resolution. 
-## (Linux only) -# -# hostname_trust_uts_namespace: false - -## @param host_aliases - list of strings - optional -## @env DD_HOST_ALIASES - space separated list of strings - optional -## List of host aliases to report in addition to any aliases collected -## automatically from cloud providers. -## More information at -## https://docs.datadoghq.com/agent/faq/how-datadog-agent-determines-the-hostname/?tab=agentv6v7#host-aliases -# -# host_aliases: -# - -# - - -## @param tags - list of key:value elements - optional -## @env DD_TAGS - space separated list of strings - optional -## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. -## -## This configuration value merges with `DD_EXTRA_TAGS`, allowing some -## tags to be set in a configuration file (`tags`), and additional tags to be added -## with an environment variable (`DD_EXTRA_TAGS`). -## -## Learn more about tagging: https://docs.datadoghq.com/tagging/ -# -# tags: -# - team:infra -# - : - -## @param extra_tags - list of key:value elements - optional -## @env DD_EXTRA_TAGS - space separated list of strings - optional -## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. -## -## This configuration value merges with `tags`, allowing some -## tags to be set in a configuration file (`tags`), and additional tags to be added -## with an environment variable (`DD_EXTRA_TAGS`). -## -## Learn more about tagging: https://docs.datadoghq.com/tagging/ -# -# extra_tags: -# - region:northerly -# - : - -## @param env - string - optional -## @env DD_ENV - string - optional -## The environment name where the agent is running. Attached in-app to every -## metric, event, log, trace, and service check emitted by this Agent. 
-# -# env: - -## @param tag_value_split_separator - map - optional -## @env DD_TAG_VALUE_SPLIT_SEPARATOR - list of key:value strings - optional -## Split tag values according to a given separator. Only applies to host tags, -## and tags coming from container integrations. It does not apply to tags on dogstatsd metrics, -## and tags collected by other integrations. -## -## Example use-case: -## -## With a raw collected tag "foo:1;2;3", using the following configuration: -## -## tag_value_split_separator: -## foo: ; -## -## results in the raw tag being transformed into "foo:1", "foo:2", "foo:3" tags -# -# tag_value_split_separator: -# : - -## @param checks_tag_cardinality - string - optional - default: low -## @env DD_CHECKS_TAG_CARDINALITY - string - optional - default: low -## Configure the level of granularity of tags to send for checks metrics and events. Choices are: -## * low: add tags about low-cardinality objects (clusters, hosts, deployments, container images, ...) -## * orchestrator: add tags about pod, (in Kubernetes), or task (in ECS or Mesos) -level of cardinality -## * high: add tags about high-cardinality objects (individual containers, user IDs in requests, ...) -## WARNING: sending container tags for checks metrics may create more metrics -## (one per container instead of one per host). This may impact your custom metrics billing. -# -# checks_tag_cardinality: low - -## @param dogstatsd_tag_cardinality - string - optional - default: low -## @env DD_DOGSTATSD_TAG_CARDINALITY - string - optional - default: low -## Configure the level of granularity of tags to send for DogStatsD metrics and events. Choices are: -## * low: add tags about low-cardinality objects (clusters, hosts, deployments, container images, ...) -## * orchestrator: add tags about pod, (in Kubernetes), or task (in ECS or Mesos) -level of cardinality -## * high: add tags about high-cardinality objects (individual containers, user IDs in requests, ...) 
-## -## WARNING: sending container tags for dogstatsd metrics may create more metrics -## (one per container instead of one per host). This may impact your custom metrics billing. -# -# dogstatsd_tag_cardinality: low - -## @param histogram_aggregates - list of strings - optional - default: ["max", "median", "avg", "count"] -## @env DD_HISTOGRAM_AGGREGATES - space separated list of strings - optional - default: max median avg count -## Configure which aggregated value to compute. -## Possible values are: min, max, median, avg, sum and count. -# -# histogram_aggregates: -# - max -# - median -# - avg -# - count - -## @param histogram_percentiles - list of strings - optional - default: ["0.95"] -## @env DD_HISTOGRAM_PERCENTILES - space separated list of strings - optional - default: 0.95 -## Configure which percentiles are computed by the Agent. It must be a list of float between 0 and 1. -## Warning: percentiles must be specified as yaml strings -# -# histogram_percentiles: -# - "0.95" - -## @param histogram_copy_to_distribution - boolean - optional - default: false -## @env DD_HISTOGRAM_COPY_TO_DISTRIBUTION - boolean - optional - default: false -## Copy histogram values to distributions for true global distributions (in beta) -## Note: This increases the number of custom metrics created. -# -# histogram_copy_to_distribution: false - -## @param histogram_copy_to_distribution_prefix - string - optional -## @env DD_HISTOGRAM_COPY_TO_DISTRIBUTION_PREFIX - string - optional -## A prefix to add to distribution metrics created when histogram_copy_to_distributions is true -# -# histogram_copy_to_distribution_prefix: "" - -## @param aggregator_stop_timeout - integer - optional - default: 2 -## @env DD_AGGREGATOR_STOP_TIMEOUT - integer - optional - default: 2 -## When stopping the agent, the Aggregator will try to flush out data ready for -## aggregation (metrics, events, ...). 
Data are flushed to the Forwarder in order -## to be sent to Datadog, therefore the Agent might take at most -## 'aggregator_stop_timeout'+'forwarder_stop_timeout' seconds to exit. -## -## You can set the maximum amount of time, in seconds, allocated to the -## Aggregator to do so. You can disable this feature by setting -## 'aggregator_stop_timeout' to 0. -# -# aggregator_stop_timeout: 2 - -## @param aggregator_buffer_size - integer - optional - default: 100 -## @env DD_AGGREGATOR_BUFFER_SIZE - integer - optional - default: 100 -## The default buffer size for the aggregator use a sane value for most of the -## use cases, however, it could be useful to manually set it in order to trade -## RSS usage with better performances. -# -# aggregator_buffer_size: 100 - -## @param forwarder_timeout - integer - optional - default: 20 -## @env DD_FORWARDER_TIMEOUT - integer - optional - default: 20 -## Forwarder timeout in seconds -# -# forwarder_timeout: 20 - -## @param forwarder_retry_queue_payloads_max_size - integer - optional - default: 15728640 (15MB) -## @env DD_FORWARDER_RETRY_QUEUE_PAYLOADS_MAX_SIZE - integer - optional - default: 15728640 (15MB) -## It defines the maximum size in bytes of all the payloads in the forwarder's retry queue. -## The actual memory used is greater than the payloads size as there are extra fields like HTTP headers, -## but no more than 2.5 times the payload size. -# -# forwarder_retry_queue_payloads_max_size: 15728640 - -## @param forwarder_num_workers - integer - optional - default: 1 -## @env DD_FORWARDER_NUM_WORKERS - integer - optional - default: 1 -## The number of workers used by the forwarder. -# -# forwarder_num_workers: 1 - -## @param forwarder_stop_timeout - integer - optional - default: 2 -## @env DD_FORWARDER_STOP_TIMEOUT - integer - optional - default: 2 -## When stopping the agent, the Forwarder will try to flush all new -## transactions (not the ones in retry state). 
New transactions will be created -## as the Aggregator flush it's internal data too, therefore the Agent might take -## at most 'aggregator_stop_timeout'+'forwarder_stop_timeout' seconds to exit. -## -## You can set the maximum amount of time, in seconds, allocated to the -## Forwarder to send those transactions. You can disable this feature by setting -## 'forwarder_stop_timeout' to 0. -# -# forwarder_stop_timeout: 2 - -## @param forwarder_storage_max_size_in_bytes - integer - optional - default: 0 -## @env DD_FORWARDER_STORAGE_MAX_SIZE_IN_BYTES - integer - optional - default: 0 -## When the retry queue of the forwarder is full, `forwarder_storage_max_size_in_bytes` -## defines the amount of disk space the Agent can use to store transactions on the disk. -## When `forwarder_storage_max_size_in_bytes` is `0`, the transactions are never stored on the disk. -# -# forwarder_storage_max_size_in_bytes: 50000000 - -## @param forwarder_storage_max_disk_ratio - float - optional - default: 0.8 -## @env DD_FORWARDER_STORAGE_MAX_DISK_RATIO - float - optional - default: 0.8 -## `forwarder_storage_max_disk_ratio` defines the disk capacity limit for storing transactions. -## `0.8` means the Agent can store transactions on disk until `forwarder_storage_max_size_in_bytes` -## is reached or when the disk mount for `forwarder_storage_path` exceeds 80% of the disk capacity, -## whichever is lower. -# -# forwarder_storage_max_disk_ratio: 0.8 - -## @param forwarder_outdated_file_in_days - integer - optional - default: 10 -## @env DD_FORWARDER_OUTDATED_FILE_IN_DAYS - integer - optional - default: 10 -## This value specifies how many days the overflow transactions will remain valid before -## being discarded. During the Agent restart, if a retry file contains transactions that were -## created more than `forwarder_outdated_file_in_days` days ago, they are removed. 
-# -# forwarder_outdated_file_in_days: 10 - -## @param forwarder_high_prio_buffer_size - int - optional - default: 100 -## Defines the size of the high prio buffer. -## Increasing the buffer size can help if payload drops occur due to high prio buffer being full. -# -# forwarder_high_prio_buffer_size: 100 - -## @param forwarder_low_prio_buffer_size - int - optional - default: 100 -## Defines the size of the low prio buffer. -# -# forwarder_low_prio_buffer_size: 100 - -## @param forwarder_requeue_buffer_size - int - optional - default: 100 -## Defines the size of the requeue prio buffer. -# -# forwarder_requeue_buffer_size: 100 - -## @param forwarder_backoff_base - int - optional - default: 2 -## @env DD_FORWARDER_BACKOFF_BASE - integer - optional - default: 2 -## Defines the rate of exponential growth, and the first retry interval range. -## Do not set a lower value than the default. You may increase it if you use a proxy that benefits from a -## higher rate of exponential growth. -# forwarder_backoff_base: 2 - -## @param forwarder_backoff_max - int - optional - default: 64 -## @env DD_FORWARDER_BACKOFF_MAX - integer - optional - default: 64 -## Defines the maximum number of seconds to wait for a retry. -## Do not set a lower value than the default. You may increase it if you use a proxy that benefits from a -## higher maximum backoff time. -# forwarder_backoff_max: 64 - -## @param cloud_provider_metadata - list of strings - optional - default: ["aws", "gcp", "azure", "alibaba", "oracle", "ibm"] -## @env DD_CLOUD_PROVIDER_METADATA - space separated list of strings - optional - default: aws gcp azure alibaba oracle ibm -## This option restricts which cloud provider endpoint will be used by the -## agent to retrieve metadata. By default the agent will try # AWS, GCP, Azure -## and alibaba providers. Some cloud provider are not enabled by default to not -## trigger security alert when querying unknown IP (for example, when enabling -## Tencent on AWS). 
-## Setting an empty list will disable querying any cloud metadata endpoints -## (falling back on system metadata). Disabling metadata for the cloud provider in which an Agent runs may result in -## duplicated hosts in your Datadog account and missing Autodiscovery features -## -## Possible values are: -## "aws" AWS EC2, ECS/Fargate -## "gcp" Google Cloud Provider -## "azure" Azure -## "alibaba" Alibaba -## "tencent" Tencent -## "oracle" Oracle Cloud -## "ibm" IBM Cloud -# -# cloud_provider_metadata: -# - "aws" -# - "gcp" -# - "azure" -# - "alibaba" -# - "oracle" -# - "ibm" - -## @param collect_ec2_tags - boolean - optional - default: false -## @env DD_COLLECT_EC2_TAGS - boolean - optional - default: false -## Collect AWS EC2 custom tags as host tags. -## Requires one of: -## - `collect_ec2_tags_use_imds: true` and configuration of the -## EC2 instance to allow tags in instance metadata; or -## - configuration of the EC2 instance to have an IAM role with -## the `EC2:DescribeTags` permission. -## See docs for further details: -## https://docs.datadoghq.com/integrations/faq/how-do-i-pull-my-ec2-tags-without-using-the-aws-integration/ -# -# collect_ec2_tags: false - -## @param exclude_ec2_tags - list of strings - optional - default: [] -## @env DD_EXCLUDE_EC2_TAGS - space separated list of strings - optional - default: [] -## EC2 tags to exclude from being converted into host tags -- only applicable when collect_ec2_tags is true. This does -## not impact tags collected by the AWS Integration (see https://docs.datadoghq.com/integrations/amazon_web_services/ -## for more information on the AWS integration). -# -# exclude_ec2_tags: [] - -## @param collect_ec2_tags_use_imds - boolean - optional - default: false -## @env DD_COLLECT_EC2_TAGS_USE_IMDS - boolean - optional - default: false -## Use instance metadata service (IMDS) instead of EC2 API to collect AWS EC2 custom tags. -## Requires `collect_ec2_tags`. 
-# -# collect_ec2_tags_use_imds: false - -## @param ec2_metadata_timeout - integer - optional - default: 300 -## @env DD_EC2_METADATA_TIMEOUT - integer - optional - default: 300 -## Timeout in milliseconds on calls to the AWS EC2 metadata endpoints. -# -# ec2_metadata_timeout: 300 - -## @param ec2_prefer_imdsv2 - boolean - optional - default: false -## @env DD_EC2_PREFER_IMDSV2 - boolean - optional - default: false -## If this flag is true then the agent will request EC2 metadata using IMDS v2, -## which offers additional security for accessing metadata. However, in some -## situations (such as a containerized agent on a plain EC2 instance) it may -## require additional configuration on the AWS side. See the AWS guidelines -## for further details: -## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html#instance-metadata-transition-to-version-2 -# -# ec2_prefer_imdsv2: false - -## @param collect_gce_tags - boolean - optional - default: true -## @env DD_COLLECT_GCE_TAGS - boolean - optional - default: true -## Collect Google Cloud Engine metadata as host tags -# -# collect_gce_tags: true - -## @param exclude_gce_tags - list of strings - optional - default: ["bosh_settings" ,"cli-cert" ,"common-psm1" ,"configure-sh" ,"containerd-configure-sh" ,"disable-address-manager" ,"disable-legacy-endpoints" ,"enable-oslogin" ,"gce-container-declaration" ,"google-container-manifest" ,"ipsec-cert" ,"k8s-node-setup-psm1" ,"kube-env" ,"kubeconfig" ,"kubelet-config" ,"serial-port-logging-enable" ,"shutdown-script" ,"ssh-keys" ,"sshKeys" ,"ssl-cert" ,"startup-script" ,"user-data" ,"windows-keys" ,"windows-startup-script-ps1"] -## @env DD_EXCLUDE_GCE_TAGS - space separated list of strings - optional - default: bosh_settings cli-cert common-psm1 configure-sh containerd-configure-sh disable-address-manager disable-legacy-endpoints enable-oslogin gce-container-declaration google-container-manifest ipsec-cert k8s-node-setup-psm1 kube-env 
kubeconfig kubelet-config serial-port-logging-enable shutdown-script ssh-keys sshKeys ssl-cert startup-script user-data windows-keys windows-startup-script-ps1 -## Google Cloud Engine metadata attribute to exclude from being converted into -## host tags -- only applicable when collect_gce_tags is true. -# -# exclude_gce_tags: -# - "bosh_settings" -# - "cli-cert" -# - "common-psm1" -# - "configure-sh" -# - "containerd-configure-sh" -# - "disable-address-manager" -# - "disable-legacy-endpoints" -# - "enable-oslogin" -# - "gce-container-declaration" -# - "google-container-manifest" -# - "ipsec-cert" -# - "k8s-node-setup-psm1" -# - "kube-env" -# - "kubeconfig" -# - "kubelet-config" -# - "serial-port-logging-enable" -# - "shutdown-script" -# - "ssh-keys" -# - "sshKeys" -# - "ssl-cert" -# - "startup-script" -# - "user-data" -# - "windows-keys" -# - "windows-startup-script-ps1" - -## @param gce_send_project_id_tag - bool - optional - default: false -## @env DD_GCE_SEND_PROJECT_ID_TAG - bool - optional - default: false -## Send the project ID host tag with the `project_id:` tag key in addition to -## the `project:` tag key. -# -# gce_send_project_id_tag: false - -## @param gce_metadata_timeout - integer - optional - default: 1000 -## @env DD_GCE_METADATA_TIMEOUT - integer - optional - default: 1000 -## Timeout in milliseconds on calls to the GCE metadata endpoints. -# -# gce_metadata_timeout: 1000 - -## @param azure_hostname_style - string - optional - default: "os" -## @env DD_AZURE_HOSTNAME_STYLE - string - optional - default: "os" -## Changes how agent hostname is set on Azure virtual machines. 
-## -## Possible values: -## "os" - use the hostname reported by the operating system (default) -## "name" - use the instance name -## "name_and_resource_group" - use a combination of the instance name and resource group name -## "full" - use a combination of the instance name, resource group name and subscription id -## "vmid" - use the instance id -# -# azure_hostname_style: "os" - -## @param scrubber - custom object - optional -## Configuration for scrubbing sensitive information from the Agent's logs, configuration and flares. -# -# scrubber: -# - ## @param scrubber.additional_keys - list of strings - optional - ## @env DD_SCRUBBER_ADDITIONAL_KEYS - space-separated list of strings - optional - ## By default, the Agent removes known sensitive keys from Agent and integrations YAML configs before - ## including them in the flare. - ## Use this parameter to define additional sensitive keys that the Agent should scrub from - ## the YAML files included in the flare. - # - # additional_keys: - # - "sensitive_key_1" - # - "sensitive_key_2" - -## @param no_proxy_nonexact_match - boolean - optional - default: false -## @env DD_NO_PROXY_NONEXACT_MATCH - boolean - optional - default: false -## Enable more flexible no_proxy matching. See https://godoc.org/golang.org/x/net/http/httpproxy#Config -## for more information on accepted matching criteria. -# -# no_proxy_nonexact_match: false - -## @param use_proxy_for_cloud_metadata - boolean - optional - default: false -## @env DD_USE_PROXY_FOR_CLOUD_METADATA - boolean - optional - default: false -## By default cloud provider IP's are added to the transport's `no_proxy` list. -## Use this parameter to remove them from the `no_proxy` list. 
-# -# use_proxy_for_cloud_metadata: false - -## @param inventories_configuration_enabled - boolean - optional - default: true -## @env DD_INVENTORIES_CONFIGURATION_ENABLED - boolean - optional - default: true -## By default the Agent sends its own configuration to Datadog to be displayed in the `Agent Configuration` section of the host -## detail panel. See https://docs.datadoghq.com/infrastructure/list/#agent-configuration for more information. -## -## The Agent configuration is scrubbed of any sensitive information. -# -# inventories_configuration_enabled: true - -## @param auto_exit - custom object - optional -## Configuration for the automatic exit mechanism: the Agent stops when some conditions are met. -# -# auto_exit: - - ## @param noprocess - custom object - optional - ## Configure the `noprocess` automatic exit method. - ## Detect when no other processes (non-agent) are running to trigger automatic exit. `HOST_PROC` is taken into account when gathering processes. - ## Feature is only supported on POSIX systems. - # - # noprocess: - ## @param enabled - boolean - optional - default: false - ## @env DD_AUTO_EXIT_NOPROCESS_ENABLED - boolean - optional - default: false - ## Enable the `noprocess` method - # - # enabled: false - - ## @param excluded_processes - list of strings - optional - ## @env DD_AUTO_EXIT_NOPROCESS_EXCLUDED_PROCESSES - space separated list of strings - optional - ## List of regular expressions to exclude extra processes (on top of built-in list). - # - # excluded_processes: [] - - ## @param validation_period - integer - optional - default: 60 - ## @env DD_AUTO_EXIT_VALIDATION_PERIOD - integer - optional - default: 60 - ## Time (in seconds) delay during which the auto exit validates that the selected method continuously detects an exit condition, before exiting. - ## The value is verified every 30s. By default, three consecutive checks need to return true to trigger an automatic exit. 
- # - # validation_period: 60 - - -## @param fips - custom object - optional -## [BETA] Enter specific configurations for using the FIPS proxy. -## Uncomment this parameter and the one below to enable them. -# -# fips: - - ## @param enabled - boolean - optional - default: false - ## @env DD_FIPS_ENABLED - boolean - optional - default: false - ## This feature is in BETA. - ## - ## Enable the use of the FIPS proxy to send data to the DataDog backend. Enabling this will force all outgoing traffic - ## from the Agent to the local proxy. - ## It's important to note that enabling this will not make the Datadog Agent FIPS compliant, but will force all outgoing - ## traffic to a local FIPS compliant proxy. The FIPS proxy need to be installed locally in addition to the agent. - ## - ## When setting this to true the following settings would be overridden, ignoring the values from the - ## configuration: - ## - dd_url - ## - apm_config.apm_dd_url - ## - apm_config.profiling_dd_url - ## - apm_config.telemetry.dd_url - ## - process_config.process_dd_url - ## - logs_config.use_http - ## - logs_config.logs_no_ssl - ## - logs_config.logs_dd_url - ## - database_monitoring.metrics.dd_url - ## - database_monitoring.activity.dd_url - ## - database_monitoring.samples.dd_url - ## - compliance_config.endpoints.dd_url - ## - runtime_security_config.endpoints.dd_url - ## - network_devices.metadata.dd_url - # - ## The agent will also ignore 'proxy.*' settings and environment variables related to proxy (HTTP_PROXY, HTTPS_PROXY, - ## DD_PROXY_HTTP and DD_PROXY_HTTPS). - # - # enabled: false - - ## @param local_address - string - optional - default: localhost - ## @env DD_FIPS_LOCAL_ADDRESS - string - optional - default: localhost - ## The local address that the FIPS proxy will bind ports on. - # - # local_address: localhost - -## @param observability_pipelines_worker - custom object - optional -## Configuration for forwarding telemetry to an Observability Pipelines Worker instead of Datadog. 
-## https://www.datadoghq.com/product/observability-pipelines/ -## Note: This config is interchangeable with `vector` -# -# observability_pipelines_worker: - - ## @param metrics - custom object - optional - ## Specific configurations for metrics - # - # metrics: - - ## @param enabled - boolean - optional - default: false - ## @env DD_OBSERVABILITY_PIPELINES_WORKER_METRICS_ENABLED - boolean - optional - default: false - ## Enables forwarding of metrics to an Observability Pipelines Worker - # - # enabled: false - - ## @param url - string - optional - default: "" - ## @env DD_OBSERVABILITY_PIPELINES_WORKER_METRICS_URL - string - optional - default: "" - ## URL endpoint for the Observability Pipelines Worker to send metrics to - # - # url: "http://127.0.0.1:8080" - - ## @param logs - custom object - optional - ## Specific configurations for logs - # - # logs: - - ## @param enabled - boolean - optional - default: false - ## @env DD_OBSERVABILITY_PIPELINES_WORKER_LOGS_ENABLED - boolean - optional - default: false - ## Enables forwarding of logs to an Observability Pipelines Worker - # - # enabled: false - - ## @param url - string - optional - default: "" - ## @env DD_OBSERVABILITY_PIPELINES_WORKER_LOGS_URL - string - optional - default: "" - ## URL endpoint for the Observability Pipelines Worker to send logs to - # - # url: "http://127.0.0.1:8080" - - ## @param traces - custom object - optional - ## Specific configurations for traces - # - # traces: - - ## @param enabled - boolean - optional - default: false - ## @env DD_OBSERVABILITY_PIPELINES_WORKER_TRACES_ENABLED - boolean - optional - default: false - ## Enables forwarding of traces to an Observability Pipelines Worker - # - # enabled: false - - ## @param url - string - optional - default: "" - ## @env DD_OBSERVABILITY_PIPELINES_WORKER_TRACES_URL - string - optional - default: "" - ## URL endpoint for the Observability Pipelines Worker to send traces to - # - # url: "http://127.0.0.1:8080" - -{{ end }} -{{- if .Agent 
}} -{{- if .Python }} -{{- if .BothPythonPresent -}} -## @param python_version - integer - optional - default: 2 -## @env DD_PYTHON_VERSION - integer - optional - default: 2 -## The major version of Python used to run integrations and custom checks. -## The only supported values are 2 (to use Python 2) or 3 (to use Python 3). -## Do not change this option when using the official Docker Agent images. -# -# python_version: 2 - -{{ end -}} -{{ end }} - -############################ -## Advanced Configuration ## -############################ - -## @param confd_path - string - optional -## @env DD_CONFD_PATH - string - optional -## The path containing check configuration files. By default, uses the conf.d folder -## located in the Agent configuration folder. -# -# confd_path: "" - -## @param additional_checksd - string - optional -## @env DD_ADDITIONAL_CHECKSD - string - optional -## Additional path indicating where to search for Python checks. By default, uses the checks.d folder -## located in the Agent configuration folder. -# -# additional_checksd: - -## @param expvar_port - integer - optional - default: 5000 -## @env DD_EXPVAR_PORT - integer - optional - default: 5000 -## The port for the go_expvar server. -# -# expvar_port: 5000 - -## @param cmd_port - integer - optional - default: 5001 -## @env DD_CMD_PORT - integer - optional - default: 5001 -## The port on which the IPC api listens. -# -# cmd_port: 5001 - -## @param GUI_port - integer - optional -## @env DD_GUI_PORT - integer - optional -## The port for the browser GUI to be served. -## Setting 'GUI_port: -1' turns off the GUI completely -## Default is: -## * Windows & macOS : `5002` -## * Linux: `-1` -## -# -# GUI_port: - -## @param GUI_session_expiration - duration - optional -## @env GUI_SESSION_EXPIRATION - duration - optional -## The duration after which a GUI session will expire. -## Setting 'GUI_SESSION_EXPIRATION: 0' disable session expiration. -## Default is "0s" (sessions do not expire). 
-# -# GUI_session_expiration: - -## @param health_port - integer - optional - default: 0 -## @env DD_HEALTH_PORT - integer - optional - default: 0 -## The Agent can expose its health check on a dedicated http port. -## This is useful for orchestrators that support http probes. -## Default is 0 (disabled), set a valid port number (eg. 5555) to enable. -# -# health_port: 0 - -## @param check_runners - integer - optional - default: 4 -## @env DD_CHECK_RUNNERS - integer - optional - default: 4 -## The `check_runners` refers to the number of concurrent check runners available for check instance execution. -## The scheduler attempts to spread the instances over the collection interval and will _at most_ be -## running the number of check runners instances concurrently. -## Setting the value to 1 would result in checks running sequentially. -## -## This is a sensitive setting, and we do NOT recommend changing the default number -## of check runners in the general case. The level of concurrency has effects on -## the Agent's: RSS memory, CPU load, resource contention overhead, etc. -# -# check_runners: 4 - -## @param enable_metadata_collection - boolean - optional - default: true -## @env DD_ENABLE_METADATA_COLLECTION - boolean - optional - default: true -## Metadata collection should always be enabled, except if you are running several -## agents/dsd instances per host. In that case, only one Agent should have it on. -## WARNING: disabling it on every Agent leads to display and billing issues. -# -# enable_metadata_collection: true - -## @param enable_gohai - boolean - optional - default: true -## @env DD_ENABLE_GOHAI - boolean - optional - default: true -## Enable the gohai collection of systems data. -# -# enable_gohai: true - -## @param enable_signing_metadata_collection - boolean - optional - default: true -## @env DD_ENABLE_SIGNING_METADATA_COLLECTION - boolean - optional - default: true -## Enable the Linux package signing metadata collection. 
-# -# enable_signing_metadata_collection: true - -## @param server_timeout - integer - optional - default: 30 -## @env DD_SERVER_TIMEOUT - integer - optional - default: 30 -## IPC api server timeout in seconds. -# -# server_timeout: 30 - -## @param procfs_path - string - optional -## @env DD_PROCFS_PATH - string - optional -## Some environments may have the procfs file system mounted in a miscellaneous -## location. The procfs_path configuration parameter provides a mechanism to -## override the standard default location: '/proc' - this setting trickles down to -## integrations and affect their behavior if they rely on the psutil python package. -# -# procfs_path: -{{ if .Python }} -## @param disable_py3_validation - boolean - optional - default: false -## @env DD_DISABLE_PY3_VALIDATION - boolean - optional - default: false -## Disable Python3 validation of python checks. -# -# disable_py3_validation: false -# -## @param python3_linter_timeout - integer - optional - default: 120 -## @env DD_PYTHON3_LINTER_TIMEOUT - integer - optional - default: 120 -## Timeout in seconds for validation of compatibility with python 3 when running python 2. -# -# python3_linter_timeout: 120 - -## @param memtrack_enabled - boolean - optional - default: true -## @env DD_MEMTRACK_ENABLED - boolean - optional - default: true -## Enables tracking of memory allocations made from the python runtime loader. -# -# memtrack_enabled: true - -## @param tracemalloc_debug - boolean - optional - default: false -## @env DD_TRACEMALLOC_DEBUG - boolean - optional - default: false -## Enables debugging with tracemalloc for python checks. -## Please note that this option is only available when python_version is set to "3". -## Additionally when this option becomes effective the number of check runners is -## overridden to 1. 
-# -# tracemalloc_debug: false - -## @param tracemalloc_include - string - optional -## @env DD_TRACEMALLOC_INCLUDE - string - optional -## Comma-separated list of Python checks to enable tracemalloc for when `tracemalloc_debug` is true. -## By default, all Python checks are enabled. -# -# tracemalloc_include: - -## @param tracemalloc_exclude - string - optional -## @env DD_TRACEMALLOC_EXCLUDE - string - optional -## Comma-separated list of Python checks to disable tracemalloc for when `tracemalloc_debug` is true. -## By default, all Python checks are enabled. This setting takes precedence over `tracemalloc_include`. -# -# tracemalloc_exclude: - -## @param windows_use_pythonpath - boolean - optional -## @env DD_WINDOWS_USE_PYTHONPATH - boolean - optional -## Whether to honour the value of the PYTHONPATH env var when set on Windows. -## Disabled by default, so we only load Python libraries bundled with the Agent. -# -# windows_use_pythonpath: false -{{ end }} -## @param secret_backend_command - string - optional -## @env DD_SECRET_BACKEND_COMMAND - string - optional -## `secret_backend_command` is the path to the script to execute to fetch secrets. -## The executable must have specific rights that differ on Windows and Linux. -## -## For more information see: https://github.com/DataDog/datadog-agent/blob/main/docs/agent/secrets.md -# -# secret_backend_command: - -## @param secret_backend_arguments - list of strings - optional -## @env DD_SECRET_BACKEND_ARGUMENTS - space separated list of strings - optional -## If secret_backend_command is set, specify here a list of arguments to give to the command at each run. 
-# -# secret_backend_arguments: -# - -# - - -## @param secret_backend_output_max_size - integer - optional - default: 1048576 -## @env DD_SECRET_BACKEND_OUTPUT_MAX_SIZE - integer - optional - default: 1048576 -## The size in bytes of the buffer used to store the command answer (apply to both stdout and stderr) -# -# secret_backend_output_max_size: 1048576 - -## @param secret_backend_timeout - integer - optional - default: 30 -## @env DD_SECRET_BACKEND_TIMEOUT - integer - optional - default: 30 -## The timeout to execute the command in second -# -# secret_backend_timeout: 30 - -## @param secret_backend_skip_checks - boolean - optional - default: false -## @env DD_SECRET_BACKEND_SKIP_CHECKS - boolean - optional - default: false -## Disable fetching secrets for check configurations -# -# secret_backend_skip_checks: false -# - -## @param secret_backend_remove_trailing_line_break - boolean - optional - default: false -## @env DD_SECRET_BACKEND_REMOVE_TRAILING_LINE_BREAK - boolean - optional - default: false -## Remove trailing line breaks from secrets returned by the secret_backend_command. Some secret management tools automatically -## add a line break when exporting secrets through files. -# -# secret_backend_remove_trailing_line_break: false - - -{{- if .InternalProfiling -}} -## @param profiling - custom object - optional -## Enter specific configurations for internal profiling. -## -## Please note that: -## 1. This does *not* enable profiling for user applications. -## 2. This only enables internal profiling of the agent go runtime. -## 3. To enable profiling for user apps please refer to -## https://docs.datadoghq.com/tracing/profiling/ -## 4. Enabling this feature will incur in billing charges and other -## unexpected side-effects (ie. agent profiles showing with your -## services). -## -## Uncomment this parameter and the one below to enable profiling. 
-# -# internal_profiling: -# - ## @param enabled - boolean - optional - default: false - ## @env DD_INTERNAL_PROFILING_ENABLED - boolean - optional - default: false - ## Enable internal profiling for the Agent process. - # - # enabled: false - -{{ end }} - - -{{ end -}} - -{{- if .LogsAgent }} - -################################## -## Log collection Configuration ## -################################## - -## @param logs_enabled - boolean - optional - default: false -## @env DD_LOGS_ENABLED - boolean - optional - default: false -## Enable Datadog Agent log collection by setting logs_enabled to true. -# -# logs_enabled: false - -## @param logs_config - custom object - optional -## Enter specific configurations for your Log collection. -## Uncomment this parameter and the one below to enable them. -## See https://docs.datadoghq.com/agent/logs/ -# -# logs_config: - - ## @param container_collect_all - boolean - optional - default: false - ## @env DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL - boolean - optional - default: false - ## Enable container log collection for all the containers (see ac_exclude to filter out containers) - # - # container_collect_all: false - - ## @param logs_dd_url - string - optional - ## @env DD_LOGS_CONFIG_LOGS_DD_URL - string - optional - ## Define the endpoint and port to hit when using a proxy for logs. The logs are forwarded in TCP - ## therefore the proxy must be able to handle TCP connections. - # - # logs_dd_url: : - - ## @param logs_no_ssl - boolean - optional - default: false - ## @env DD_LOGS_CONFIG_LOGS_NO_SSL - optional - default: false - ## Disable the SSL encryption. This parameter should only be used when logs are - ## forwarded locally to a proxy. It is highly recommended to then handle the SSL encryption - ## on the proxy side. 
- # - # logs_no_ssl: false - - ## @param processing_rules - list of custom objects - optional - ## @env DD_LOGS_CONFIG_PROCESSING_RULES - list of custom objects - optional - ## Global processing rules that are applied to all logs. The available rules are - ## "exclude_at_match", "include_at_match" and "mask_sequences". More information in Datadog documentation: - ## https://docs.datadoghq.com/agent/logs/advanced_log_collection/#global-processing-rules - # - # processing_rules: - # - type: - # name: - # pattern: - - ## @param force_use_http - boolean - optional - default: false - ## @env DD_LOGS_CONFIG_FORCE_USE_HTTP - boolean - optional - default: false - ## By default, the Agent sends logs in HTTPS batches to port 443 if HTTPS connectivity can - ## be established at Agent startup, and falls back to TCP otherwise. Set this parameter to `true` to - ## always send logs with HTTPS (recommended). - ## Warning: force_use_http means HTTP over TCP, not HTTP over HTTPS. Please use logs_no_ssl for HTTP over HTTPS. - # - # force_use_http: true - - ## @param force_use_tcp - boolean - optional - default: false - ## @env DD_LOGS_CONFIG_FORCE_USE_TCP - boolean - optional - default: false - ## By default, logs are sent through HTTPS if possible, set this parameter - ## to `true` to always send logs via TCP. If `force_use_http` is set to `true`, this parameter - ## is ignored. - # - # force_use_tcp: true - - ## @param use_compression - boolean - optional - default: true - ## @env DD_LOGS_CONFIG_USE_COMPRESSION - boolean - optional - default: true - ## This parameter is available when sending logs with HTTPS. If enabled, the Agent - ## compresses logs before sending them. - # - # use_compression: true - - ## @param compression_level - integer - optional - default: 6 - ## @env DD_LOGS_CONFIG_COMPRESSION_LEVEL - integer - optional - default: 6 - ## The compression_level parameter accepts values from 0 (no compression) - ## to 9 (maximum compression but higher resource usage). 
Only takes effect if - ## `use_compression` is set to `true`. - # - # compression_level: 6 - - ## @param batch_wait - integer - optional - default: 5 - ## @env DD_LOGS_CONFIG_BATCH_WAIT - integer - optional - default: 5 - ## The maximum time (in seconds) the Datadog Agent waits to fill each batch of logs before sending. - # - # batch_wait: 5 - - ## @param open_files_limit - integer - optional - default: 500 - ## @env DD_LOGS_CONFIG_OPEN_FILES_LIMIT - integer - optional - default: 500 - ## The maximum number of files that can be tailed in parallel. - ## Note: the default for Mac OS is 200. The default for - ## all other systems is 500. - # - # open_files_limit: 500 - - ## @param file_wildcard_selection_mode - string - optional - default: `by_name` - ## @env DD_LOGS_CONFIG_FILE_WILDCARD_SELECTION_MODE - string - optional - default: `by_name` - ## The strategy used to prioritize wildcard matches if they exceed the open file limit. - ## - ## Choices are `by_name` and `by_modification_time`. - ## - ## `by_name` means that each log source is considered and the matching files are ordered - ## in reverse name order. While there are less than `logs_config.open_files_limit` files - ## being tailed, this process repeats, collecting from each configured source. - ## - ## `by_modification_time` takes all log sources and first adds any log sources that - ## point to a specific file. Next, it finds matches for all wildcard sources. - ## This resulting list is ordered by which files have been most recently modified - ## and the top `logs_config.open_files_limit` most recently modified files are - ## chosen for tailing. - ## - ## WARNING: `by_modification_time` is less performant than `by_name` and will trigger - ## more disk I/O at the configured wildcard log paths. 
- # - # file_wildcard_selection_mode: by_name - - ## @param max_message_size_bytes - integer - optional - default: 256000 - ## @env DD_LOGS_CONFIG_MAX_MESSAGE_SIZE_BYTES - integer - optional - default : 256000 - ## The maximum size of single log message in bytes. If maxMessageSizeBytes exceeds - ## the documented API limit of 1MB - any payloads larger than 1MB will be dropped by the intake. - # https://docs.datadoghq.com/api/latest/logs/ - # - # max_message_size_bytes: 256000 - - ## @param integrations_logs_files_max_size - integer - optional - default: 10 - ## @env DD_LOGS_CONFIG_INTEGRATIONS_LOGS_FILES_MAX_SIZE - integer - optional - default: 10 - ## The max size in MB that an integration logs file is allowed to use - # - # integrations_logs_files_max_size - - ## @param integrations_logs_total_usage - integer - optional - default: 100 - ## @env DD_LOGS_CONFIG_INTEGRATIONS_LOGS_TOTAL_USAGE - integer - optional - default: 100 - ## The total combined usage all integrations logs files can use - # - # integrations_logs_total_usage - -{{ end -}} -{{- if .TraceAgent }} - -#################################### -## Trace Collection Configuration ## -#################################### - -## @param apm_config - custom object - optional -## Enter specific configurations for your trace collection. -## Uncomment this parameter and the one below to enable them. -## See https://docs.datadoghq.com/agent/apm/ -# -# apm_config: - - ## @param enabled - boolean - optional - default: true - ## @env DD_APM_ENABLED - boolean - optional - default: true - ## Set to true to enable the APM Agent. - # - # enabled: true - - ## @param env - string - optional - default: none - ## @env DD_APM_ENV - string - optional - default: none - ## The environment tag that Traces should be tagged with. - ## If not set the value will be inherited, in order, from the top level - ## "env" config option if set and then from the 'env:' tag if present in the - ## 'tags' top level config option. 
- # - # env: none - - ## @param receiver_port - integer - optional - default: 8126 - ## @env DD_APM_RECEIVER_PORT - integer - optional - default: 8126 - ## The port that the trace receiver should listen on. - ## Set to 0 to disable the HTTP receiver. - # - # receiver_port: 8126 - -{{- if (eq .OS "windows")}} - ## Please note that UDS receiver is not available in Windows. - ## Enabling this setting may result in unexpected behavior. - ## @param receiver_socket - string - optional - default: "" - ## @env DD_APM_RECEIVER_SOCKET - string - optional - default: "" - ## Accept traces through Unix Domain Sockets. - ## Set to "" to disable the UDS receiver. - # - # receiver_socket: "" -{{else}} - ## @param receiver_socket - string - optional - default: unix:///var/run/datadog/apm.socket - ## @env DD_APM_RECEIVER_SOCKET - string - optional - default: unix:///var/run/datadog/apm.socket - ## Accept traces through Unix Domain Sockets. - ## Set to "" to disable the UDS receiver. - # - # receiver_socket: /var/run/datadog/apm.socket -{{ end }} - - ## @param apm_non_local_traffic - boolean - optional - default: false - ## @env DD_APM_NON_LOCAL_TRAFFIC - boolean - optional - default: false - ## Set to true so the Trace Agent listens for non local traffic, - ## i.e if Traces are being sent to this Agent from another host/container - # - # apm_non_local_traffic: false - - ## @param apm_dd_url - string - optional - ## @env DD_APM_DD_URL - string - optional - ## Define the endpoint and port to hit when using a proxy for APM. The traces are forwarded in TCP - ## therefore the proxy must be able to handle TCP connections. - # - # apm_dd_url: : - - ## DEPRECATED - please use `target_traces_per_second` instead. - ## @param max_traces_per_second - integer - optional - default: 10 - ## @env DD_APM_MAX_TPS - integer - optional - default: 10 - ## The target traces per second to sample. Sampling rates to apply are adjusted given - ## the received traffic and communicated to tracers. 
This configures head base sampling. - ## As of 7.35.0 sampling cannot be disabled and setting 'max_traces_per_second' to 0 no longer - ## disables sampling, but instead sends no traces to the intake. To avoid rate limiting, set this - ## value sufficiently high for your traffic pattern. - # - # max_traces_per_second: 10 - - ## @param target_traces_per_second - integer - optional - default: 10 - ## @env DD_APM_TARGET_TPS - integer - optional - default: 10 - ## The target traces per second to sample. Sampling rates to apply are adjusted given - ## the received traffic and communicated to tracers. This configures head-based sampling. - ## As of 7.35.0 sampling cannot be disabled and setting 'max_traces_per_second' to 0 no longer - ## disables sampling, but instead sends no traces to the intake. To avoid rate limiting, set this - ## value sufficiently high for your traffic pattern. - # - # target_traces_per_second: 10 - - ## @param errors_per_second - integer - optional - default: 10 - ## @env DD_APM_ERROR_TPS - integer - optional - default: 10 - ## The target error trace chunks to receive per second. The TPS is spread - ## to catch all combinations of service, name, resource, http.status, and error.type. - ## Set to 0 to disable the errors sampler. - # - # errors_per_second: 10 - - ## @param max_events_per_second - integer - optional - default: 200 - ## @env DD_APM_MAX_EPS - integer - optional - default: 200 - ## Maximum number of APM events per second to sample. - # - # max_events_per_second: 200 - - ## @param max_memory - integer - optional - default: 500000000 - ## @env DD_APM_MAX_MEMORY - integer - optional - default: 500000000 - ## This value is what the Agent aims to use in terms of memory. If surpassed, the API - ## rate limits incoming requests to aim and stay below this value. - ## Note: The Agent process is killed if it uses more than 150% of `max_memory`. - ## Set the `max_memory` parameter to `0` to disable the memory limitation. 
- # - # max_memory: 500000000 - - ## @param max_cpu_percent - integer - optional - default: 50 - ## @env DD_APM_MAX_CPU_PERCENT - integer - optional - default: 50 - ## The CPU percentage that the Agent aims to use. If surpassed, the API rate limits - ## incoming requests to aim and stay below this value. Examples: 50 = half a core, 200 = two cores. - ## Set `max_cpu_percent` to `0` to disable rate limiting based on CPU usage. - # - # max_cpu_percent: 50 - - ## @param obfuscation - object - optional - ## Defines obfuscation rules for sensitive data. Disabled by default. - ## See https://docs.datadoghq.com/tracing/setup_overview/configure_data_security/#agent-trace-obfuscation - # - # obfuscation: - # credit_cards: - ## @param DD_APM_OBFUSCATION_CREDIT_CARDS_ENABLED - boolean - optional - ## Enables obfuscation rules for credit cards. Enabled by default. - # enabled: true - ## @param DD_APM_OBFUSCATION_CREDIT_CARDS_LUHN - boolean - optional - ## Enables a Luhn checksum check in order to eliminate false negatives. Disabled by default. - # luhn: false - # - # elasticsearch: - ## @param DD_APM_OBFUSCATION_ELASTICSEARCH_ENABLED - boolean - optional - ## Enables obfuscation rules for spans of type "elasticsearch". Enabled by default. - # enabled: true - ## @param DD_APM_OBFUSCATION_ELASTICSEARCH_KEEP_VALUES - object - optional - ## List of keys that should not be obfuscated. - # keep_values: - # - client_id - ## @param DD_APM_OBFUSCATION_ELASTICSEARCH_OBFUSCATE_SQL_VALUES - boolean - optional - ## The set of keys for which their values will be passed through SQL obfuscation - # obfuscate_sql_values: - # - val1 - # - # opensearch: - ## @param DD_APM_OBFUSCATION_OPENSEARCH_ENABLED - boolean - optional - ## Enables obfuscation rules for spans of type "opensearch". Enabled by default. - # enabled: true - ## @param DD_APM_OBFUSCATION_OPENSEARCH_KEEP_VALUES - object - optional - ## List of keys that should not be obfuscated. 
- # keep_values: - # - client_id - ## @param DD_APM_OBFUSCATION_OPENSEARCH_OBFUSCATE_SQL_VALUES - boolean - optional - ## The set of keys for which their values will be passed through SQL obfuscation - # obfuscate_sql_values: - # - val1 - # - # http: - ## @param DD_APM_OBFUSCATION_HTTP_REMOVE_QUERY_STRING - boolean - optional - ## Enables obfuscation of query strings in URLs - # remove_query_string: false - ## @param DD_APM_OBFUSCATION_HTTP_REMOVE_PATHS_WITH_DIGITS - boolean - optional - ## If enabled, path segments in URLs containing digits are replaced by "?" - # remove_path_with_digits: false - # - # memcached: - ## @param DD_APM_OBFUSCATION_MEMCACHED_ENABLED - boolean - optional - ## Enables obfuscation rules for spans of type "memcached". Enabled by default. - # enabled: true - ## @param DD_APM_OBFUSCATION_MEMCACHED_KEEP_COMMAND - boolean - optional - ## If enabled, the full command for the query will be kept, including any lookup - ## keys that could be present. The value for storage commands will still be - ## redacted if Memcached obfuscation is enabled. - # keep_command: false - # - # mongodb: - ## @param DD_APM_OBFUSCATION_MONGODB_ENABLED - boolean - optional - ## Enables obfuscation rules for spans of type "mongodb". Enabled by default. - # enabled: true - ## @param DD_APM_OBFUSCATION_MONGODB_KEEP_VALUES - object - optional - ## List of keys that should not be obfuscated. - # keep_values: - # - document_id - ## @param DD_APM_OBFUSCATION_MONGODB_OBFUSCATE_SQL_VALUES - object - optional - ## The set of keys for which their values will be passed through SQL obfuscation - # obfuscate_sql_values: - # - val1 - # - # redis: - ## @param DD_APM_OBFUSCATION_REDIS_ENABLED - boolean - optional - ## Enables obfuscation rules for spans of type "redis". Enabled by default. - # enabled: true - ## @param DD_APM_OBFUSCATION_REDIS_REMOVE_ALL_ARGS - boolean - optional - ## When true, replaces all arguments of a redis command with a single "?". Disabled by default. 
- # remove_all_args: false - # - ## @param DD_APM_OBFUSCATION_REMOVE_STACK_TRACES - boolean - optional - ## Enables removing stack traces to replace them with "?". Disabled by default. - # remove_stack_traces: false - # - # sql_exec_plan: - ## @param DD_APM_SQL_EXEC_PLAN_ENABLED - boolean - optional - ## Enables obfuscation rules for JSON query execution plans. Disabled by default. - # enabled: false - ## @param DD_APM_SQL_EXEC_PLAN_KEEP_VALUES - object - optional - ## List of keys that should not be obfuscated. - # keep_values: - # - id1 - ## @param DD_APM_SQL_EXEC_PLAN_OBFUSCATE_SQL_VALUES - boolean - optional - ## The set of keys for which their values will be passed through SQL obfuscation - # obfuscate_sql_values: - # - val1 - # - # sql_exec_plan_normalize: - ## @param DD_APM_SQL_EXEC_PLAN_NORMALIZE_ENABLED - boolean - optional - ## Enables obfuscation rules for JSON query execution plans, including cost and row estimates. - ## Produces a normalized execution plan. Disabled by default. - # enabled: false - ## @param DD_APM_SQL_EXEC_PLAN_NORMALIZE_KEEP_VALUES - object - optional - ## List of keys that should not be obfuscated. - # keep_values: - # - id1 - ## @param DD_APM_SQL_EXEC_PLAN_NORMALIZE_OBFUSCATE_SQL_VALUES - boolean - optional - ## The set of keys for which their values will be passed through SQL obfuscation - # obfuscate_sql_values: - # - val1 - - ## @param filter_tags - object - optional - ## @env DD_APM_FILTER_TAGS_REQUIRE - object - optional - ## @env DD_APM_FILTER_TAGS_REJECT - object - optional - ## Defines rules by which to filter traces based on tags. - ## * require - list of key or key/value strings - traces must have those tags in order to be sent to Datadog - ## * reject - list of key or key/value strings - traces with these tags are dropped by the Agent - ## Please note that: - ## 1. Rules take into account the intersection of tags defined. - ## 2. 
When `filter_tags` and `filter_tags_regex` are used at the same time, all rules are united for filtering. - ## In cases where rules in `filter_tags` and `filter_tags_regex` match the same key, the rule from `filter_tags` - ## takes precedence over the rule from `filter_tags_regex`. - ## - ## For example, in the case of the following configuration: - ## filter_tags: - ## require: ["foo:bar"] - ## filter_tags_regex: - ## require: ["foo:^bar[0-9]{1}$"] - ## With these rules, traces with a tag `foo:bar1` will be dropped, and those with a `foo:bar` tag will be kept - # - # filter_tags: - # require: [] - # reject: [] - - ## @param filter_tags_regex - object - optional - ## Defines rules by which to filter traces based on tags with regex pattern for tag values. - ## * require - list of key or key/value regex pattern strings - traces must have those tags in order to be sent to Datadog - ## * reject - list of key or key/value regex pattern strings - traces with these tags are dropped by the Agent - ## Note: Rules take into account the intersection of tags defined. - ## Using regexp patterns for tag filtering can have performance implications, and is slower than typical tag filtering - ## without regexp. However, this regexp is only run on the root span of a trace, so should not have a critical impact - ## on overall performance. - ## More detailed information can be found in the description of the `filter_tags` parameter above - # - # filter_tags_regex: - # require: [] # e.g. [":"] - # reject: [] # e.g. [":"] - - ## @param replace_tags - list of objects - optional - ## @env DD_APM_REPLACE_TAGS - list of objects - optional - ## Defines a set of rules to replace or remove certain resources, tags containing - ## potentially sensitive information. - ## Each rule has to contain: - ## * name - string - The tag name to replace, for resources use "resource.name". 
- ## * pattern - string - The pattern to match the desired content to replace - ## * repl - string - what to inline if the pattern is matched - ## - ## See https://docs.datadoghq.com/tracing/setup_overview/configure_data_security/#replace-rules-for-tag-filtering - ## - # - # replace_tags: - # - name: "" - # pattern: "" - # repl: "" - - ## @param ignore_resources - list of strings - optional - ## @env DD_APM_IGNORE_RESOURCES - comma separated list of strings - optional - ## An exclusion list of regular expressions can be provided to disable certain traces based on their resource name - ## all entries must be surrounded by double quotes and separated by commas. - # - # ignore_resources: ["(GET|POST) /healthcheck"] - - ## @param log_file - string - optional - ## @env DD_APM_LOG_FILE - string - optional - ## The full path to the file where APM-agent logs are written. - # - # log_file: - - ## @param connection_limit - integer - default: 2000 - ## @env DD_APM_CONNECTION_LIMIT - integer - default: 2000 - ## The APM connection limit for the Agent. - ## See https://docs.datadoghq.com/tracing/troubleshooting/agent_rate_limits/#max-connection-limit - # - # connection_limit: 2000 - - ## @param compute_stats_by_span_kind - bool - default: false - ## @env DD_APM_COMPUTE_STATS_BY_SPAN_KIND - bool - default: false - ## [BETA] Enables an additional stats computation check on spans to see they have an eligible `span.kind` (server, consumer, client, producer). - ## If enabled, a span with an eligible `span.kind` will have stats computed. If disabled, only top-level and measured spans will have stats computed. - ## NOTE: For stats computed from OTel traces, only top-level spans are considered when this option is off. - ## If you are sending OTel traces and want stats on non-top-level spans, this flag will need to be enabled. 
- ## If you are sending OTel traces and do not want stats computed by span kind, you need to disable this flag and remove the "enable_otlp_compute_top_level_by_span_kind" APM feature if present. - # compute_stats_by_span_kind: false - - ## @param peer_service_aggregation - bool - default: false - ## @env DD_APM_PEER_SERVICE_AGGREGATION - bool - default: false - ## DEPRECATED - please use `peer_tags_aggregation` instead. - # peer_service_aggregation: false - - ## @param peer_tags_aggregation - bool - default: false - ## @env DD_APM_PEER_TAGS_AGGREGATION - bool - default: false - ## [BETA] Enables aggregation of peer related tags (e.g., `peer.service`, `db.instance`, etc.) in the Agent. - ## If disabled, aggregated trace stats will not include these tags as dimensions on trace metrics. - ## For the best experience with peer tags, Datadog also recommends enabling `compute_stats_by_span_kind`. - ## If you are using an OTel tracer, it's best to have both enabled because client/producer spans with relevant peer tags - ## may not be marked by the Agent as top-level spans. - ## If enabling both causes the Agent to consume too many resources, try disabling `compute_stats_by_span_kind` first. - ## A high cardinality of peer tags or APM resources can also contribute to higher CPU and memory consumption. - ## You can check for the cardinality of these fields by making trace search queries in the Datadog UI. - ## The default list of peer tags can be found in pkg/trace/stats/concentrator.go. - # peer_tags_aggregation: false - - ## @param peer_tags - list of strings - optional - ## @env DD_APM_PEER_TAGS - list of strings - optional - ## [BETA] Optional list of supplementary peer tags that go beyond the defaults. The Datadog backend validates all tags - ## and will drop ones that are unapproved. - # peer_tags: [] - - ## @param features - list of strings - optional - ## @env DD_APM_FEATURES - comma separated list of strings - optional - ## Configure additional beta APM features. 
- ## The list of items available under apm_config.features is not guaranteed to persist across versions; - ## a feature may eventually be promoted to its own configuration option on the agent, or dropped entirely. - # - # features: ["error_rare_sample_tracer_drop","table_names","component2name","sql_cache","sqllexer","enable_otlp_compute_top_level_by_span_kind","enable_receive_resource_spans_v2"] - - ## @param additional_endpoints - object - optional - ## @env DD_APM_ADDITIONAL_ENDPOINTS - object - optional - ## Enables sending data to multiple endpoints and/or with multiple API keys via dual shipping. - ## See https://docs.datadoghq.com/agent/guide/dual-shipping - # - # additional_endpoints: - # "https://trace.agent.datadoghq.com": - # - apikey2 - # - apikey3 - # "https://trace.agent.datadoghq.eu": - # - apikey4 - - ## @param debug - custom object - optional - ## Specifies settings for the debug server of the trace agent. - # - # debug: - - ## @param port - integer - optional - default: 5012 - ## @env DD_APM_DEBUG_PORT - string - optional - default: 5012 - ## Port for the debug endpoints for the trace Agent. Set it to 0 to disable the server. - # - # port: 5012 - - ## @param instrumentation_enabled - boolean - default: false - ## @env DD_APM_INSTRUMENTATION_ENABLED - boolean - default: false - ## Enables Single Step Instrumentation in the cluster (in beta) - # - # instrumentation_enabled: false - - ## @param instrumentation_enabled_namespaces - list of strings - optional - ## @env DD_APM_INSTRUMENTATION_ENABLED_NAMESPACES - space separated list of strings - optional - ## Enables Single Step Instrumentation in specific namespaces, while Single Step Instrumentation is off in the whole cluster (in beta) - ## Can only be set if DD_APM_INSTRUMENTATION_ENABLED is false. Cannot be set together with DD_APM_INSTRUMENTATION_DISABLED_NAMESPACES. 
- # - # instrumentation_enabled_namespaces: - # - ns1 - # - apps - - ## @param instrumentation_disabled_namespaces - list of strings - optional - ## @env DD_APM_INSTRUMENTATION_DISABLED_NAMESPACES - space separated list of strings - optional - ## Disables Single Step Instrumentation in specific namespaces, while Single Step Instrumentation is enabled in the whole cluster (in beta) - ## Can only be set if DD_APM_INSTRUMENTATION_ENABLED is true. Cannot be set together with DD_APM_INSTRUMENTATION_ENABLED_NAMESPACES. - # - # instrumentation_disabled_namespaces: - # - ns2 - # - system-ns - - ## @param trace_buffer - integer - optional - default: 0 - ## @env DD_APM_TRACE_BUFFER - integer - optional - default: 0 - ## - ## WARNING: Do not use this config. It is here for debugging and - ## as a temporary fix in certain load scenarios. Setting this - ## results in a performance deterioration and an increase in memory - ## usage when the Trace Agent is under load. This config may be - ## removed in a future version. - ## - ## Specifies the number of trace payloads to buffer after decoding. - ## Traces can be buffered when receiving traces faster than the - ## processor can process them. 
- ## - # - # trace_buffer: 0 - - ## @param probabilistic_sampler - object - optional - ## Enables and configures the Probabilistic Sampler, compatible with the - ## OTel Probabilistic Sampler Processor ( https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/probabilisticsamplerprocessor#probabilistic-sampling-processor ) - ## - #probabilistic_sampler: - ## @env DD_APM_PROBABILISTIC_SAMPLER_ENABLED - boolean - optional - default: false - ## Enables or disables the probabilistic sampler - # enabled: false - # - ## @env DD_APM_PROBABILISTIC_SAMPLER_SAMPLING_PERCENTAGE - float - optional - default: 0 - ## Samples this percentage (0-100) of traffic - # sampling_percentage: 0 - # - ## @env DD_APM_PROBABILISTIC_SAMPLER_HASH_SEED - integer - optional - default: 0 - ## hash_seed: A seed used for the hash algorithm. This must match other agents and OTel - ## collectors using the probabilistic sampler to ensure consistent sampling. - # hash_seed: 0 - - - {{- if .InternalProfiling -}} - ## @param profiling - custom object - optional - ## Enter specific configurations for internal profiling. - ## - ## Please note that: - ## 1. This does *not* enable profiling for user applications. - ## 2. This only enables internal profiling of the agent go runtime. - ## 3. To enable profiling for user apps please refer to - ## https://docs.datadoghq.com/tracing/profiling/ - ## 4. Enabling this feature will incur in billing charges and other - ## unexpected side-effects (ie. agent profiles showing with your - ## services). - ## - ## Uncomment this parameter and the one below to enable profiling. - # - # internal_profiling: - # - ## @param enabled - boolean - optional - default: false - ## @env DD_INTERNAL_PROFILING_ENABLED - boolean - optional - default: false - ## Enable internal profiling for the trace-agent process. 
- # - # enabled: false - - {{ end -}} -{{ end -}} -{{- if .ProcessAgent }} - -###################################### -## Process Collection Configuration ## -###################################### - -## @param process_config - custom object - optional -## Enter specific configurations for your Process data collection. -## Uncomment this parameter and the one below to enable them. -## See https://docs.datadoghq.com/graphing/infrastructure/process/ -# -# process_config: - - {{- if (eq .OS "linux")}} - ## @param run_in_core_agent - custom object - optional - ## Controls whether the process Agent or core Agent collects process and/or container information (Linux only). - # run_in_core_agent: - ## @param enabled - boolean - optional - default: false - ## Enables process/container collection on the core Agent instead of the process Agent. - # enabled: false - {{ end }} - - ## @param process_collection - custom object - optional - ## Specifies settings for collecting processes. - # process_collection: - ## @param enabled - boolean - optional - default: false - ## Enables collection of information about running processes. - # enabled: false - - ## @param container_collection - custom object - optional - ## Specifies settings for collecting containers. - # container_collection: - ## @param enabled - boolean - optional - default: true - ## Enables collection of information about running containers. - # enabled: true - - ## Deprecated - use `process_collection.enabled` and `container_collection.enabled` instead - ## @param enabled - string - optional - default: "false" - ## @env DD_PROCESS_CONFIG_ENABLED - string - optional - default: "false" - ## A string indicating the enabled state of the Process Agent: - ## * "false" : The Agent collects only containers information. - ## * "true" : The Agent collects containers and processes information. - ## * "disabled" : The Agent process collection is disabled. 
- # - # enabled: "true" - - ## @param expvar_port - string - optional - default: 6062 - ## @env DD_PROCESS_CONFIG_EXPVAR_PORT - string - optional - default: 6062 - ## Port for the debug endpoints for the process Agent. - # - # expvar_port: 6062 - - ## @param cmd_port - string - optional - default: 6162 - ## Port for configuring runtime settings for the process Agent. - # - # cmd_port: 6162 - - ## @param log_file - string - optional - ## @env DD_PROCESS_CONFIG_LOG_FILE - string - optional - ## The full path to the file where process Agent logs are written. - # - # log_file: - - ## @param intervals - custom object - optional - default: 10s for normal checks and 2s for others. - ## @env DD_PROCESS_CONFIG_INTERVALS_CONTAINER - integer - optional - default: 10 - ## @env DD_PROCESS_CONFIG_INTERVALS_CONTAINER_REALTIME - integer - optional - default: 2 - ## @env DD_PROCESS_CONFIG_INTERVALS_PROCESS - integer - optional - default: 10 - ## @env DD_PROCESS_CONFIG_INTERVALS_PROCESS_REALTIME - integer - optional - default: 2 - ## The interval, in seconds, at which the Agent runs each check. If you want consistent - ## behavior between real-time, set the `container_realtime` and `process_realtime` intervals to 10. - # - # intervals: - # container: 10 - # container_realtime: 2 - # process: 10 - # process_realtime: 2 - - ## @param process_discovery - custom object - optional - ## Specifies custom settings for the `process_discovery` object. - # process_discovery: - ## @param enabled - boolean - optional - default: true - ## Toggles the `process_discovery` check. If enabled, this check gathers information about running integrations. - # enabled: true - - ## @param interval - duration - optional - default: 4h - minimum: 10m - ## An interval in hours that specifies how often the process discovery check should run. 
- # interval: 4h - - - ## @param blacklist_patterns - list of strings - optional - ## @env DD_PROCESS_CONFIG_BLACKLIST_PATTERNS - space separated list of strings - optional - ## A list of regex patterns that exclude processes if matched. - # - # blacklist_patterns: - # - - - ## @param queue_size - integer - optional - default: 256 - ## @env DD_PROCESS_CONFIG_QUEUE_SIZE - integer - optional - default: 256 - ## The number of check results to buffer in memory when a POST fails. - # - # queue_size: 256 - - ## @param process_queue_bytes - integer - optional - default: 60000000 - ## @env DD_PROCESS_CONFIG_PROCESS_QUEUE_BYTES - integer - optional - default: 60000000 - ## The amount of data (in bytes) to buffer in memory when a POST fails. - # - # process_queue_bytes: 60000000 - - ## @param rt_queue_size - integer - optional - default: 5 - ## @env DD_PROCESS_CONFIG_RT_QUEUE_SIZE - integer - optional - default: 5 - ## The number of realtime check results to buffer in memory when a POST fails. - # - # rt_queue_size: 5 - - ## @param max_per_message - integer - optional - default: 100 - ## @env DD_PROCESS_CONFIG_MAX_PER_MESSAGE - integer - optional - default: 100 - ## The maximum number of processes or containers per message. - # - # max_per_message: 100 - - ## @param dd_agent_bin - string - optional - ## @env DD_PROCESS_CONFIG_DD_AGENT_BIN - string - optional - ## Overrides the path to the Agent bin used for getting the hostname. Defaults are: - ## * Windows: \embedded\\agent.exe - ## * Unix: /opt/datadog-agent/bin/agent/agent - # - # dd_agent_bin: - - ## @param dd_agent_env - string - optional - default: "" - ## @env DD_PROCESS_CONFIG_DD_AGENT_ENV - string - optional - default: "" - ## Overrides of the environment we pass to fetch the hostname. - # - # dd_agent_env: "" - - ## @param scrub_args - boolean - optional - default: true - ## @env DD_PROCESS_CONFIG_SCRUB_ARGS - boolean - optional - default: true - ## Hide sensitive data on the Live Processes page. 
- # - # scrub_args: true - - ## @param custom_sensitive_words - list of strings - optional - ## @env DD_PROCESS_CONFIG_CUSTOM_SENSITIVE_WORDS - space separated list of strings - optional - ## Define your own list of sensitive data to be merged with the default one. - ## Read more on Datadog documentation: - ## https://docs.datadoghq.com/graphing/infrastructure/process/#process-arguments-scrubbing - # - # custom_sensitive_words: - # - 'personal_key' - # - '*token' - # - 'sql*' - # - '*pass*d*' - - ## @param disable_realtime_checks - boolean - optional - default: false - ## @env DD_PROCESS_CONFIG_DISABLE_REALTIME - boolean - optional - default: false - ## Disable realtime process and container checks - # - # disable_realtime_checks: false - -{{- if .InternalProfiling -}} - ## @param profiling - custom object - optional - ## Enter specific configurations for internal profiling. - ## - ## Please note that: - ## 1. This does *not* enable profiling for user applications. - ## 2. This only enables internal profiling of the agent go runtime. - ## 3. To enable profiling for user apps please refer to - ## https://docs.datadoghq.com/tracing/profiling/ - ## 4. Enabling this feature will incur in billing charges and other - ## unexpected side-effects (ie. agent profiles showing with your - ## services). - ## - ## Uncomment this parameter and the one below to enable profiling. - # - # internal_profiling: - # - ## @param enabled - boolean - optional - default: false - ## @env DD_INTERNAL_PROFILING_ENABLED - boolean - optional - default: false - ## Enable internal profiling for the Process Agent process. - # - # enabled: false - -{{ end }} - -{{- if .NetworkPath }} -{{- if (ne .OS "darwin")}} - -######################################## -## Network Path Configuration ## -######################################## - -# network_path: - ## @param connections_monitoring - custom object - optional - ## Specific configurations for monitoring network connections via Network Path. 
- # - # connections_monitoring: - - ## @param enabled - bool - optional - default: false - ## @env DD_NETWORK_PATH_CONNECTIONS_MONITORING_ENABLED - bool - optional - default: false - ## [Beta] Enables monitoring network connections via Network Path. - # - # enabled: false - - ## @param collector - custom object - optional - ## Configuration related to Network Path Collector. - # - # collector: - - ## @param workers - integer - optional - default: 4 - ## @env DD_WORKERS - integer - optional - default: 4 - ## The `workers` refers to the number of concurrent workers available for network path execution. - # - # workers: 4 - -{{ end -}} -{{ end -}} -{{ end -}} -{{- if .Compliance }} -############################################# -## Security Agent Compliance Configuration ## -############################################# - -## @param compliance_config - custom object - optional -## Enter specific configuration for continuous compliance checks. -# compliance_config: - - ## @param enabled - boolean - optional - default: false - ## @env DD_COMPLIANCE_CONFIG_ENABLED - boolean - optional - default: false - ## Set to true to enable Cloud Security Posture Management (CSPM). 
- # - # enabled: false - - ## @param dir - string - optional - default: /etc/datadog-agent/compliance.d - ## @env DD_COMPLIANCE_CONFIG_DIR - string - optional - default: /etc/datadog-agent/compliance.d - ## Directory path for compliance checks configuration containing enabled benchmarks - # - # dir: /etc/datadog-agent/compliance.d - - ## @param check_interval - duration - optional - default: 20m - ## @env DD_COMPLIANCE_CONFIG_CHECK_INTERVAL - duration - optional - default: 20m - ## Check interval (see https://golang.org/pkg/time/#ParseDuration for available options) - # check_interval: 20m - - ## @param check_max_events_per_run - integer - optional - default: 100 - ## @env DD_COMPLIANCE_CONFIG_CHECK_MAX_EVENTS_PER_RUN - integer - optional - default: 100 - ## - # check_max_events_per_run: 100 -{{ end -}} - -{{- if .SBOM }} -## @param sbom - custom object - optional -## Enter specific configuration for the Cloud Security Management Vulnerability Management feature -# sbom: - ## @param enabled - boolean - optional - default: false - ## set to true to enable Cloud Security Management Vulnerability Management - # enabled: false - - ## uncomment the sections below to enable where the vulnerability scanning is done - - ## @param enabled - boolean - optional - default: false - ## set to true to enable Infrastructure Vulnerabilities - # host: - # enabled: false -{{- if (eq .OS "linux")}} - - - # container_image: - # enabled: false -{{ end -}} -{{ end -}} -{{- if .SystemProbe }} - -################################## -## System Probe Configuration ## -################################## - -## @param system_probe_config - custom object - optional -## Enter specific configurations for your System Probe data collection. -## Uncomment this parameter and the one below to enable them. 
-# -# system_probe_config: -{{- if (eq .OS "windows")}} - ## @param sysprobe_socket - string - optional - default: localhost:3333 - ## @env DD_SYSTEM_PROBE_CONFIG_SYSPROBE_SOCKET - string - optional - default: localhost:3333 - ## The TCP address where system probes are accessed. - # - # sysprobe_socket: localhost:3333 -{{else}} - ## @param sysprobe_socket - string - optional - default: /opt/datadog-agent/run/sysprobe.sock - ## @env DD_SYSTEM_PROBE_CONFIG_SYSPROBE_SOCKET - string - optional - default: /opt/datadog-agent/run/sysprobe.sock - ## The full path to the location of the unix socket where system probes are accessed. - # - # sysprobe_socket: /opt/datadog-agent/run/sysprobe.sock -{{ end }} - ## @param log_file - string - optional - default: /var/log/datadog/system-probe.log - ## @env DD_SYSTEM_PROBE_CONFIG_LOG_FILE - string - optional - default: /var/log/datadog/system-probe.log - ## The full path to the file where system-probe logs are written. - # - # log_file: /var/log/datadog/system-probe.log - - ## @param language_detection - custom object - optional - ## Enter specific configurations for language detection - ## Uncomment this parameter and the one below to enable them. - # - # language_detection: - - ## @param enabled - bool - optional - default: false - ## @env DD_SYSTEM_PROBE_CONFIG_LANGUAGE_DETECTION_ENABLED - bool - optional - default: false - ## [Beta] Enables language detection via binary analysis in the system probe. - # - # enabled: false - - ## @param health_port - integer - optional - default: 0 - ## @env DD_SYSTEM_PROBE_HEALTH_PORT - integer - optional - default: 0 - ## The Agent can expose its health check on a dedicated HTTP port. - ## This is useful for orchestrators that support HTTP probes. - ## Default is 0 (disabled). Set a valid port number (example: 5558) to enable. - # - # health_port: 0 - -{{- if .InternalProfiling -}} - ## @param profiling - custom object - optional - ## Enter specific configurations for internal profiling. 
- ## - ## Please note that: - ## 1. This does *not* enable profiling for user applications. - ## 2. This only enables internal profiling of the agent go runtime. - ## 3. To enable profiling for user apps please refer to - ## https://docs.datadoghq.com/tracing/profiling/ - ## 4. Enabling this feature will incur in billing charges and other - ## unexpected side-effects (ie. agent profiles showing with your - ## services). - ## - ## Uncomment this parameter and the one below to enable profiling. - # - # internal_profiling: - # - ## @param enabled - boolean - optional - default: false - ## @env DD_INTERNAL_PROFILING_ENABLED - boolean - optional - default: false - ## Enable internal profiling for the System Probe process. - # - # enabled: false - - ## @param memory_controller - custom object - optional - ## Cgroup memory controller for internal memory profiling. - ## - ## memory_controller: - # - ## @param enabled - boolean - optional - default: false - ## Enable cgroup memory controller. - # - # enabled: false - # - ## @param thresholds - map of strings - optional - ## Thresholds and the respective active actions to trigger when - ## memory usage is above the specified threshold. - ## Threshold can be either an absolute value - such as 500MB or 2GB - - ## or a percentage of the cgroup allocated memory such as 50%. - ## The action can be: - ## - gc: to trigger the Go garbage collector - ## - profile: to generate a system-probe memory profile in /tmp - ## - log: to simply log that the threshold was reached - # - # thresholds: - # 500MB: gc - # 50%: profile - - ## @param pressure_levels - map of strings - optional - ## Pressure levels and the respective active actions to trigger when - ## memory usage reaches the specified level. - ## The pressure level is 'low', 'medium' or 'critical'. - ## The actions are the same for thresholds (see above). 
- # - # pressure_levels: - # medium: gc -{{ end }} - -{{- if .NetworkModule }} - -######################################## -## System Probe Network Configuration ## -######################################## - -# network_config: -{{- if (eq .OS "windows")}} - ## Please note that enabling the Network Module of the System - ## Probe will result in a kernel driver being loaded. -{{ end }} - ## @param enabled - boolean - optional - default: false - ## Set to true to enable the Network Module of the System Probe - # - # enabled: false - -{{ end -}} - -{{- if .UniversalServiceMonitoringModule }} - -############################################################# -## System Probe Universal Service monitoring Configuration ## -############################################################# - -# service_monitoring_config: -{{- if (eq .OS "windows")}} - ## Please note that enabling the Universal Service Monitoring - ## Module of the System Probe will result in a kernel driver - ## being loaded. -{{ end }} - ## @param enabled - boolean - optional - default: false - ## Set to true to enable the Universal Service Monitoring Module of the System Probe - # - # enabled: false - -{{ end -}} - -{{- if .PingModule }} - -##################################### -## System Probe Ping Configuration ## -##################################### - -# ping: - ## @param enabled - boolean - optional - default: false - ## Set to true to enable the Ping Module of the System Probe - # - # enabled: false - -{{ end -}} - - -{{- if .TracerouteModule }} - -########################################### -## System Probe Traceroute Configuration ## -########################################### - -# traceroute: - ## @param enabled - boolean - optional - default: false - ## Set to true to enable the Traceroute Module of the System Probe - # - # enabled: false - -{{ end -}} - - -{{- if .SecurityModule }} - -########################################## -## Security Agent Runtime Configuration ## -## ## -## Settings to send 
logs to Datadog are ## -## fetched from section `logs_config` ## -########################################## - -# runtime_security_config: - ## @param enabled - boolean - optional - default: false - ## @env DD_RUNTIME_SECURITY_CONFIG_ENABLED - boolean - optional - default: false - ## Set to true to enable Cloud Workload Security (CWS). - # - # enabled: false - - ## @param fim_enabled - boolean - optional - default: false - ## Set to true to enable the File Integrity Monitoring (FIM) feature of Cloud Workload Security (CWS). - # - # fim_enabled: false - -{{- if (eq .OS "windows")}} - ## @param sysprobe_socket - string - optional - default: localhost:3334 - ## @env DD_SYSTEM_PROBE_CONFIG_SYSPROBE_SOCKET - string - optional - default: localhost:3334 - ## The TCP address where the security runtime module is accessed. - # - # socket: localhost:3334 -{{else}} - ## @param socket - string - optional - default: /opt/datadog-agent/run/runtime-security.sock - ## @env DD_RUNTIME_SECURITY_CONFIG_SOCKET - string - optional - default: /opt/datadog-agent/run/runtime-security.sock - ## The full path to the location of the unix socket where security runtime module is accessed. 
- # - # socket: /opt/datadog-agent/run/runtime-security.sock -{{ end }} - ## @param policies - custom object - optional - ## Policy files - # policies: -{{- if (eq .OS "windows")}} - ## @param dir - string - default: %ProgramData%\Datadog\runtime-security.d - ## @env DD_RUNTIME_SECURITY_CONFIG_POLICIES_DIR - string - default: /etc/datadog-agent/runtime-security.d - ## Path from where the policy files are loaded - # - # dir: c:\ProgramData\Datadog\runtime-security.d -{{else}} - ## @param dir - string - default: /etc/datadog-agent/runtime-security.d - ## @env DD_RUNTIME_SECURITY_CONFIG_POLICIES_DIR - string - default: /etc/datadog-agent/runtime-security.d - ## Path from where the policy files will be loaded - # - # dir: /etc/datadog-agent/runtime-security.d -{{ end }} - ## @param syscall_monitor - custom object - optional - ## Syscall monitoring - # - # syscall_monitor: - - ## @param enabled - boolean - optional - default: false - ## @env DD_RUNTIME_SECURITY_CONFIG_SYSCALL_MONITOR_ENABLED - boolean - optional - default: false - ## Set to true to enable the Syscall monitoring (recommended for troubleshooting only). - # - # enabled: false - - ## @param custom_sensitive_words - list of strings - optional - ## @env DD_RUNTIME_SECURITY_CONFIG_CUSTOM_SENSITIVE_WORDS - space separated list of strings - optional - ## Define your own list of sensitive data to be merged with the default one. - ## Read more on Datadog documentation: - ## https://docs.datadoghq.com/graphing/infrastructure/process/#process-arguments-scrubbing - # - # custom_sensitive_words: - # - 'personal_key' - # - '*token' - # - 'sql*' - # - '*pass*d*' - - ## @param envs_with_value - list of strings - optional - ## @env DD_RUNTIME_SECURITY_CONFIG_ENVS_WITH_VALUE - space separated list of strings - optional - ## Define your own list of non-sensitive environment variable names whose value will not be - ## concealed by the runtime security module. 
- ## Default: LD_PRELOAD, LD_LIBRARY_PATH, PATH, HISTSIZE, HISTFILESIZE, GLIBC_TUNABLES - # - # envs_with_value: - # - LD_PRELOAD - # - LD_LIBRARY_PATH - # - PATH - # - HISTSIZE - # - HISTFILESIZE - - ## @param activity_dump - custom object - optional - ## Activity dump section configures if/how the Agent sends activity dumps to Datadog - # - # activity_dump: - - ## @param enabled - boolean - optional - default: false - ## @env DD_RUNTIME_SECURITY_CONFIG_ACTIVITY_DUMP_ENABLED - boolean - optional - default: false - ## Set to true to activate the security profiles feature. - # - # enabled: false - - ## @param traced_cgroups_count - integer - optional - default: 5 - ## @env DD_RUNTIME_SECURITY_CONFIG_ACTIVITY_DUMP_TRACED_CGROUPS_COUNT - integer - optional - default: 5 - ## Defines the number of concurrent cgroups to be traced. - # - # traced_cgroups_count: 5 - - ## @param dump_duration - duration - optional - default: 30m - ## @env DD_RUNTIME_SECURITY_CONFIG_ACTIVITY_DUMP_DUMP_DURATION - duration - optional - default: 30m - ## Defines the duration of cgroups learning phase. Minimum value is 10m. - # - # dump_duration: 30m - - ## @param network - custom object - optional - ## Network section is used to configure Cloud Workload Security (CWS) network features. - # - # network: - - ## @param enabled - boolean - optional - default: true - ## @env DD_RUNTIME_SECURITY_CONFIG_NETWORK_ENABLED - boolean - optional - default: true - ## Set to true to activate the CWS network detections. - # - # enabled: true - -{{- if (eq .OS "windows")}} - -##################################################### -## Datadog Agent Windows Crash Detection module -##################################################### - -# windows_crash_detection: - ## @param enabled - boolean - optional - default: false - ## Enables the system probe module which supports the Windows crash detection check. 
- # - # enabled: false -{{ end }} - -{{ end -}} -{{ end -}} - -{{- if .SecurityAgent -}} -#################################### -## Runtime Security configuration ## -#################################### - -# runtime_security_config: - ## @param enabled - boolean - optional - default: false - ## Set to true to enable Cloud Workload Security (CWS). - # - # enabled: false - -{{- if (eq .OS "windows")}} - ## @param socket - string - optional - default: localhost:3334 - ## The local address and port where the security runtime module is accessed - # - # socket: localhost:3334 -{{else}} - ## @param socket - string - optional - default: /opt/datadog-agent/run/runtime-security.sock - ## The full path to the location of the unix socket where security runtime module is accessed. - # - # socket: /opt/datadog-agent/run/runtime-security.sock -{{ end }} - -########################################## -## Compliance monitoring configuration ## -########################################## - -# compliance_config: - ## @param enabled - boolean - optional - default: false - ## Set to true to enable Cloud Security Posture Management (CSPM). - # - # enabled: false -{{ end -}} -{{- if .Dogstatsd }} - -############################# -## DogStatsD Configuration ## -############################# - -## @param use_dogstatsd - boolean - optional - default: true -## @env DD_USE_DOGSTATSD - boolean - optional - default: true -## Set this option to false to disable the Agent DogStatsD server. -# -# use_dogstatsd: true - -## @param dogstatsd_port - integer - optional - default: 8125 -## @env DD_DOGSTATSD_PORT - integer - optional - default: 8125 -## Override the Agent DogStatsD port. -## Note: Make sure your client is sending to the same UDP port. -# -# dogstatsd_port: 8125 - -## @param bind_host - string - optional - default: localhost -## @env DD_BIND_HOST - string - optional - default: localhost -## The host to listen on for Dogstatsd and traces. 
This is ignored by APM when -## `apm_config.apm_non_local_traffic` is enabled and ignored by DogStatsD when `dogstatsd_non_local_traffic` -## is enabled. The trace-agent uses this host to send metrics to. -## The `localhost` default value is invalid in IPv6 environments where dogstatsd listens on "::1". -## To solve this problem, ensure Dogstatsd is listening on IPv4 by setting this value to "127.0.0.1". -# -# bind_host: localhost - -{{- if (eq .OS "windows")}} -## Please note that UDS receiver is not available in Windows. -## Enabling this setting may result in unexpected behavior. -## @param dogstatsd_socket - string - optional - default: "" -## @env DD_DOGSTATSD_SOCKET - string - optional - default: "" -## Listen for Dogstatsd metrics on a Unix Socket (*nix only). -## Set to "" to disable this feature. -# -# dogstatsd_socket: "" -{{else}} -## @param dogstatsd_socket - string - optional - default: "/var/run/datadog/dsd.socket" -## @env DD_DOGSTATSD_SOCKET - string - optional - default: "/var/run/datadog/dsd.socket" -## Listen for Dogstatsd metrics on a Unix Socket (*nix only). -## Set to "" to disable this feature. -# -# dogstatsd_socket: "/var/run/datadog/dsd.socket" -{{ end }} - -## @param dogstatsd_origin_detection - boolean - optional - default: false -## @env DD_DOGSTATSD_ORIGIN_DETECTION - boolean - optional - default: false -## When using Unix Socket, DogStatsD can tag metrics with container metadata. -## If running DogStatsD in a container, host PID mode (e.g. with --pid=host) is required. -# -# dogstatsd_origin_detection: false - -## @param dogstatsd_origin_detection_client - boolean - optional - default: false -## @env DD_DOGSTATSD_ORIGIN_DETECTION_CLIENT - boolean - optional - default: false -## Whether the Agent should use a client-provided container ID to enrich the metrics, events and service checks with container tags. -## Note: This requires using a client compatible with DogStatsD protocol version 1.2. 
-# -# dogstatsd_origin_detection_client: false - -## @param dogstatsd_buffer_size - integer - optional - default: 8192 -## @env DD_DOGSTATSD_BUFFER_SIZE - integer - optional - default: 8192 -## The buffer size used to receive statsd packets, in bytes. -# -# dogstatsd_buffer_size: 8192 - -## @param dogstatsd_non_local_traffic - boolean - optional - default: false -## @env DD_DOGSTATSD_NON_LOCAL_TRAFFIC - boolean - optional - default: false -## Set to true to make DogStatsD listen to non local UDP traffic. -# -# dogstatsd_non_local_traffic: false - -## @param dogstatsd_stats_enable - boolean - optional - default: false -## @env DD_DOGSTATSD_STATS_ENABLE - boolean - optional - default: false -## Publish DogStatsD's internal stats as Go expvars. -# -# dogstatsd_stats_enable: false - -## @param dogstatsd_logging_enabled - boolean - optional - default: true -## Set to true to write DogstatsD metrics received by the Agent to dogstats_stats log files. -## Requires `dogstatsd_stats_enable: true`. -# -# dogstatsd_logging_enabled: true - -## @param dogstatsd_log_file_max_size - custom - optional - default: 10MB -## Maximum size of dogstatsd log file. Use either a size (for example, 10MB) or -## provide value in bytes (for example, 10485760.) -# -# dogstatsd_log_file_max_size: 10MB - -## @param dogstatsd_queue_size - integer - optional - default: 1024 -## @env DD_DOGSTATSD_QUEUE_SIZE - integer - optional - default: 1024 -## Configure the internal queue size of the Dogstatsd server. -## Reducing the size of this queue will reduce the maximum memory usage of the -## Dogstatsd server but as a trade-off, it could increase the number of packet drops. -# -# dogstatsd_queue_size: 1024 - -## @param dogstatsd_stats_buffer - integer - optional - default: 10 -## @env DD_DOGSTATSD_STATS_BUFFER - integer - optional - default: 10 -## Set how many items should be in the DogStatsD's stats circular buffer. 
-# -# dogstatsd_stats_buffer: 10 - -## @param dogstatsd_stats_port - integer - optional - default: 5000 -## @env DD_DOGSTATSD_STATS_PORT - integer - optional - default: 5000 -## The port for the go_expvar server. -# -# dogstatsd_stats_port: 5000 - -## @param dogstatsd_so_rcvbuf - integer - optional - default: 0 -## @env DD_DOGSTATSD_SO_RCVBUF - integer - optional - default: 0 -## The number of bytes allocated to DogStatsD's socket receive buffer (POSIX system only). -## By default, the system sets this value. If you need to increase the size of this buffer -## but keep the OS default value the same, you can set DogStatsD's receive buffer size here. -## The maximum accepted value might change depending on the OS. -# -# dogstatsd_so_rcvbuf: 0 - -## @param dogstatsd_metrics_stats_enable - boolean - optional - default: false -## @env DD_DOGSTATSD_METRICS_STATS_ENABLE - boolean - optional - default: false -## Set this parameter to true to have DogStatsD collects basic statistics (count/last seen) -## about the metrics it processed. Use the Agent command "dogstatsd-stats" to visualize -## those statistics. -# -# dogstatsd_metrics_stats_enable: false - -## @param dogstatsd_tags - list of key:value elements - optional -## @env DD_DOGSTATSD_TAGS - list of key:value elements - optional -## Additional tags to append to all metrics, events and service checks received by -## this DogStatsD server. -# -# dogstatsd_tags: -# - : -# -## @param dogstatsd_mapper_profiles - list of custom object - optional -## @env DD_DOGSTATSD_MAPPER_PROFILES - list of custom object - optional -## The profiles will be used to convert parts of metrics names into tags. -## If a profile prefix is matched, other profiles won't be tried even if that profile matching rules doesn't match. -## The profiles and matching rules are processed in the order defined in this configuration. 
-## -## For each profile, following fields are available: -## name (required): profile name -## prefix (required): mapping only applies to metrics with the prefix. If set to `*`, it will match everything. -## mappings: mapping rules, see below. -## For each mapping, following fields are available: -## match (required): pattern for matching the incoming metric name e.g. `test.job.duration.*` -## match_type (optional): pattern type can be `wildcard` (default) or `regex` e.g. `test\.job\.(\w+)\.(.*)` -## name (required): the metric name the metric should be mapped to e.g. `test.job.duration` -## tags (optional): list of key:value pair of tag key and tag value -## The value can use $1, $2, etc, that will be replaced by the corresponding element capture by `match` pattern -## This alternative syntax can also be used: ${1}, ${2}, etc -# -# dogstatsd_mapper_profiles: -# - name: # e.g. "airflow", "consul", "some_database" -# prefix: # e.g. "airflow.", "consul.", "some_database." -# mappings: -# - match: # e.g. `test.job.duration.*` to match `test.job.duration.my_job_name` -# match_type: # e.g. `wildcard` or `regex` -# name: # e.g. `test.job.duration` -# tags: -# : # e.g. `job_name: "$1"`, $1 is replaced by value capture by * -# - match: 'test.worker.*.*.start_time' # to match `test.worker...start_time` -# name: 'test.worker.start_time' -# tags: -# worker_type: '$1' -# worker_name: '$2' -# - match: 'test\.task\.duration\.(\w+)\.(.*)' # no need to escape in yaml context using single quote -# match_type: regex -# name: 'test.task' -# tags: -# task_type: '$1' -# task_name: '$2' - -## @param dogstatsd_mapper_cache_size - integer - optional - default: 1000 -## @env DD_DOGSTATSD_MAPPER_CACHE_SIZE - integer - optional - default: 1000 -## Size of the cache (max number of mapping results) used by Dogstatsd mapping feature. 
-# -# dogstatsd_mapper_cache_size: 1000 - -## @param dogstatsd_entity_id_precedence - boolean - optional - default: false -## @env DD_DOGSTATSD_ENTITY_ID_PRECEDENCE - boolean - optional - default: false -## Disable enriching Dogstatsd metrics with tags from "origin detection" when Entity-ID is set. -# -# dogstatsd_entity_id_precedence: false - - -## @param dogstatsd_no_aggregation_pipeline - boolean - optional - default: true -## @env DD_DOGSTATSD_NO_AGGREGATION_PIPELINE - boolean - optional - default: true -## Enable the no-aggregation pipeline in DogStatsD: a pipeline receiving metrics -## with timestamp and forwarding them to the intake without extra processing except -## for tagging. -# -# dogstatsd_no_aggregation_pipeline: true - -## @param dogstatsd_no_aggregation_pipeline_batch_size - integer - optional - default: 2048 -## @env DD_DOGSTATSD_NO_AGGREGATION_PIPELINE_BATCH_SIZE - integer - optional - default: 2048 -## How many metrics maximum in payloads sent by the no-aggregation pipeline to the intake. -# -# dogstatsd_no_aggregation_pipeline_batch_size: 2048 - -## @param statsd_forward_host - string - optional - default: "" -## @env DD_STATSD_FORWARD_HOST - string - optional - default: "" -## Forward every packet received by the DogStatsD server to another statsd server. -## WARNING: Make sure that forwarded packets are regular statsd packets and not "DogStatsD" packets, -## as your other statsd server might not be able to handle them. -# -# statsd_forward_host: "" - -## @param statsd_forward_port - integer - optional - default: 0 -## @env DD_STATSD_FORWARD_PORT - integer - optional - default: 0 -## Port of the "statsd_forward_host" to forward StatsD packet to. -# -# statsd_forward_port: 0 - -## @param statsd_metric_namespace - string - optional - default: "" -## @env DD_STATSD_METRIC_NAMESPACE - string - optional - default: "" -## Set a namespace for all StatsD metrics coming from this host. 
-## Each metric received is prefixed with the namespace before it's sent to Datadog. -# -# statsd_metric_namespace: "" - -{{ end -}} -{{- if .Metadata }} - -## @param metadata_providers - list of custom object - optional -## @env DD_METADATA_PROVIDERS - list of custom object - optional -## Metadata providers, add or remove from the list to enable or disable collection. -## Intervals are expressed in seconds. You can also set a provider's interval to 0 -## to disable it. -# -# metadata_providers: -# - name: k8s -# interval: 60 - -{{ end -}} -{{- if .JMX }} - -####################### -## JMX Configuration ## -####################### - -## @param jmx_custom_jars - list of strings - optional -## @env DD_JMX_CUSTOM_JARS - space separated list of strings - optional -## If you only run Autodiscovery tests, jmxfetch might fail to pick up custom_jar_paths -## set in the check templates. If that is the case, force custom jars here. -# -# jmx_custom_jars: -# - /jmx-jars/jboss-cli-client.jar - -## @param jmx_use_cgroup_memory_limit - boolean - optional - default: false -## @env DD_JMX_USE_CGROUP_MEMORY_LIMIT - boolean - optional - default: false -## When running in a memory cgroup, openjdk 8u131 and higher can automatically adjust -## its heap memory usage in accordance to the cgroup/container's memory limit. -## The Agent set a Xmx of 200MB if none is configured. -## Note: OpenJDK version < 8u131 or >= 10 as well as other JVMs might fail -## to start if this option is set. -# -# jmx_use_cgroup_memory_limit: false - -## @param jmx_use_container_support - boolean - optional - default: false -## @env DD_JMX_USE_CONTAINER_SUPPORT - boolean - optional - default: false -## When running in a container, openjdk 10 and higher can automatically detect -## container specific configuration instead of querying the operating system -## to adjust resources allotted to the JVM. -## Note: openjdk versions prior to 10 and other JVMs might fail to start if -## this option is set. 
-# -# jmx_use_container_support: false - -## @param jmx_max_ram_percentage - float - optional - default: 25.0 -## @env DD_JMX_MAX_RAM_PERCENTAGE - float - optional - default: 25.0 -## When running in a container with jmx_use_container_support enabled, the JVM can -## automatically declare the maximum heap size based off of a percentage of -## total container allocated memory. This option is overwritten if -## you use -Xmx to manually define the size of the heap. This option applies -## to containers with a total memory limit greater than ~250mb. If -## jmx_use_container_support is disabled this option has no effect. -# -# jmx_max_ram_percentage: 25.0 - -## @param jmx_log_file - string - optional -## @env DD_JMX_LOG_FILE - string - optional -## Path of the log file where JMXFetch logs are written. -# -# jmx_log_file: - -## @param jmx_max_restarts - integer - optional - default: 3 -## @env DD_JMX_MAX_RESTARTS - integer - optional - default: 3 -## Number of JMX restarts allowed in the restart-interval before giving up. -# -# jmx_max_restarts: 3 - -## @param jmx_restart_interval - integer - optional - default: 5 -## @env DD_JMX_RESTART_INTERVAL - integer - optional - default: 5 -## Duration of the restart interval in seconds. -# -# jmx_restart_interval: 5 - -## @param jmx_check_period - integer - optional - default: 15000 -## @env DD_JMX_CHECK_PERIOD - integer - optional - default: 15000 -## Duration of the period for check collections in milliseconds. -# -# jmx_check_period: 15000 - -## @param jmx_thread_pool_size - integer - optional - default: 3 -## @env DD_JMX_THREAD_POOL_SIZE - integer - optional - default: 3 -## JMXFetch collects multiples instances concurrently. Defines the maximum level of concurrency: -## * Higher concurrency increases CPU utilization during metric collection. -## * Lower concurrency results in lower CPU usage but may increase the total collection time. -## A value of 1 processes instances serially. 
-# -# jmx_thread_pool_size: 3 - -## @param jmx_collection_timeout - integer - optional - default: 60 -## @env DD_JMX_COLLECTION_TIMEOUT - integer - optional - default: 60 -## Defines the maximum waiting period in seconds before timing out on metric collection. -# -# jmx_collection_timeout: 60 - -## @param jmx_reconnection_thread_pool_size - integer - optional - default: 3 -## @env DD_JMX_RECONNECTION_THREAD_POOL_SIZE - integer - optional - default: 3 -## JMXFetch reconnects to multiples instances concurrently. Defines the maximum level of concurrency: -## * Higher concurrency increases CPU utilization during reconnection. -## * Lower concurrency results in lower CPU usage but may increase the total reconnection time -## A value of 1 processes instance reconnections serially. -# -# jmx_reconnection_thread_pool_size: 3 - -## @param jmx_reconnection_timeout - integer - optional - default: 60 -## @env DD_JMX_RECONNECTION_TIMEOUT - integer - optional - default: 60 -## Determines the maximum waiting period in seconds before timing out on instance reconnection. -# -# jmx_reconnection_timeout: 60 - -## @param jmx_statsd_telemetry_enabled - boolean - optional - default: false -## @env DD_JMX_STATSD_TELEMETRY_ENABLED - boolean - optional - default: false -## Specifies whether the JMXFetch statsd client telemetry is enabled. -# -# jmx_statsd_telemetry_enabled: false - -## @param jmx_telemetry_enabled - boolean - optional - default: false -## @env DD_JMX_TELEMETRY_ENABLED - boolean - optional - default: false -## Specifies whether additional JMXFetch telemetry is enabled. -# -# jmx_telemetry_enabled: false - -{{ end -}} -{{- if .Logging }} - -########################### -## Logging Configuration ## -########################### - -## @param log_level - string - optional - default: info -## @env DD_LOG_LEVEL - string - optional - default: info -## Minimum log level of the Datadog Agent. -## Valid log levels are: trace, debug, info, warn, error, critical, and off. 
-## Note: When using the 'off' log level, quotes are mandatory. -# -# log_level: 'info' - -## @param log_file - string - optional -## @env DD_LOG_FILE - string - optional -## Path of the log file for the Datadog Agent. -## See https://docs.datadoghq.com/agent/guide/agent-log-files/ -# -# log_file: - -## @param log_format_json - boolean - optional - default: false -## @env DD_LOG_FORMAT_JSON - boolean - optional - default: false -## Set to 'true' to output Agent logs in JSON format. -# -# log_format_json: false - -## @param log_to_console - boolean - optional - default: true -## @env DD_LOG_TO_CONSOLE - boolean - optional - default: true -## Set to 'false' to disable Agent logging to stdout. -# -# log_to_console: true - -## @param disable_file_logging - boolean - optional - default: false -## @env DD_DISABLE_FILE_LOGGING - boolean - optional - default: false -## Set to 'true' to disable logging to the log file. -# -# disable_file_logging: false - -## @param log_file_max_size - custom - optional - default: 10MB -## @env DD_LOG_FILE_MAX_SIZE - custom - optional - default: 10MB -## Maximum size of one log file. Use either a size (e.g. 10MB) or -## provide value in bytes: 10485760 -# -# log_file_max_size: 10MB - -## @param log_file_max_rolls - integer - optional - default: 1 -## @env DD_LOG_FILE_MAX_ROLLS - integer - optional - default: 1 -## Maximum amount of "old" log files to keep. -## Set to 0 to not limit the number of files to create. -# -# log_file_max_rolls: 1 - -## @param log_to_syslog - boolean - optional - default: false -## @env DD_LOG_TO_SYSLOG - boolean - optional - default: false -## Set to 'true' to enable logging to syslog. -## Note: Even if this option is set to 'false', the service launcher of your environment -## may redirect the Agent process' stdout/stderr to syslog. In that case, if you wish -## to disable logging to syslog entirely, set 'log_to_console' to 'false' as well. 
-# -# log_to_syslog: false - -## @param syslog_uri - string - optional -## @env DD_SYSLOG_URI - string - optional -## Define a custom remote syslog uri if needed. If 'syslog_uri' is left undefined/empty, -## a local domain socket connection is attempted. -# -# syslog_uri: - -## @param syslog_rfc - boolean - optional - default: false -## @env DD_SYSLOG_RFC - boolean - optional - default: false -## Set to 'true' to output in an RFC 5424-compliant format for Agent logs. -# -# syslog_rfc: false - -## @param syslog_pem - string - optional -## @env DD_SYSLOG_PEM - string - optional -## If TLS enabled, you must specify a path to a PEM certificate here. -# -# syslog_pem: - -## @param syslog_key - string - optional -## @env DD_SYSLOG_KEY - string - optional -## If TLS enabled, you must specify a path to a private key here. -# -# syslog_key: - -## @param syslog_tls_verify - boolean - optional - default: true -## @env DD_SYSLOG_TLS_VERIFY - boolean - optional - default: true -## If TLS enabled, you may enforce TLS verification here. -# -# syslog_tls_verify: true - -## @param log_format_rfc3339 - boolean - optional - default false -## @env DD_LOG_FORMAT_RFC3339 - boolean - optional - default false -## If enabled the Agent will log using the RFC3339 format for the log time. -# -# log_format_rfc3339: false - -## @param log_all_goroutines_when_unhealthy - boolean - optional - default false -## @env DD_LOG_ALL_GOROUTINES_WHEN_UNHEALTHY - boolean - optional - default false -## If enabled, when the health probe of an internal component fails, the stack traces -## of all the goroutines are logged. 
-# -# log_all_goroutines_when_unhealthy: false - -{{ end -}} -{{- if .Autoconfig }} - -############################## -## Autoconfig Configuration ## -############################## - -## @param autoconf_template_dir - string - optional - default: /datadog/check_configs -## @env DD_AUTOCONF_TEMPLATE_DIR - string - optional - default: /datadog/check_configs -## Directory containing configuration templates for Autoconfig. -# -# autoconf_template_dir: /datadog/check_configs - -## @param autoconf_config_files_poll - boolean - optional - default: false -## @env DD_AUTOCONF_CONFIG_FILES_POLL - boolean - optional - default: false -## Should we check for new/updated integration configuration files on disk. -## WARNING: Only files containing checks configuration are supported (logs configuration are not supported). -# -# autoconf_config_files_poll: false - -## @param autoconf_config_files_poll_interval - integer - optional - default: 60 -## @env DD_AUTOCONF_CONFIG_FILES_POLL_INTERVAL - integer - optional - default: 60 -## How frequently should the Agent check for new/updated integration configuration files (in seconds). -## This value must be >= 1 (i.e. 1 second). -## WARNING: Only files containing checks configuration are supported (logs configuration are not supported). -# -# autoconf_config_files_poll_interval: 60 - -## @param config_providers - List of custom object - optional -## @env DD_CONFIG_PROVIDERS - List of custom object - optional -## The providers the Agent should call to collect checks configurations. Available providers are: -## * kubelet - The kubelet provider handles templates embedded in pod annotations. -## * docker - The Docker provider handles templates embedded in container labels. -## * clusterchecks - The clustercheck provider retrieves cluster-level check configurations from the cluster-agent. 
-## * kube_services - The kube_services provider watches Kubernetes services for cluster-checks -## -## See https://docs.datadoghq.com/guides/autodiscovery/ to learn more -# -# config_providers: -# - name: kubelet -# polling: true -# - name: docker -# polling: true -# - name: clusterchecks -# grace_time_seconds: 60 -{{ if .ClusterChecks }} -# - name: kube_services -# polling: true -{{ end -}} -# - name: etcd -# polling: true -# template_dir: /datadog/check_configs -# template_url: http://127.0.0.1 -# username: -# password: -# - name: consul -# polling: true -# template_dir: datadog/check_configs -# template_url: http://127.0.0.1 -# ca_file: -# ca_path: -# cert_file: -# key_file: -# username: -# password: -# token: -# - name: zookeeper -# polling: true -# template_dir: /datadog/check_configs -# template_url: 127.0.0.1 -# username: -# password: - -## @param extra_config_providers - list of strings - optional -## @env DD_EXTRA_CONFIG_PROVIDERS - space separated list of strings - optional -## Add additional config providers by name using their default settings, and polling enabled. -## This list is available as an environment variable binding. -# -# extra_config_providers: -# - clusterchecks - -## @param autoconfig_exclude_features - list of comma separated strings - optional -## @env DD_AUTOCONFIG_EXCLUDE_FEATURES - list of space separated strings - optional -## Exclude features automatically detected and enabled by environment autodiscovery. -## Supported syntax is a list of `(:)`. Currently only the `name` attribute is supported. -## When no attribute is present, it defaults to `name:` attribute. 
-# -# autoconfig_exclude_features: -# - cloudfoundry -# - containerd -# - cri -# - docker -# - ecsec2 -# - ecsfargate -# - eksfargate -# - kubernetes -# - orchestratorexplorer -# - podman - -## @param autoconfig_include_features - list of comma separated strings - optional -## @env DD_AUTOCONFIG_INCLUDE_FEATURES - list of space separated strings - optional -## Force activation of features (as if they were discovered by environment autodiscovery). -# -# autoconfig_include_features: -# - cloudfoundry -# - containerd -# - cri -# - docker -# - ecsec2 -# - ecsfargate -# - eksfargate -# - kubernetes -# - orchestratorexplorer -# - podman - -{{ end -}} -{{- if .Autodiscovery }} - -########################################### -## Container Autodiscovery Configuration ## -########################################### - -## @param container_cgroup_root - string - optional - default: /host/sys/fs/cgroup/ -## @env DD_CONTAINER_CGROUP_ROOT - string - optional - default: /host/sys/fs/cgroup/ -## Change the root directory to look at to get cgroup statistics. -## Default if environment variable "DOCKER_DD_AGENT" is set to "/host/sys/fs/cgroup" -## and "/sys/fs/cgroup" if not. -# -# container_cgroup_root: /host/sys/fs/cgroup/ - -## @param container_proc_root - string - optional - default: /host/proc -## @env DD_CONTAINER_PROC_ROOT - string - optional - default: /host/proc -## Change the root directory to look at to get proc statistics. -## Default if environment variable "DOCKER_DD_AGENT" is set "/host/proc" and "/proc" if not. 
-# -# container_proc_root: /host/proc - -## @param listeners - list of key:value elements - optional -## @env DD_LISTENERS - list of key:value elements - optional -## Choose "auto" if you want to let the Agent find any relevant listener on your host -## At the moment, the only auto listener supported is Docker -## If you have already set Docker anywhere in the listeners, the auto listener is ignored -# -# listeners: -# - name: auto -# - name: docker - -## @param extra_listeners - list of strings - optional -## @env DD_EXTRA_LISTENERS - space separated list of strings - optional -## You can also add additional listeners by name using their default settings. -## This list is available as an environment variable binding. -# -# extra_listeners: -# - kubelet - -## @param ac_exclude - list of comma separated strings - optional -## @env DD_AC_EXCLUDE - list of space separated strings - optional -## Exclude containers from metrics and AD based on their name or image. -## If a container matches an exclude rule, it won't be included unless it first matches an include rule. -## An excluded container won't get any individual container metric reported for it. -## See: https://docs.datadoghq.com/agent/guide/autodiscovery-management/ -# -# ac_exclude: [] - -## @param ac_include - list of comma separated strings - optional -## @env DD_AC_INCLUDE - list of space separated strings - optional -## Include containers from metrics and AD based on their name or image: -## See: https://docs.datadoghq.com/agent/guide/autodiscovery-management/ -# -# ac_include: [] - -## @param exclude_pause_container - boolean - optional - default: true -## @env DD_EXCLUDE_PAUSE_CONTAINER - boolean - optional - default: true -## Exclude default pause containers from orchestrators. -## By default the Agent doesn't monitor kubernetes/openshift pause container. -## They are still counted in the container count (just like excluded containers). 
-# -# exclude_pause_container: true - -## @param docker_query_timeout - integer - optional - default: 5 -## @env DD_DOCKER_QUERY_TIMEOUT - integer - optional - default: 5 -## Set the default timeout value when connecting to the Docker daemon. -# -# docker_query_timeout: 5 - -## @param ad_config_poll_interval - integer - optional - default: 10 -## @env DD_AD_CONFIG_POLL_INTERVAL - integer - optional - default: 10 -## The default interval in second to check for new autodiscovery configurations -## on all registered configuration providers. -# -# ad_config_poll_interval: 10 - -## @param cloud_foundry_garden - custom object - optional -## Settings for Cloudfoundry application container autodiscovery. -# -# cloud_foundry_garden: - - ## @param listen_network - string - optional - default: unix - ## @env DD_CLOUD_FOUNDRY_GARDEN_LISTEN_NETWORK - string - optional - default: unix - ## The network on which the garden API is listening. Possible values are `unix` or `tcp` - # - # listen_network: unix - - ## @param listen_address - string - optional - default: /var/vcap/data/garden/garden.sock - ## @env DD_CLOUD_FOUNDRY_GARDEN_LISTEN_ADDRESS - string - optional - default: /var/vcap/data/garden/garden.sock - ## The address on which the garden API is listening. - # - # listen_address: /var/vcap/data/garden/garden.sock - -## @param podman_db_path - string - optional - default: "" -## @env DD_PODMAN_DB_PATH - string - optional - default: "" -## Settings for Podman DB that Datadog Agent collects container metrics. -# -# podman_db_path: "" - -{{ end -}} -{{- if .ClusterAgent }} - -################################# -## Cluster Agent Configuration ## -################################# - -## @param cluster_agent - custom object - optional -## Settings for the Cluster Agent. -## See https://docs.datadoghq.com/agent/cluster_agent/ -# -# cluster_agent: - - ## @param enabled - boolean - optional - default: false - ## Set to true to enable the Cluster Agent. 
- # - # enabled: false - - ## @param auth_token - string - optional - default: "" - ## Auth token used to make requests to the Kubernetes API server. - # - # auth_token: "" - - ## @param url - string - optional - default: "" - ## The Cluster Agent endpoint. There's no need to set it if "kubernetes_service_name" is set. - # - # url: "" - - ## @param kubernetes_service_name - string - optional - default: "datadog-cluster-agent" - ## Name of the Kubernetes service for the Cluster Agent. - # - # kubernetes_service_name: "datadog-cluster-agent" - - ## @param max_leader_connections - integer - optional - default: 100 - ## Maximum number of connections between a follower and a leader. - # - # max_leader_connections: 100 - - ## @param client_reconnect_period_seconds - integer - optional - default: 1200 - ## Set the refresh period for Agent to Cluster Agent connection (new connection is created, old connection is closed). - ## Set to 0 to disable periodic reconnection. - # - # client_reconnect_period_seconds: 1200 - - ## @param tagging_fallback - boolean - optional - default: false - ## Set to true to enable fallback to local metamapper when the connection with the Cluster Agent fails. - # - # tagging_fallback: false - - ## @param server - custom object - optional - ## Sets the connection timeouts - # - # server: - - ## @param read_timeout_seconds - integer - optional - default: 2 - ## Read timeout in seconds. - # - # read_timeout_seconds: 2 - - ## @param write_timeout_seconds - integer - optional - default: 2 - ## Write timeout in seconds. - # - # write_timeout_seconds: 2 - - ## @param idle_timeout_seconds - integer - optional - default: 60 - ## Idle timeout in seconds. - # - # idle_timeout_seconds: 60 - -{{ end -}} -{{- if .ClusterChecks }} - -################################# -## Cluster check Configuration ## -################################# - -## @param cluster_checks - custom object - optional -## Enter specific configurations for your cluster check. 
-## The cluster-agent is able to autodiscover cluster resources and dispatch checks on -## the node-agents (provided the clustercheck config provider is enabled on them). -## Uncomment this parameter and the one below to enable them. -## See https://docs.datadoghq.com/agent/kubernetes/cluster/ -# -# cluster_checks: - - ## @param enabled - boolean - optional - default: false - ## @env DD_CLUSTER_CHECKS_ENABLED - boolean - optional - default: false - ## Set to true to enable the dispatching logic on the leader cluster-agent. - # - # enabled: false - - ## @param node_expiration_timeout - integer - optional - default: 30 - ## @env DD_CLUSTER_CHECKS_NODE_EXPIRATION_TIMEOUT - integer - optional - default: 30 - ## Set "node_expiration_timeout" time in second after which Node-agents that have not - ## queried the cluster-agent are deleted, and their checks re-dispatched to other nodes. - # - # node_expiration_timeout: 30 - - ## @param warmup_duration - integer - optional - default: 30 - ## @env DD_CLUSTER_CHECKS_WARMUP_DURATION - integer - optional - default: 30 - ## Set the "warmup_duration" duration in second for the cluster-agent to wait for all - ## node-agents to report to it before dispatching configurations. - # - # warmup_duration: 30 - - ## @param cluster_tag_name - string - optional - default: cluster_name - ## @env DD_CLUSTER_CHECKS_CLUSTER_TAG_NAME - string - optional - default: cluster_name - ## If a cluster_name value is set or autodetected, a "" tag is added - ## to all cluster-check configurations sent to the node-agents. - ## Set a custom tag name here, or disable it by setting an empty name. - # - # cluster_tag_name: cluster_name - - ## @param extra_tags - list of key:value elements - optional - ## @env DD_CLUSTER_CHECKS_EXTRA_TAGS - list of key:value elements - optional - ## Set a list of additional tags to be added to every cluster-check configuration. 
- # - # extra_tags: - # - : - - ## @param advanced_dispatching_enabled - boolean - optional - default: false - ## @env DD_CLUSTER_CHECKS_ADVANCED_DISPATCHING_ENABLED - boolean - optional - default: false - ## If advanced_dispatching_enabled is true the leader cluster-agent collects stats - ## from the cluster level check runners to optimize the check dispatching logic. - # - # advanced_dispatching_enabled: false - - ## @param clc_runners_port - integer - optional - default: 5005 - ## @env DD_CLUSTER_CHECKS_CLC_RUNNERS_PORT - integer - optional - default: 5005 - ## Set the "clc_runners_port" used by the cluster-agent client to reach cluster level - ## check runners and collect their stats. - # - # clc_runners_port: 5005 - -{{ end -}} -{{- if .AdmissionController }} - -######################################## -## Admission controller Configuration ## -######################################## - -## @param admission_controller - custom object - optional -## Enter specific configurations for your admission controller. -## The Datadog admission controller is a component of the Datadog Cluster Agent. -## It has two main functionalities: -## Inject environment variables (DD_AGENT_HOST and DD_ENTITY_ID) to configure DogStatsD and APM tracer libraries into your application containers. -## Inject Datadog reserved tags (env, service, version) from application labels into the container environment variables. -## Uncomment this parameter and the one below to enable it. -## See https://docs.datadoghq.com/agent/cluster_agent/admission_controller/ -# -# admission_controller: - - ## @param enabled - boolean - optional - default: false - ## @env DD_ADMISSION_CONTROLLER_ENABLED - boolean - optional - default: false - ## Set to true to enable the admission controller in the cluster-agent. - # - # enabled: false - - ## @param validation - custom object - optional - ## The admission controller's validation configuration. 
- # - # validation: - - ## @param enabled - boolean - optional - default: true - ## @env DD_ADMISSION_CONTROLLER_VALIDATION_ENABLED - boolean - optional - default: true - ## Set to true to enable validation webhooks controller in the cluster-agent. - # - # enabled: true - - ## @param mutation - custom object - optional - ## The admission controller's mutation configuration. - # - # mutation: - - ## @param enabled - boolean - optional - default: true - ## @env DD_ADMISSION_CONTROLLER_MUTATION_ENABLED - boolean - optional - default: true - ## Set to true to enable mutation webhooks controller in the cluster-agent. - # - # enabled: true - - ## @param mutate_unlabelled - boolean - optional - default: false - ## @env DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED - boolean - optional - default: false - ## Enable injecting config without having the pod label admission.datadoghq.com/enabled="true". - # - # mutate_unlabelled: false - - ## @param port - integer - optional - default: 8000 - ## @env DD_ADMISSION_CONTROLLER_PORT - integer - optional - default: 8000 - ## The admission controller server port. - # - # port: 8000 - - ## @param timeout_seconds - integer - optional - default: 10 - ## @env DD_ADMISSION_CONTROLLER_TIMEOUT_SECONDS - integer - optional - default: 10 - ## The admission controller server timeout in seconds. - # - # timeout_seconds: 10 - - ## @param service_name - string - optional - default: datadog-admission-controller - ## @env DD_ADMISSION_CONTROLLER_SERVICE_NAME - string - optional - default: datadog-admission-controller - ## The name of the Kubernetes service that exposes the admission controller. - # - # service_name: datadog-admission-controller - - ## @param webhook_name - string - optional - default: datadog-webhook - ## @env DD_ADMISSION_CONTROLLER_WEBHOOK_NAME - string - optional - default: datadog-webhook - ## The name of the Kubernetes webhook object. 
- # - # webhook_name: datadog-webhook - - ## @param pod_owners_cache_validity - integer - optional - default: 10 - ## @env DD_ADMISSION_CONTROLLER_POD_OWNERS_CACHE_VALIDITY - integer - optional - default: 10 - ## The in-memory cache TTL for pod owners in minutes. - # - # pod_owners_cache_validity: 10 - - ## @param namespace_selector_fallback - boolean - optional - default: false - ## @env DD_ADMISSION_CONTROLLER_NAMESPACE_SELECTOR_FALLBACK - boolean - optional - default: false - ## Use namespace selectors instead of object selectors to watch objects. - ## For Kubernetes versions from 1.10 to 1.14 (inclusive) - # - # namespace_selector_fallback: false - - ## @param certificate - custom object - optional - ## The webhook's certificate configuration. - # - # certificate: - - ## @param validity_bound - integer - optional - default: 8760 - ## @env DD_ADMISSION_CONTROLLER_CERTIFICATE_VALIDITY_BOUND - integer - optional - default: 8760 - ## The certificate's validity bound in hours, default 1 year (365*24). - # - # validity_bound: 8760 - - ## @param expiration_threshold - integer - optional - default: 720 - ## @env DD_ADMISSION_CONTROLLER_CERTIFICATE_EXPIRATION_THRESHOLD - integer - optional - default: 720 - ## The certificate's refresh threshold in hours, default 1 month (30*24). - # - # expiration_threshold: 720 - - ## @param secret_name - string - optional - default: webhook-certificate - ## @env DD_ADMISSION_CONTROLLER_CERTIFICATE_SECRET_NAME - string - optional - default: webhook-certificate - ## Name of the Secret object containing the webhook certificate. - # - # secret_name: webhook-certificate - - ## @param inject_config - custom object - optional - ## Configuration injection parameters. 
- # - # inject_config: - - ## @param enabled - boolean - optional - default: true - ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_ENABLED - boolean - optional - default: true - ## Enable configuration injection (configure DogStatsD and APM tracer libraries). - # - # enabled: true - - ## @param endpoint - string - optional - default: /injectconfig - ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_ENDPOINT - string - optional - default: /injectconfig - ## Admission controller's endpoint responsible for handling configuration injection requests. - # - # endpoint: /injectconfig - - ## @param mode - string - optional - default: hostip - ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_MODE - string - optional - default: hostip - ## The kind of configuration to be injected, it can be "hostip", "service", or "socket". - # - # mode: hostip - - ## @param local_service_name - string - optional - default: datadog - ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_LOCAL_SERVICE_NAME - string - optional - default: datadog - ## Configure the local service name that exposes the Datadog Agent. Only applicable in "service" mode. - # - # local_service_name: datadog - - ## @param socket_path - string - optional - default: /var/run/datadog - ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_SOCKET_PATH - string - optional - default: /var/run/datadog - ## Configure Datadog Agent's socket path. Only applicable in "socket" mode. - # - # socket_path: /var/run/datadog - - ## @param trace_agent_socket - string - optional - default: unix:///var/run/datadog/apm.socket - ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_TRACE_AGENT_SOCKET - string - optional - default: unix:///var/run/datadog/apm.socket - ## Configure Trace Agent's socket path in the app container (DD_TRACE_AGENT_URL). - ## Only applicable in "socket" mode. 
- # - # trace_agent_socket: unix:///var/run/datadog/apm.socket - - ## @param type_socket_volumes - boolean - optional - default: false - ## @env DD_ADMISSION_CONTROLLER_INJECT_CONFIG_TYPE_SOCKET_VOLUMES - boolean - optional - default: false - ## When enabled, injected volumes are of type "Socket". This means that - ## injected pods will not start until the Agent creates the dogstatsd and - ## trace-agent sockets. This ensures no lost traces or dogstatsd metrics but - ## can cause the pod to wait if the agent has issues creating the sockets. - # - # type_socket_volumes: false - - ## @param inject_tags - custom object - optional - ## Tags injection parameters. - # - # inject_tags: - - ## @param enabled - boolean - optional - default: true - ## @env DD_ADMISSION_CONTROLLER_INJECT_TAGS_ENABLED - boolean - optional - default: true - ## Enable standard tags injection. - # - # enabled: true - - ## @param endpoint - string - optional - default: /injecttags - ## @env DD_ADMISSION_CONTROLLER_INJECT_TAGS_ENDPOINT - string - optional - default: /injecttags - ## Admission controller's endpoint responsible for handling tags injection requests. - # - # endpoint: /injecttags - - ## @param failure_policy - string - optional - default: Ignore - ## @env DD_ADMISSION_CONTROLLER_FAILURE_POLICY - string - optional - default: Ignore - ## Set the failure policy for dynamic admission control. - ## The default of Ignore means that pods will still be admitted even if the webhook is unavailable to inject them. - ## Setting to Fail will require the admission controller to be present and pods to be injected before they are allowed to run. - # - # failure_policy: Ignore - - ## @param reinvocation_policy - string - optional - default: IfNeeded - ## @env DD_ADMISSION_CONTROLLER_REINVOCATION_POLICY - string - optional - default: IfNeeded - ## Set the reinvocation policy for dynamic admission control. 
- ## See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#reinvocation-policy - # - # reinvocation_policy: IfNeeded - - ## @param add_aks_selectors - boolean - optional - default: false - ## @env DD_ADMISSION_CONTROLLER_ADD_AKS_SELECTORS - boolean - optional - default: false - ## Adds in the admission controller webhook the selectors that are required in AKS. - ## See https://docs.microsoft.com/en-us/azure/aks/faq#can-i-use-admission-controller-webhooks-on-aks - # - # add_aks_selectors: false - - ## @param auto_instrumentation - custom object - optional - ## Library injection parameters. - # - # auto_instrumentation: - - ## @param init_resources - custom object - optional - ## CPU and Memory resources of the init containers. - # - # init_resources: - - ## @param cpu - string - optional - ## @env DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_RESOURCES_CPU - string - optional - ## Configures the CPU request and limit for the init containers. - # - # cpu: - - ## @param memory - string - optional - ## @env DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_RESOURCES_MEMORY - string - optional - ## Configures the memory request and limit for the init containers. - # - # memory: - - ## @param init_security_context - json - optional - ## @env DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_SECURITY_CONTEXT - json - optional - ## Security context for the init containers in JSON format. Follows the Kubernetes security context spec, - ## https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#securitycontext-v1-core, - ## ignores unknown properties. 
- # - # init_security_context: '{"privileged": false}' - # - # DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_INIT_SECURITY_CONTEXT='{"privileged": false}' -{{ end -}} -{{- if .DockerTagging }} - -######################### -## Container detection ## -######################### - -## @param container_cgroup_prefix - string - optional - default: /docker/ -## @env DD_CONTAINER_CGROUP_PREFIX - string - optional - default: /docker/ -## On hosts with mixed workloads, non-containerized processes can -## mistakenly be detected as containerized. Use this parameter to -## tune the detection logic to your system and avoid false-positives. -# -# container_cgroup_prefix: "/docker/" - -########################### -## Docker tag extraction ## -########################### - -## @param docker_labels_as_tags - map - optional -## @env DD_DOCKER_LABELS_AS_TAGS - json - optional -## The Agent can extract container label values and set them as metric tags values associated to a . -## If you prefix your tag name with `+`, it will only be added to high cardinality metrics (Docker check). -# -# docker_labels_as_tags: -# : -# : + -# -# DD_DOCKER_LABELS_AS_TAGS='{"LABEL_NAME":"tag_key"}' - -## @param docker_env_as_tags - map - optional -## @env DD_DOCKER_ENV_AS_TAGS - json - optional -## The Agent can extract environment variables values and set them as metric tags values associated to a . -## If you prefix your tag name with `+`, it will only be added to high cardinality metrics (Docker check). -# -# docker_env_as_tags: -# : -# -# DD_DOCKER_ENV_AS_TAGS='{"ENVVAR_NAME": "tag_key"}' - -{{ end -}} -{{- if .KubernetesTagging }} - -############################### -## Kubernetes tag extraction ## -############################### - -## @param kubernetes_pod_labels_as_tags - map - optional -## @env DD_KUBERNETES_POD_LABELS_AS_TAGS - json - optional -## The Agent can extract pod labels values and set them as metric tags values associated to a . 
-## If you prefix your tag name with +, it will only be added to high cardinality metrics. -# -# kubernetes_pod_labels_as_tags: -# : -# : + -# -# DD_KUBERNETES_POD_LABELS_AS_TAGS='{"LABEL_NAME":"tag_key"}' - -## @param kubernetes_pod_annotations_as_tags - map - optional -## @env DD_KUBERNETES_POD_ANNOTATIONS_AS_TAGS - json - optional -## The Agent can extract annotations values and set them as metric tags values associated to a . -## If you prefix your tag name with +, it will only be added to high cardinality metrics. -# -# kubernetes_pod_annotations_as_tags: -# : -# : + -# -# DD_KUBERNETES_POD_ANNOTATIONS_AS_TAGS='{"ANNOTATION_NAME":"tag_key"}' - -## @param kubernetes_namespace_labels_as_tags - map - optional -## @env DD_KUBERNETES_NAMESPACE_LABELS_AS_TAGS - json - optional -## The Agent can extract namespace label values and set them as metric tags values associated to a . -## If you prefix your tag name with +, it will only be added to high cardinality metrics. -# -# kubernetes_namespace_labels_as_tags: -# : -# : + -# -# DD_KUBERNETES_NAMESPACE_LABELS_AS_TAGS='{"": ""}' - -## @param container_env_as_tags - map - optional -## @env DD_CONTAINER_ENV_AS_TAGS - map - optional -## The Agent can extract environment variable values and set them as metric tags values associated to a . -## Requires the container runtime socket to be reachable. (Supported container runtimes: Containerd, Docker) -# -# container_env_as_tags: -# : - -## @param container_labels_as_tags - map - optional -## @env DD_CONTAINER_LABELS_AS_TAGS - map - optional -## The Agent can extract container label values and set them as metric tags values associated to a . -## If you prefix your tag name with `+`, it will only be added to high cardinality metrics. (Supported container -## runtimes: Containerd, Docker). 
-# -# container_labels_as_tags: -# : -# : + - -{{ end -}} -{{- if .ECS }} - -################################### -## ECS integration Configuration ## -################################### - -## @param ecs_agent_container_name - string - optional - default: ecs-agent -## @env DD_ECS_AGENT_CONTAINER_NAME - string - optional - default: ecs-agent -## The ECS Agent container should be autodetected when running with the -## default (ecs-agent) name. If not, change the container name here: -# -# ecs_agent_container_name: ecs-agent - -## @param ecs_agent_url - string - optional - default: http://localhost:51678 -## @env DD_ECS_AGENT_URL - string - optional - default: http://localhost:51678 -## The ECS Agent container should be autodetected when running with the -## default (ecs-agent) name. If not, change the container name the -## Agent should look for with ecs_agent_container_name, or force a fixed url here: -# -# ecs_agent_url: http://localhost:51678 - -## @param ecs_collect_resource_tags_ec2 - boolean - optional - default: false -## @env DD_ECS_COLLECT_RESOURCE_TAGS_EC2 - boolean - optional - default: false -## The Agent can collect resource tags from the metadata API exposed by the -## ECS Agent for tasks scheduled with the EC2 launch type. -# -# ecs_collect_resource_tags_ec2: false - -## @param ecs_resource_tags_replace_colon - boolean - optional - default: false -## @env DD_ECS_RESOURCE_TAGS_REPLACE_COLON - boolean - optional - default: false -## The Agent replaces colon `:` characters in the ECS resource tag keys by underscores `_`. -# -# ecs_resource_tags_replace_colon: false - -## @param ecs_metadata_timeout - integer - optional - default: 500 -## @env DD_ECS_METADATA_TIMEOUT - integer - optional - default: 500 -## Timeout in milliseconds on calls to the AWS ECS metadata endpoints. 
-# -# ecs_metadata_timeout: 500 - -## @param ecs_task_collection_enabled - boolean - optional - default: false -## @env DD_ECS_TASK_COLLECTION_ENABLED - boolean - optional - default: false -## The Agent can collect detailed task information from the metadata API exposed by the ECS Agent, -## which is used for the orchestrator ECS check. -# -# ecs_task_collection_enabled: false - -{{ end -}} -{{- if .CRI }} - -################################### -## CRI integration Configuration ## -################################### - -## @param cri_socket_path - string - optional - default: "" -## @env DD_CRI_SOCKET_PATH - string - optional - default: "" -## To activate the CRI check, indicate the path of the CRI socket you're using -## and mount it in the container if needed. -## If left empty, the CRI check is disabled. -## see: https://docs.datadoghq.com/integrations/cri/ -# -# cri_socket_path: "" - -## @param cri_connection_timeout - integer - optional - default: 1 -## @env DD_CRI_CONNECTION_TIMEOUT - integer - optional - default: 1 -## Configure the initial connection timeout in seconds. -# -# cri_connection_timeout: 1 - -## @param cri_query_timeout - integer - optional - default: 5 -## @env DD_CRI_QUERY_TIMEOUT - integer - optional - default: 5 -## Configure the timeout in seconds for querying the CRI. -# -# cri_query_timeout: 5 - -{{ end -}} -{{- if .Containerd}} - -########################################## -## Containerd integration Configuration ## -########################################## - -## @param cri_socket_path - string - optional - default: /var/run/containerd/containerd.sock -## @env DD_CRI_SOCKET_PATH - string - optional - default: /var/run/containerd/containerd.sock -## To activate the Containerd check, indicate the path of the Containerd socket you're using -## and mount it in the container if needed. 
-## see: https://docs.datadoghq.com/integrations/containerd/ -# -# cri_socket_path: /var/run/containerd/containerd.sock - -## @param cri_query_timeout - integer - optional - default: 5 -## @env DD_CRI_QUERY_TIMEOUT - integer - optional - default: 5 -## Configure the timeout in seconds for querying the Containerd API. -# -# cri_query_timeout: 5 - -## Deprecated - use `containerd_namespaces` instead -## @param containerd_namespace - list of strings - optional - default: [] -## @env DD_CONTAINERD_NAMESPACE - space separated list of strings - optional - default: [] -## Activating the Containerd check also activates the CRI check, as it contains an additional subset of useful metrics. -## Defaults to [] which configures the agent to report metrics and events from all the containerd namespaces. -## To watch specific namespaces, list them here. -## https://github.com/containerd/cri/blob/release/1.2/pkg/constants/constants.go#L22-L23 -# -# containerd_namespace: -# - k8s.io - -## @param containerd_namespaces - list of strings - optional - default: [] -## @env DD_CONTAINERD_NAMESPACES - space separated list of strings - optional - default: [] -## Activating the Containerd check also activates the CRI check, as it contains an additional subset of useful metrics. -## Defaults to [] which configures the agent to report metrics and events from all the containerd namespaces. -## containerd_namespaces acts as an alias for containerd_namespace. When both containerd_namespaces and containerd_namespace -## are configured, the Agent merges the two lists. -# -# containerd_namespaces: -# - k8s.io -# -## @param containerd_exclude_namespaces - list of strings - optional - default: ["moby"] -## @env DD_CONTAINERD_EXCLUDE_NAMESPACES - space separated list of strings - optional - default: ["moby"] -## When containerd_namespaces is set to [], containerd_exclude_namespaces -## allows the exclusion of containers from specific namespaces. 
By default it -## excludes "moby", to prevent Docker containers from being detected as -## containerd containers. -# -# containerd_exclude_namespaces: -# - moby - -{{ end -}} -{{- if .Kubelet }} - -################################################### -## Kubernetes kubelet connectivity Configuration ## -################################################### - -## @param kubernetes_kubelet_host - string - optional -## @env DD_KUBERNETES_KUBELET_HOST - string - optional -## The kubelet host should be autodetected when running inside a pod. -## If you run into connectivity issues, set the host here according to your cluster setup. -# -# kubernetes_kubelet_host: - -## @param kubernetes_http_kubelet_port - integer - optional - default: 10255 -## @env DD_KUBERNETES_HTTP_KUBELET_PORT - integer - optional - default: 10255 -## The kubelet http port should be autodetected when running inside a pod. -## If you run into connectivity issues, set the http port here according to your cluster setup. -# -# kubernetes_http_kubelet_port: 10255 - -## @param kubernetes_https_kubelet_port - integer - optional - default: 10250 -## @env DD_KUBERNETES_HTTPS_KUBELET_PORT - integer - optional - default: 10250 -## The kubelet https port should be autodetected when running inside a pod. -## If you run into connectivity issues, set the https port here according to your cluster setup. -# -# kubernetes_https_kubelet_port: 10250 - -## @param kubelet_tls_verify - boolean - optional - default: true -## @env DD_KUBELET_TLS_VERIFY - boolean - optional - default: true -## Set to false if you don't want the Agent to verify the kubelet's certificate when using HTTPS. -# -# kubelet_tls_verify: true - -## @param kubelet_client_ca - string - optional - default: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -## @env DD_KUBELET_CLIENT_CA - string - optional - default: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -## Kubelet client CA file path. 
-# -# kubelet_client_ca: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - -## @param kubelet_auth_token_path - string - optional -## @env DD_KUBELET_AUTH_TOKEN_PATH - string - optional -## If authentication is needed, the Agent uses the pod's service account's -## credentials. If you want to use a different account, or are running the Agent -## on the host, set a custom token file path here. -# -# kubelet_auth_token_path: - -## @param kubelet_client_crt - string - optional -## @env DD_KUBELET_CLIENT_CRT - string - optional -## Set a custom Client CRT file path. -# -# kubelet_client_crt: - -## @param kubelet_client_key - string - optional -## @env DD_KUBELET_CLIENT_KEY - string - optional -## Set a custom Client key file path. -# -# kubelet_client_key: - -## @param kubelet_wait_on_missing_container - integer - optional - default: 0 -## @env DD_KUBELET_WAIT_ON_MISSING_CONTAINER - integer - optional - default: 0 -## On some kubelet versions, containers can take up to a second to -## register in the podlist. This option allows to wait for up to a given -## number of seconds (in 250ms chunks) when a container does not exist in the podlist. -# -# kubelet_wait_on_missing_container: 0 - -## @param kubelet_cache_pods_duration - integer - optional - default: 5 -## @env DD_KUBELET_CACHE_PODS_DURATION - integer - optional - default: 5 -## Polling frequency in seconds of the Agent to the kubelet "/pods" endpoint. -# -# kubelet_cache_pods_duration: 5 - -## @param kubernetes_pod_expiration_duration - integer - optional - default: 900 -## @env DD_KUBERNETES_POD_EXPIRATION_DURATION - integer - optional - default: 900 -## Set the time in second after which the Agent ignores the pods that have exited. -## Set the duration to 0 to disable this filtering. 
-# -# kubernetes_pod_expiration_duration: 900 - -## @param kubelet_listener_polling_interval - integer - optional - default: 5 -## @env DD_KUBELET_LISTENER_POLLING_INTERVAL - integer - optional - default: 5 -## Polling frequency in seconds at which autodiscovery will query the pod watcher to detect new pods/containers. -## Note that kubelet_cache_pods_duration needs to be lower than this setting, or autodiscovery will only poll more frequently the same cached data (kubelet_cache_pods_duration controls the cache refresh frequency). -# -# kubelet_listener_polling_interval: 5 - -{{ end -}} -{{- if .KubeApiServer }} - -#################################################### -## Kubernetes apiserver integration Configuration ## -#################################################### - -## @param kubernetes_kubeconfig_path - string - optional - default: "" -## @env DD_KUBERNETES_KUBECONFIG_PATH - string - optional - default: "" -## When running in a pod, the Agent automatically uses the pod's service account -## to authenticate with the API server. -## Provide the path to a custom KubeConfig file if you wish to install the Agent out of a pod -## or customize connection parameters. -## See https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/ -# -# kubernetes_kubeconfig_path: "" - -## @param kubernetes_apiserver_ca_path - string - optional - default: "" -## @env DD_KUBERNETES_APISERVER_CA_PATH - string - optional - default: "" -## When running in a pod, the Agent automatically uses the pod's service account CA. -## Use this option to keep using the InCluster config but overriding the default CA Path. -## This parameter has no effect if `kubernetes_kubeconfig_path` is set. 
-# -# kubernetes_apiserver_ca_path: "" - -## @param kubernetes_apiserver_tls_verify - boolean - optional - default: true -## @env DD_KUBERNETES_APISERVER_TLS_VERIFY - boolean - optional - default: true -## When running in a pod, the Agent automatically uses the pod's service account CA. -## Use this option to keep using the InCluster config but deactivating TLS verification (in case APIServer CA is not ServiceAccount CA) -## This parameter has no effect if `kubernetes_kubeconfig_path` is set. -# -# kubernetes_apiserver_tls_verify: true - -## @param kubernetes_apiserver_use_protobuf - boolean - optional - default: false -## @env DD_KUBERNETES_APISERVER_USE_PROTOBUF - boolean - optional - default: false -## By default, communication with the apiserver is in json format. Setting the following -## option to true allows communication in the binary protobuf format. -# -# kubernetes_apiserver_use_protobuf: false - -## @param kubernetes_collect_metadata_tags - boolean - optional - default: true -## @env DD_KUBERNETES_COLLECT_METADATA_TAGS - boolean - optional - default: true -## Set this to false to disable tag collection for the Agent. -## Note: In order to collect Kubernetes service names, the Agent needs certain rights. -## See https://github.com/DataDog/datadog-agent/blob/main/Dockerfiles/agent/README.md#kubernetes -# -# kubernetes_collect_metadata_tags: true - -## @param kubernetes_metadata_tag_update_freq - integer - optional - default: 60 -## @env DD_KUBERNETES_METADATA_TAG_UPDATE_FREQ - integer - optional - default: 60 -## Set how often in seconds the Agent refreshes the internal mapping of services to ContainerIDs. -# -# kubernetes_metadata_tag_update_freq: 60 - -## @param kubernetes_apiserver_client_timeout - integer - optional - default: 10 -## @env DD_KUBERNETES_APISERVER_CLIENT_TIMEOUT - integer - optional - default: 10 -## Set the timeout for the Agent when connecting to the Kubernetes API server. 
-# -# kubernetes_apiserver_client_timeout: 10 - -## @param collect_kubernetes_events - boolean - optional - default: false -## @env DD_COLLECT_KUBERNETES_EVENTS - boolean - optional - default: false -## Set `collect_kubernetes_events` to true to enable collection of kubernetes -## events to be sent to Datadog. -## Note: leader election must be enabled below to collect events. -## Only the leader Agent collects events. -## See https://github.com/DataDog/datadog-agent/blob/main/Dockerfiles/agent/README.md#event-collection -# -# collect_kubernetes_events: false - -## @param kubernetes_event_collection_timeout - integer - optional - default: 100 -## @env DD_KUBERNETES_EVENT_COLLECTION_TIMEOUT - integer - optional - default: 100 -## Set the timeout between two successful event collections in milliseconds. -# -# kubernetes_event_collection_timeout: 100 - -## @param leader_election - boolean - optional - default: false -## @env DD_LEADER_ELECTION - boolean - optional - default: false -## Set the parameter to true to enable leader election on this node. -## See https://github.com/DataDog/datadog-agent/blob/main/Dockerfiles/agent/README.md#leader-election -# -# leader_election: false - -## @param leader_lease_duration - integer - optional - default: 60 -## @env DD_LEADER_LEASE_DURATION - integer - optional - default: 60 -## Set the leader election lease in seconds. -# -# leader_lease_duration: 60 - -## @param kubernetes_node_labels_as_tags - map - optional -## @env DD_KUBERNETES_NODE_LABELS_AS_TAGS - json - optional -## Configure node labels that should be collected and their name as host tags. 
-## Note: Some of these labels are redundant with metadata collected by cloud provider crawlers (AWS, GCE, Azure) -# -# kubernetes_node_labels_as_tags: -# kubernetes.io/hostname: nodename -# beta.kubernetes.io/os: os -# -# DD_KUBERNETES_NODE_LABELS_AS_TAGS='{"NODE_LABEL": "TAG_KEY"}' - -## @param kubernetes_node_annotations_as_tags - map - optional -## @env DD_KUBERNETES_NODE_ANNOTATIONS_AS_TAGS - json - optional -## Configure node annotations that should be collected and their name as host tags. -# -# kubernetes_node_annotations_as_tags: -# cluster.k8s.io/machine: machine -# -# DD_KUBERNETES_NODE_ANNOTATIONS_AS_TAGS='{"NODE_ANNOTATION": "TAG_KEY"}' - -## @param kubernetes_node_annotations_as_host_aliases - list - optional -## @env DD_KUBERNETES_NODE_ANNOTATIONS_AS_HOST_ALIASES - list - optional -## Configure node annotations that should be collected and used as host aliases. -# -# kubernetes_node_annotations_as_host_aliases: -# - cluster.k8s.io/machine -# -# DD_KUBERNETES_NODE_ANNOTATIONS_AS_HOST_ALIASES='["cluster.k8s.io/machine"]' - -## @param cluster_name - string - optional -## @env DD_CLUSTER_NAME - string - optional -## Set a custom kubernetes cluster identifier to avoid host alias collisions. -## The cluster name can be up to 40 characters with the following restrictions: -## * Lowercase letters, numbers, and hyphens only. -## * Must start with a letter. -## * Must end with a number or a letter. -## -## These are the same rules as the ones enforced by GKE: -## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name -# -# cluster_name: - -## @param disable_cluster_name_tag_key - boolean - optional - default: false -## @env DD_DISABLE_CLUSTER_NAME_TAG_KEY - boolean - optional - default: false -## Disable using the 'cluster_name' tag key to submit orchestrator cluster name tag. 
-## The Agent will continue sending the cluster name tag with 'kube|ecs_cluster_name' key -## regardless of the value of this parameter. -# -# disable_cluster_name_tag_key: false - -## @param kubernetes_ad_tags_disabled -- list of strings - optional -## @env DD_KUBERNETES_AD_TAGS_DISABLED -- list of strings - optional -## Can only be set to a single valid value: [ "kube_service" ] -## in order to not attach the kube_service tag on ready pods -# -# kubernetes_ad_tags_disabled: -# - kube_service - -{{ end -}} -{{- if .PrometheusScrape }} -## @param prometheus_scrape - custom object - optional -## This section configures the Autodiscovery based on the Prometheus annotations -# -# prometheus: - - ## @param enabled - boolean - optional - default: false - ## Enables the prometheus config provider - # - # enabled: false - - ## @param service_endpoints - boolean - optional - default: false - ## Enables Service Endpoints checks in the prometheus config provider - # - # service_endpoints: false - - ## @param checks - custom object - optional - ## Defines any extra prometheus/openmetrics check configurations to be handled by the prometheus config provider - # - # checks: {} - - ## @param version - integer - optional - default: 1 - ## Version of the openmetrics check to be scheduled by the Prometheus auto-discovery - # - # version: 1 - -{{ end -}} -{{- if .CloudFoundryBBS }} -####################################################### -## Cloud Foundry BBS Configuration for Autodiscovery ## -####################################################### - -## @param cloud_foundry_bbs - custom object - optional -## This section configures how the Cluster Agent accesses BBS API to gather information -## necessary for autodiscovery on BBS-based Cloud Foundry deployments. 
-# -# cloud_foundry_bbs: - - ## @param url - string - optional - default: https://bbs.service.cf.internal:8889 - ## @env DD_CLOUD_FOUNDRY_BBS_URL - string - optional - default: https://bbs.service.cf.internal:8889 - ## URL of the BBS API. - # - # url: https://bbs.service.cf.internal:8889 - - ## @param poll_interval - integer - optional - default: 15 - ## @env DD_CLOUD_FOUNDRY_BBS_POLL_INTERVAL - integer - optional - default: 15 - ## Refresh rate of BBS API, in seconds. Values lower than 10 might influence - ## performance of other operations in the cluster. - # - # poll_interval: 15 - - ## @param ca_file - string - optional - default: "" - ## @env DD_CLOUD_FOUNDRY_BBS_CA_FILE - string - optional - default: "" - ## PEM-encoded CA certificate used when connecting to the BBS API. - # - # ca_file: "" - - ## @param cert_file - string - optional - default: "" - ## @env DD_CLOUD_FOUNDRY_BBS_CERT_FILE - string - optional - default: "" - ## PEM-encoded client certificate used when connecting to the BBS API. - # - # cert_file: "" - - ## @param key_file - string - optional - default: "" - ## @env DD_CLOUD_FOUNDRY_BBS_KEY_FILE - string - optional - default: "" - ## PEM-encoded client key used when connecting to the BBS API. 
- #
- # key_file: ""
-
- ## @param env_include - list of strings - optional - default: []
- ## @env DD_CLOUD_FOUNDRY_BBS_ENV_INCLUDE - list of strings - optional
- ## List of regular expressions to allow a set of environment variables to be included as container tags
- #
- # env_include: []
- ## @param env_exclude - list of strings - optional - default: []
- ## @env DD_CLOUD_FOUNDRY_BBS_ENV_EXCLUDE - list of strings - optional
- ## List of regular expressions to forbid a set of environment variables to be included as container tags
- #
- # env_exclude: []
-
-{{ end -}}
-{{- if .CloudFoundryCC }}
-####################################################################
-## Cloud Foundry Cloud Controller Configuration for Autodiscovery ##
-####################################################################
-
-## @param cloud_foundry_cc - custom object - optional
-## This section configures how the Cluster Agent accesses CC API to gather information
-## necessary for autodiscovery on Cloud Foundry deployments.
-#
-# cloud_foundry_cc:
-
- ## @param url - string - optional - default: https://cloud-controller-ng.service.cf.internal:9024
- ## @env DD_CLOUD_FOUNDRY_CC_URL - string - optional - default: https://cloud-controller-ng.service.cf.internal:9024
- ## URL of the CC API.
- #
- # url: https://cloud-controller-ng.service.cf.internal:9024
-
- ## @param client_id - string - optional
- ## @env DD_CLOUD_FOUNDRY_CC_CLIENT_ID
- ## Client ID for oauth with UAA to get a token to access the CC API.
- #
- # client_id:
-
- ## @param client_secret - string - optional
- ## @env DD_CLOUD_FOUNDRY_CC_CLIENT_SECRET
- ## Client secret for oauth with UAA to get a token to access the CC API.
- #
- # client_secret:
-
- ## @param skip_ssl_validation - boolean - optional - default: false
- ## @env DD_CLOUD_FOUNDRY_CC_SKIP_SSL_VALIDATION
- ## Whether or not to skip SSL validation when interacting with CC API.
- # - # skip_ssl_validation: false - - ## @param poll_interval - integer - optional - default: 60 - ## @env DD_CLOUD_FOUNDRY_CC_POLL_INTERVAL - ## Refresh rate of CC API, in seconds. Values lower than 10 might influence - ## performance of other operations in the cluster. - # - # poll_interval: 60 - - ## @param apps_batch_size - integer - optional - default: 5000 - ## @env DD_CLOUD_FOUNDRY_CC_APPS_BATCH_SIZE - ## Number of apps per page to collect when calling the list apps endpoint of the CC API. Max 5000. - # - # apps_batch_size: 5000 - -{{ end -}} -{{- if .SNMP }} - -################################### -## Network Devices Configuration ## -################################### - -## @param network_devices - custom object - optional -## Configuration related to Network Devices Monitoring -# -# network_devices: - - ## @param namespace - string - optional - default: default - ## Namespace can be used to disambiguate devices with the same IP. - ## Changing namespace will cause devices being recreated in NDM app. - ## It should contain less than 100 characters and should not contain any of - ## `<`, `>`, `\n`, `\t`, `\r` characters. - ## This field is used by NDM features (SNMP check, SNMP Traps listener, etc). - # - # namespace: default - - ## @param autodiscovery - custom object - optional - ## Creates and schedules a listener to automatically discover your SNMP devices. - ## Discovered devices can then be monitored with the SNMP integration by using - ## the auto_conf.yaml file provided by default. - # - # autodiscovery: - - ## @param workers - integer - optional - default: 2 - ## The number of concurrent tasks used to discover SNMP devices. Increasing this value - ## discovers devices faster but at the cost of increased resource consumption. - # - # workers: 2 - - ## @param discovery_interval - integer - optional - default: 3600 - ## How often to discover new SNMP devices, in seconds. 
Decreasing this value - ## discovers devices faster (within the limit of the time taken to scan subnets) - ## but at the cost of increased resource consumption. - # - # discovery_interval: 3600 - - ## @param discovery_allowed_failures - integer - optional - default: 3 - ## The number of failed requests to a given SNMP device before removing it from the list of monitored - ## devices. - ## If a device shuts down, the Agent stops monitoring it after `discovery_interval * discovery_allowed_failures` seconds. - # - # discovery_allowed_failures: 3 - - ## @param loader - string - optional - default: python - ## Check loader to use. Available loaders: - ## - core: (recommended) Uses new corecheck SNMP integration - ## - python: Uses legacy python SNMP integration - # - # loader: core - - ## @param min_collection_interval - number - optional - default: 15 - ## This changes the collection interval for the check instances created - ## from discovered SNMP devices. - ## For more information, see: - ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval - # - # min_collection_interval: 15 - - ## @param use_device_id_as_hostname - boolean - optional - default: false - ## Use `device:` (device_id is composed of `:`) as `hostname` - ## for metrics and service checks (meaning that metrics and services checks will have - ## `host:device:` as tag). - ## This option is needed for custom tags. - # - # use_device_id_as_hostname: true - - ## @param collect_topology - boolean - optional - default: true - ## Enable the collection of topology (LLDP/CDP) data - # - # collect_topology: true - - ## @param ping - custom object - optional - ## Configure ICMP pings for all hosts in SNMP autodiscovery - ## Devices will be pinged with these settings each time the SNMP - ## check is run. - ## - ## By default, Datadog tries to use an unprivileged UDP socket to send ICMP - ## pings, but some Linux systems require using a raw socket. 
- ## - ## If `linux.use_raw_socket` is set, you must enable the `ping` module - ## of system-probe for elevated privileges. See - ## system-probe.yaml.example for details. - # - # ping: - # enabled: true # Disabled by default - # timeout: 3000 # Timeout in milliseconds - # count: 2 # Number of ping packets to send per check run - # interval: 10 # Time between sending pings (up to `count` packets) in milliseconds - # linux: # Linux-specific configuration - # use_raw_socket: true # Send pings in a privileged fashion using a raw socket. - # # This may be required if your system doesn't support - # # sending pings in an unprivileged fashion (using a UDP socket). - # # If `use_raw_socket` is set to true, you MUST also enable - # # system-probe which has elevated privileges. To enable it, see system-probe.yaml.example. - - ## @param configs - list - required - ## The actual list of configurations used to discover SNMP devices in various subnets. - ## Example: - ## configs: - ## - network_address: 10.0.0.0/24 - ## snmp_version: 1 - ## community_string: public - ## - network_address: 10.0.1.0/28 - ## community_string: public - ## ignored_ip_addresses: - ## - 10.0.1.0 - ## - 10.0.1.1 - # - # configs: - ## @param network_address - string - required - ## The subnet in CIDR format to scan for SNMP devices. - ## All unignored IP addresses in the CIDR range are scanned. - ## For optimal discovery time, be sure to use the smallest network mask - ## possible as is appropriate for your network topology. - ## Ex: 10.0.1.0/24 - # - # - network_address: - - ## @param ignored_ip_addresses - list of strings - optional - ## A list of IP addresses to ignore when scanning the network. - # - # ignored_ip_addresses: - # - - # - - - ## @param port - integer - optional - default: 161 - ## The UDP port to use when connecting to SNMP devices. - # - # port: 161 - - ## @param snmp_version - integer - optional - default: - ## Set the version of the SNMP protocol. 
Available options are: `1`, `2` or `3`. - ## If unset, the Agent tries to guess the correct version based on other configuration - ## parameters, for example: if `user` is set, the Agent uses SNMP v3. - # - # snmp_version: - - ## @param timeout - integer - optional - default: 5 - ## The number of seconds before timing out. - # - # timeout: 5 - - ## @param retries - integer - optional - default: 3 - ## The number of retries before failure. - # - # retries: 3 - - ## @param community_string - string - optional - ## Required for SNMP v1 & v2. - ## Enclose the community string with single quote like below (to avoid special characters being interpreted). - ## Ex: 'public' - # - # community_string: '' - - ## @param user - string - optional - ## The username to connect to your SNMP devices. - ## SNMPv3 only. - # - # user: - - ## @param authKey - string - optional - ## The passphrase to use with your Authentication type. - ## SNMPv3 only. - # - # authKey: - - ## @param authProtocol - string - optional - ## The authentication protocol to use when connecting to your SNMP devices. - ## Available options are: MD5, SHA, SHA224, SHA256, SHA384, SHA512 - ## Defaults to MD5 when `authentication_key` is specified. - ## SNMPv3 only. - # - # authProtocol: - - ## @param privKey - string - optional - ## The passphrase to use with your privacy protocol. - ## SNMPv3 only. - # - # privKey: - - ## @param privProtocol - string - optional - ## The privacy protocol to use when connecting to your SNMP devices. - ## Available options are: DES, AES (128 bits), AES192, AES192C, AES256, AES256C - ## Defaults to DES when `privacy_key` is specified. - ## SNMPv3 only. - # - # privProtocol: - - ## @param context_name - string - optional - ## The name of your context (optional SNMP v3-only parameter). - # - # context_name: - - ## @param tags - list of strings - optional - ## A list of tags to attach to every metric and service check of all devices discovered in the subnet. 
- ## - ## Learn more about tagging at https://docs.datadoghq.com/tagging - # - # tags: - # - : - # - : - - ## @param ad_identifier - string - optional - default: snmp - ## A unique identifier to attach to devices from that subnetwork. - ## When configuring the SNMP integration in snmp.d/auto_conf.yaml, - ## specify the corresponding ad_identifier at the top of the file. - # - # ad_identifier: snmp - - ## @param loader - string - optional - default: python - ## Check loader to use. Available loaders: - ## - core: will use corecheck SNMP integration - ## - python: will use python SNMP integration - # - # loader: core - - ## @param min_collection_interval - number - optional - default: 15 - ## This changes the collection interval for the check instances created from - ## discovered SNMP devices. It applies to each specific config from `snmp_listener.configs` - ## and has precedence over `snmp_listener.min_collection_interval`. - ## For more information, see: - ## https://docs.datadoghq.com/developers/write_agent_check/#collection-interval - # - # min_collection_interval: 15 - - ## @param use_device_id_as_hostname - boolean - optional - default: false - ## Use `device:` (device_id is composed of `:`) as `hostname` - ## for metrics and service checks (meaning that metrics and services checks will have - ## `host:device:` as tag). - ## This option is needed for custom tags. - # - # use_device_id_as_hostname: true - - ## @param oid_batch_size - integer - optional - default: 5 - ## The number of OIDs handled by each batch. 
- # - # oid_batch_size: 5 - - ## @param interface_configs - map - optional - ## This option is used to override interface inbound/outbound speed and add interface tags - ## Example: - ## interface_configs: - ## "10.0.0.1": # target device IP address - ## - match_field: "name" # (required) the field to match, can be `name` (interface name) or `index` (ifIndex) - ## match_value: "eth0" # (required) the value to match - ## in_speed: 50 # (optional) inbound speed value in bits per sec, no value or 0 means no override - ## out_speed: 25 # (optional) outbound speed value in bits per sec, no value or 0 means no override - ## tags: # (optional) interface level tags - ## - "testTagKey:testTagValue" - ## - "tagKey2:tagValue2" - # - # interface_config: - # "10.0.0.1": - # - match_field: name - # match_value: eth0 - # in_speed: 50 - # out_speed: 25 - # - match_field: index - # match_value: '10' - # in_speed: 50 - # out_speed: 25 - # "10.0.0.2": - # - match_field: name - # match_value: eth3 - # in_speed: 50 - # out_speed: 25 - # "10.0.0.3": - # - match_field: name - # match_value: eth4 - # tags: - # - "monitored:true" - # - "customKey:customValue" - - ## @param ping - custom object - optional - ## Configure ICMP pings for all hosts in SNMP autodiscovery - ## Devices will be pinged with these settings each time the SNMP - ## check is run. - ## - ## By default, Datadog tries to use an unprivileged UDP socket to send ICMP - ## pings, but some linux systems require using a raw socket. - ## - ## If `linux.use_raw_socket` is set, you must enable the `ping` module - ## of system-probe for elevated privileges. See - ## system-probe.yaml.example for details. 
- # - # ping: - # enabled: true # Disabled by default - # timeout: 3000 # Timeout in milliseconds - # count: 2 # Number of ping packets to send per check run - # interval: 10 # Time between sending pings (up to `count` packets) in milliseconds - # linux: # Linux-specific configuration - # use_raw_socket: true # Send pings in a privileged fashion using a raw socket. - # # This may be required if your system doesn't support - # # sending pings in an unprivileged fashion (using a UDP socket). - # # If `use_raw_socket` is set to true, you MUST also enable - # # system-probe which has elevated privileges. To enable it, see system-probe.yaml.example. - - - ## @param snmp_traps - custom object - optional - ## This section configures SNMP traps collection. - ## Traps are forwarded as logs and can be found in the logs explorer with a source:snmp-traps query - # - # snmp_traps: - - ## @param enabled - boolean - optional - default: false - ## Set to true to enable collection of traps. - # - # enabled: false - - ## @param port - integer - optional - default: 9162 - ## @env DD_SNMP_TRAPS_CONFIG_PORT - integer - optional - default: 9162 - ## The UDP port to use when listening for incoming trap packets. - ## Because the Datadog Agent does not run as root, the port cannot be below 1024. - ## However, if you run `sudo setcap 'cap_net_bind_service=+ep' /opt/datadog-agent/bin/agent/agent`, - ## the Datadog Agent can listen on ports below 1024. - # - # port: 9162 - - ## @param community_strings - list of strings - required - ## A list of known SNMP community strings that devices can use to send traps to the Agent. - ## Traps with an unknown community string are ignored. - ## Enclose the community string with single quote like below (to avoid special characters being interpreted). - ## Must be non-empty. - # - # community_strings: - # - '' - # - '' - - ## @param users - list of custom objects - optional - ## List of SNMPv3 users that can be used to listen for traps. 
- ## Each user can contain:
- ## * user - string - The username used by devices when sending Traps to the Agent.
- ## * authKey - string - (Optional) The passphrase to use with the given user and authProtocol
- ## * authProtocol - string - (Optional) The authentication protocol to use when listening for traps from this user.
- ## Available options are: MD5, SHA, SHA224, SHA256, SHA384, SHA512.
- ## Defaults to MD5 when authKey is set.
- ## * privKey - string - (Optional) The passphrase to use with the given user privacy protocol.
- ## * privProtocol - string - (Optional) The privacy protocol to use when listening for traps from this user.
- ## Available options are: DES, AES (128 bits), AES192, AES192C, AES256, AES256C.
- ## Defaults to DES when privKey is set.
- #
- # users:
- # - user:
- # authKey:
- # authProtocol:
- # privKey:
- # privProtocol:
-
- ## @param bind_host - string - optional
- ## The hostname to listen on for incoming trap packets.
- ## Binds to 0.0.0.0 by default (accepting all packets).
- #
- # bind_host: 0.0.0.0
-
- ## stop_timeout - float - optional - default: 5.0
- ## The maximum number of seconds to wait for the trap server to stop when the Agent shuts down.
- #
- # stop_timeout: 5.0
-
- ## @param netflow - custom object - optional
- ## This section configures NDM NetFlow (and sFlow, IPFIX) collection.
- #
- # netflow:
-
- ## @param enabled - boolean - optional - default: false
- ## Set to true to enable collection of NetFlow traffic.
- #
- # enabled: false
-
- ## @param listeners - custom object - optional
- ## This section configures one or more listener ports that will receive flow traffic.
- ## Each listener has the following options:
- ## * flow_type - string - The flow type corresponds to the incoming flow protocol.
- ## Choices are: netflow5, netflow9, ipfix, sflow5
- ## * port - string - (Optional) The port used to receive incoming flow traffic.
- ## Default port differs by flow type: netflow5(2055), netflow9(2055), ipfix(4739), sflow5(6343)
- ## * bind_host - string - (Optional) The hostname to listen on for incoming netflow packets.
- ## Binds to 0.0.0.0 by default (accepting all packets).
- ## * workers - string - (Optional) Number of workers to use for this listener.
- ## Defaults to 1.
- ## * mapping - (Optional) List of NetflowV9/IPFIX fields to additionally collect.
- ## Defaults to None.
- ## * field - integer - The Netflow field type ID to collect.
- ## * destination - string - Name of the collected field, is queryable under @<destination> in Datadog.
- ## Default fields can be overridden, for example, `destination.port` overrides
- ## the default destination port collected.
- ## * type - string - The field type.
- ## Available options are: string, integer, hex.
- ## Defaults to hex.
- ## * endianness - string - (Optional) If type is integer, endianness can be set using this parameter.
- ## Available options are: big, little.
- ## Defaults to big.
- #
- # listeners:
- # - flow_type: netflow9
- # port: 2055
- # mapping:
- # - field: 1234
- # destination: transport_rtp_ssrc
- # type: integer
- # - flow_type: netflow5
- # port: 2056
- # - flow_type: ipfix
- # port: 4739
- # - flow_type: sflow5
- # port: 6343
-
- ## stop_timeout - integer - optional - default: 5
- ## The maximum number of seconds to wait for the NetFlow listeners to stop when the Agent shuts down.
- #
- # stop_timeout: 5
-
- ## @param reverse_dns_enrichment_enabled - boolean - optional - default: false
- ## Set to true to enable reverse DNS enrichment of private source and destination IP addresses in NetFlow records.
- # reverse_dns_enrichment_enabled: false
-
-## @param reverse_dns_enrichment - custom object - optional
-## This section configures the reverse DNS enrichment component that can be used by other components in the Datadog Agent.
-# reverse_dns_enrichment: - - ## @param workers - integer - optional - default: 10 - ## The number of concurrent workers used to perform reverse DNS lookups. - # workers: 10 - - ## @param chan_size - integer - optional - default: 5000 - ## The size of the channel used to send reverse DNS lookup requests to the workers. - # chan_size: 5000 - - ## @param cache - custom object - optional - ## This section configures the cache used by the reverse DNS enrichment component. - # cache: - - ## @param enabled - boolean - optional - default: true - ## Set to true to enable reverse DNS enrichment caching. - # - # enabled: true - - ## @param entry_ttl - duration - optional - default: 24h - ## The amount of time that a cache entry remains valid before it is expired and removed from the cache. - # entry_ttl: 24h - - ## @param clean_interval - duration - optional - default: 2h - ## An interval that specifies how often expired entries are removed from the cache to free space. - # clean_interval: 2h - - ## @param persist_interval - duration - optional - default: 2h - ## An interval that specifies how often the cache is persisted to disk so the cache can be reloaded when the Agent is upgraded or restarted. - # persist_interval: 2h - - ## @param max_retries - integer - optional - default: 10 - ## The maximum number of retries to perform when a DNS lookup operation fails, after which the hostname "" is returned and cached for the IP address. - # max_retries: 10 - - ## @param max_size - integer - optional - default: 1000000 - ## The maximum size in entries of the cache, above which additional entries will not be cached. - # - # max_size: 1000000 - - ## @param rate_limiter - custom object - optional - ## This section configures the rate limiter used by the reverse DNS enrichment component. - # rate_limiter: - - ## @param enabled - boolean - optional - default: true - ## Set to true to enable the reverse DNS enrichment rate limiter. 
- # - # enabled: true - - ## @param limit_per_sec - integer - optional - default: 1000 - ## The maximum number of reverse DNS lookups allowed per second by the rate limiter. - # limit_per_sec: 1000 - - ## @param limit_throttled_per_sec - integer - optional - default: 1 - ## The maximum number of reverse DNS lookups allowed per second when the rate limiter is throttled due to errors exceeding the threshold. - # limit_throttled_per_sec: 1 - - ## @param throttle_error_threshold - integer - optional - default: 10 - ## The number of consecutive errors that will trigger the rate limiter to throttle down to limit_throttled_per_sec. - # throttle_error_threshold: 10 - - ## @param recovery_intervals - integer - optional - default: 5 - ## The number of intervals over which to increase the rate limit back to limit_per_sec when lookups are again successful after being throttled due to errors. - # recovery_intervals: 5 - - ## @param recovery_interval - duration - optional - default: 5s - ## The interval between incrementally increasing the rate limit back to limit_per_sec when lookups are again successful after being throttled due to errors. - ## The rate limit will be increased by (limit_per_sec - limit_throttled_per_sec) / recovery_intervals every recovery_interval, until it reaches - ## limit_per_sec. For example, with limit_per_sec=1000, limit_throttled_per_sec=1, recovery_intervals=5, recovery_interval=5s, the limit will - ## be increased by 200 every 5 seconds until reaching 1000. - # recovery_interval: 5s - -{{end -}} -{{- if .OTLP }} -################################### -## OpenTelemetry Configuration ## -################################### - -## @param otlp_config - custom object - optional -## This section configures OTLP ingest in the Datadog Agent. -# -# otlp_config: - - ## @param receiver - custom object - optional - ## The receiver configuration. It follows the OpenTelemetry Collector's OTLP Receiver Configuration. 
- ## This template lists the most commonly used settings; see the OpenTelemetry Collector documentation - ## for a full list of available settings: - ## https://github.com/open-telemetry/opentelemetry-collector/blob/main/receiver/otlpreceiver/config.md - # - # receiver: - - ## @param protocols - custom object - optional - ## Configuration for the supported protocols. - # - # protocols: - - ## @param grpc - custom object - optional - ## Configuration for OTLP/gRPC listener. - ## Setting this as an empty section enables the OTLP/gRPC listener with default options. - # - # grpc: - - ## @param endpoint - string - optional - default: 0.0.0.0:4317 - ## @env DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_ENDPOINT - string - optional - default: 0.0.0.0:4317 - ## The OTLP/gRPC listener endpoint. - # - # endpoint: 0.0.0.0:4317 - - ## @param transport - string - optional - default: tcp - ## @env DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_TRANSPORT - string - optional - default: tcp - ## The OTLP/gRPC listener transport protocol. - ## Known protocols are "tcp", "udp", "ip", "unix", "unixgram", and "unixpacket". - # - # transport: tcp - - ## @param max_recv_msg_size_mib - number - optional - default: 4 - ## @env DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_GRPC_MAX_RECV_MSG_SIZE_MIB - number - optional - default: 4 - ## The maximum size (in MiB) of messages accepted by the OTLP/gRPC endpoint. - # - # max_recv_msg_size_mib: 4 - - ## @param http - custom object - optional - ## Configuration for OTLP/HTTP listener. - ## Setting this as an empty section enables the OTLP/HTTP listener with default options. - # - # http: - - ## @param endpoint - string - optional - default: 0.0.0.0:4318 - ## @env DD_OTLP_CONFIG_RECEIVER_PROTOCOLS_HTTP_ENDPOINT - string - optional - default: 0.0.0.0:4318 - ## The OTLP/HTTP listener endpoint. - # - # endpoint: 0.0.0.0:4318 - - ## @param metrics - custom object - optional - ## Metrics-specific configuration for OTLP ingest in the Datadog Agent. 
- #
- # metrics:
-
- ## @param enabled - boolean - optional - default: true
- ## @env DD_OTLP_CONFIG_METRICS_ENABLED - boolean - optional - default: true
- ## Set to false to disable metrics support in the OTLP ingest endpoint.
- ## To enable the OTLP ingest, the otlp_config.receiver section must be set.
- #
- # enabled: true
-
- ## @param resource_attributes_as_tags - boolean - optional - default: false
- ## @env DD_OTLP_CONFIG_METRICS_RESOURCE_ATTRIBUTES_AS_TAGS - boolean - optional - default: false
- ## Set to true to add resource attributes of a metric to its metric tags. Please note that any of
- ## the subset of resource attributes in this list https://docs.datadoghq.com/opentelemetry/guide/semantic_mapping/
- ## are converted to Datadog conventions and set to metric tags whether this option is enabled or not.
- #
- # resource_attributes_as_tags: false
-
- ## Deprecated - use `instrumentation_scope_metadata_as_tags` instead in favor of
- ## https://github.com/open-telemetry/opentelemetry-proto/releases/tag/v0.15.0
- ## Both must not be set at the same time.
- ## @param instrumentation_library_metadata_as_tags - boolean - optional - default: false
- ## @env DD_OTLP_CONFIG_METRICS_INSTRUMENTATION_LIBRARY_METADATA_AS_TAGS - boolean - optional - default: false
- ## Set to true to add metadata about the instrumentation library that created a metric.
- #
- # instrumentation_library_metadata_as_tags: false
-
- ## @param instrumentation_scope_metadata_as_tags - boolean - optional - default: false
- ## @env DD_OTLP_CONFIG_METRICS_INSTRUMENTATION_SCOPE_METADATA_AS_TAGS - boolean - optional - default: false
- ## Set to true to add metadata about the instrumentation scope that created a metric.
- # - # instrumentation_scope_metadata_as_tags: false - - ## @param tag_cardinality - string - optional - default: low - ## @env DD_OTLP_CONFIG_METRICS_TAG_CARDINALITY - string - optional - default: low - ## Configure the level of granularity of tags to send for OTLP metrics. Choices are: - ## * low: add tags about low-cardinality objects (clusters, hosts, deployments, container images, ...) - ## * orchestrator: add tags about pod, (in Kubernetes), or task (in ECS or Mesos) -level of cardinality - ## * high: add tags about high-cardinality objects (individual containers, user IDs in requests, ...) - ## WARNING: sending container tags for checks metrics may create more metrics - ## (one per container instead of one per host). This may impact your custom metrics billing. - # - # tag_cardinality: low - - ## @param delta_ttl - int - optional - default: 3600 - ## @env DD_OTLP_CONFIG_METRICS_DELTA_TTL - int - optional - default: 3600 - ## The amount of time (in seconds) that values are kept in memory for - ## calculating deltas for cumulative monotonic metrics. - # - # delta_ttl: 3600 - - ## @param histograms - custom object - optional - ## Configuration for OTLP Histograms. - ## See https://docs.datadoghq.com/metrics/otlp/?tab=histogram for details. - # - # histograms: - - ## @param mode - string - optional - default: distributions - ## @env DD_OTLP_CONFIG_METRICS_HISTOGRAMS_MODE - string - optional - default: distributions - ## How to report histograms. Valid values are: - ## - ## - `distributions` to report metrics as Datadog distributions (recommended). - ## - `nobuckets` to not report bucket metrics, - ## - `counters` to report one metric per histogram bucket. - # - # mode: distributions - - ## Deprecated - use `send_aggregation_metrics` instead. This flag will override `send_aggregation_metrics` if both are set. 
- ## @param send_count_sum_metrics - boolean - optional - default: false - ## @env DD_OTLP_CONFIG_METRICS_HISTOGRAMS_SEND_COUNT_SUM_METRICS - boolean - optional - default: false - ## Whether to report sum, count, min, and max as separate histogram metrics. - # - # send_count_sum_metrics: false - - ## @param send_aggregation_metrics - boolean - optional - default: false - ## @env DD_OTLP_CONFIG_METRICS_HISTOGRAMS_SEND_AGGREGATION_METRICS - boolean - optional - default: false - ## Whether to report sum, count, min, and max as separate histogram metrics. - # - # send_aggregation_metrics: false - - ## @param sums - custom object - optional - ## Configuration for OTLP Sums. - ## See https://docs.datadoghq.com/metrics/otlp/?tab=sum for details. - # - # sums: - - ## @param cumulative_monotonic_mode - string - optional - default: to_delta - ## @env DD_OTLP_CONFIG_METRICS_SUMS_CUMULATIVE_MONOTONIC_MODE - string - optional - default: to_delta - ## How to report cumulative monotonic sums. Valid values are: - ## - ## - `to_delta` to calculate delta for sum in the client side and report as Datadog counts. - ## - `raw_value` to report the raw value as a Datadog gauge. - # - # cumulative_monotonic_mode: to_delta - - ## @param initial_cumulative_monotonic_value - string - optional - default: auto - ## How to report the initial value for cumulative monotonic sums. Valid values are: - ## - ## - `auto` reports the initial value if its start timestamp is set and it happens after the process was started. - ## - `drop` always drops the initial value. - ## - `keep` always reports the initial value. - # - # initial_cumulative_monotonic_value: auto - - ## @param summaries - custom object - optional - ## Configuration for OTLP Summaries. - ## See https://docs.datadoghq.com/metrics/otlp/?tab=summary for more details. 
- # - # summaries: - - ## @param mode - string - optional - default: gauges - ## @env DD_OTLP_CONFIG_METRICS_SUMMARIES_MODE - string - optional - default: gauges - ## How to report summaries. Valid values are: - ## - ## - `noquantiles` to not report quantile metrics. - ## - `gauges` to report one gauge metric per quantile. - # - # mode: gauges - - ## @param traces - custom object - optional - ## Traces-specific configuration for OTLP ingest in the Datadog Agent. - # - # traces: - - ## @param enabled - boolean - optional - default: true - ## @env DD_OTLP_CONFIG_TRACES_ENABLED - boolean - optional - default: true - ## Set to false to disable traces support in the OTLP ingest endpoint. - ## To enable the OTLP ingest, the otlp_config.receiver section must be set. - # - # enabled: true - - ## @param span_name_as_resource_name - boolean - optional - default: false - ## @env DD_OTLP_CONFIG_TRACES_SPAN_NAME_AS_RESOURCE_NAME - boolean - optional - default: false - ## If set to true the OpenTelemetry span name will used in the Datadog resource name. - ## If set to false the resource name will be filled with the instrumentation library name + span kind. - # - # span_name_as_resource_name: false - - ## @param span_name_remappings - map - optional - ## @env DD_OTLP_CONFIG_TRACES_SPAN_NAME_REMAPPINGS - json - optional - ## Defines a map of span names and preferred names to map to. This can be used to automatically map Datadog Span - ## Operation Names to an updated value. - ## span_name_remappings: - ## "io.opentelemetry.javaagent.spring.client": "spring.client" - ## "instrumentation:express.server": "express" - ## "go.opentelemetry.io_contrib_instrumentation_net_http_otelhttp.client": "http.client" - # - # span_name_remappings: - # : - - ## @param probabilistic_sampler - custom object - optional - ## Probabilistic sampler controlling the rate of ingestion. Using this sampler works consistently - ## in a distributed system where the sampling rate is shared. 
Exceptions are made for errors and - ## rare traces (if enabled via apm_config.enable_rare_sampler). - # - # probabilistic_sampler: - ## @param sampling_percentage - number - optional - default: 100 - ## @env DD_OTLP_CONFIG_TRACES_PROBABILISTIC_SAMPLER_SAMPLING_PERCENTAGE - number - optional - default: 100 - ## Percentage of traces to ingest (0 100]. Invalid values (<= 0 || > 100) are disconsidered and the default is used. - ## If incoming spans have a sampling.priority set by the user, it will be followed and the sampling percentage will - ## be overridden. - # - # sampling_percentage: 100 - - ## @param logs - custom object - optional - ## Logs-specific configuration for OTLP ingest in the Datadog Agent. - # - # logs: - - ## @param enabled - boolean - optional - default: false - ## @env DD_OTLP_CONFIG_LOGS_ENABLED - boolean - optional - default: false - ## Set to true to enable logs support in the OTLP ingest endpoint. - ## To enable the OTLP ingest, the otlp_config.receiver section must be set. - # - # enabled: true - -## @param debug - custom object - optional - ## Debug-specific configuration for OTLP ingest in the Datadog Agent. - ## This template lists the most commonly used settings; see the OpenTelemetry Collector documentation - ## for a full list of available settings: - ## https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter/debugexporter#getting-started - # - # debug: - ## @param verbosity - string - optional - default: normal - ## @env DD_OTLP_CONFIG_DEBUG_VERBOSITY - string - optional - default: normal - ## Verbosity of debug logs when Datadog Agent receives otlp traces/metrics. - ## Valid values are basic, normal, detailed, none. 
- # - # verbosity: normal -{{- if (eq .OS "windows")}} -##################################################### -## Datadog Agent Manager System Tray Configuration ## -##################################################### - -## @param system_tray - custom object - optional -## This section configures the Datadog Agent Manager System Tray -# -# system_tray: - ## @param log_file - string - optional - default: %ProgramData%\Datadog\logs\ddtray.log - ## @env DD_TRAY_LOG_FILE - string - optional - ## The full path to the file where Datadog Agent Manager System Tray logs are written. - # - # log_file: -{{end -}} -{{end -}} - - -{{- if .APMInjection -}} -############################################## -## Datadog APM Auto-injection Configuration ## -############################################## - -## @param injection_controller_config - custom object -## This section configures the Datadog APM Auto Injection controller. -## Uncomment this parameter and the one below to enable them. -# -# injection_controller_config: - - ## @param enabled - boolean - optional - default: false - ## Set to true to enable the APM Auto-injection. - ## Please note that enabling this service will result in a kernel driver being loaded. - # - # enabled: false - - ## @param log_file - string - optional - default: c:\programdata\datadog\logs\apm-inject.log - ## The full path to the file where injection controller logs are written. - # - # log_file: c:\programdata\datadog\logs\apm-inject.log - - ## @param log_level - string - optional - default: info - ## Minimum log level of the injection controller. - ## Valid log levels are: debug, info, warn, and error. - # - # log_level: 'info' - - ## @param log_to_console - boolean - optional - default: true - ## Set to 'false' to disable injection controller logging to stdout. - # - # log_to_console: true - - ## @param socket_port - integer - optional - default: 3030 - ## The port used for the injection controller communications API (served on localhost). 
- # - # socket_port: 3030 - - # internal_profiling: - # - ## @param enabled - boolean - optional - default: false - ## Enable internal profiling for the injection controller process. - # - # enabled: false - -## @param service_configs - list of custom objects -## This section configures the services which will be automatically injected with APM -## configurations, as well as the APM configurations which will be injected. -# -# service_configs: - - ## @param service configuration - custom object - ## In order to configure APM auto-injection for a service or set of services, an injection condition - ## and APM configuration must be provided. - ## - ## Example: - ## - conditions: - ## command_line_regex: executable_name.exe - ## configuration: - ## service_language: dotnet - ## dd_env: staging - ## dd_service: exampleService - ## dd_version: 1.2.3 - ## - ## To learn about all the available service matching conditions & configuration options, visit - ## https://docs.datadoghq.com/tracing/trace_collection/library_injection_local - -{{end}} \ No newline at end of file diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index 90756ee63b6..a3169933a1c 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash DD_CONF_DIR="/etc/datadog-agent" +DD_RUN_DIR="/var/run/datadog" # START OF CODE EXTRACTED FROM https://github.com/DataDog/heroku-buildpack-datadog/blob/master/extra/datadog.sh # This code was extracted in order to maintain functionality when switching from @@ -13,6 +14,9 @@ export REDIS_CONF="$INTEGRATIONS_CONF/redisdb.d" # Get the lower case for the log level DD_LOG_LEVEL_LOWER=$(echo "$DD_LOG_LEVEL" | tr '[:upper:]' '[:lower:]') +# Move Datadog config files into place +cp "$DATADOG_CONF.example" "$DATADOG_CONF" + # Update the Datadog conf yaml to disable cloud provider metadata sed -i -e"s|^.*cloud_provider_metadata:.*$|cloud_provider_metadata: []|" "$DATADOG_CONF" @@ -84,10 +88,11 @@ 
if [ "$DD_ENABLE_HEROKU_POSTGRES" == "true" ]; then touch "$POSTGRES_CONF/conf.yaml" echo -e "init_config: \ninstances: \n" > "$POSTGRES_CONF/conf.yaml" - echo "[DEBUG] Creating Datadog Postgres integration config..." + echo "[DEBUG] Creating Datadog Postgres integration config (DD_POSTGRES_URL_VAR: $DD_POSTGRES_URL_VAR)..." for PG_URL in $DD_POSTGRES_URL_VAR do if [ -n "${!PG_URL}" ]; then + echo "DB_URL: ${!PG_URL}" POSTGREGEX='^postgres://([^:]+):([^@]+)@([^:]+):([^/]+)/(.*)$' if [[ ${!PG_URL} =~ $POSTGREGEX ]]; then echo -e " - host: ${BASH_REMATCH[3]}" >> "$POSTGRES_CONF/conf.yaml" @@ -169,9 +174,9 @@ if [ -n "$DISABLE_DATADOG_AGENT" ]; then echo "The Datadog Agent has been disabled. Unset the DISABLE_DATADOG_AGENT or set missing environment variables." else if [ "$APP_ENV" = "production" ] || [ "$ENABLE_DATADOG_AGENT" = "true" ]; then - datadog-agent run & - /opt/datadog-agent/embedded/bin/trace-agent --config=/etc/datadog-agent/datadog.yaml & - /opt/datadog-agent/embedded/bin/process-agent --config=/etc/datadog-agent/datadog.yaml & + datadog-agent run -c $DATADOG_CONF & + /opt/datadog-agent/embedded/bin/trace-agent -c $DATADOG_CONF & + /opt/datadog-agent/embedded/bin/process-agent -c $DATADOG_CONF & fi fi From f74f6a0c8c66eb5474696731892b8128f0c0ff41 Mon Sep 17 00:00:00 2001 From: Timothee Legros Date: Thu, 17 Oct 2024 05:15:19 +0300 Subject: [PATCH 22/25] comment out config dir copying --- Dockerfile.datadog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.datadog b/Dockerfile.datadog index 9a50dc2ba87..59e06d5707a 100644 --- a/Dockerfile.datadog +++ b/Dockerfile.datadog @@ -26,4 +26,4 @@ RUN apt-get update && apt-get -y install --reinstall datadog-agent EXPOSE 8125/udp 8126/tcp # output dir MUST match directory set to DD_CONF_DIR in datadog-entrypoint.sh -COPY datadog-config/ /etc/datadog-agent/ \ No newline at end of file +# COPY datadog-config/ /etc/datadog-agent/ \ No newline at end of file From 
2a44413de6c5b0ca0640538438ec580bfba11404 Mon Sep 17 00:00:00 2001 From: Timothee Legros Date: Thu, 17 Oct 2024 06:55:22 +0300 Subject: [PATCH 23/25] few fixes --- Dockerfile.datadog | 3 +-- scripts/datadog-entrypoint.sh | 8 +++----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/Dockerfile.datadog b/Dockerfile.datadog index 59e06d5707a..c95692b5e70 100644 --- a/Dockerfile.datadog +++ b/Dockerfile.datadog @@ -25,5 +25,4 @@ RUN apt-get update && apt-get -y install --reinstall datadog-agent # Expose DogStatsD and trace-agent ports EXPOSE 8125/udp 8126/tcp -# output dir MUST match directory set to DD_CONF_DIR in datadog-entrypoint.sh -# COPY datadog-config/ /etc/datadog-agent/ \ No newline at end of file +RUN mkdir -p /var/run/datadog \ No newline at end of file diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index a3169933a1c..6033033adf5 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -88,11 +88,9 @@ if [ "$DD_ENABLE_HEROKU_POSTGRES" == "true" ]; then touch "$POSTGRES_CONF/conf.yaml" echo -e "init_config: \ninstances: \n" > "$POSTGRES_CONF/conf.yaml" - echo "[DEBUG] Creating Datadog Postgres integration config (DD_POSTGRES_URL_VAR: $DD_POSTGRES_URL_VAR)..." for PG_URL in $DD_POSTGRES_URL_VAR do if [ -n "${!PG_URL}" ]; then - echo "DB_URL: ${!PG_URL}" POSTGREGEX='^postgres://([^:]+):([^@]+)@([^:]+):([^/]+)/(.*)$' if [[ ${!PG_URL} =~ $POSTGREGEX ]]; then echo -e " - host: ${BASH_REMATCH[3]}" >> "$POSTGRES_CONF/conf.yaml" @@ -174,9 +172,9 @@ if [ -n "$DISABLE_DATADOG_AGENT" ]; then echo "The Datadog Agent has been disabled. Unset the DISABLE_DATADOG_AGENT or set missing environment variables." 
else if [ "$APP_ENV" = "production" ] || [ "$ENABLE_DATADOG_AGENT" = "true" ]; then - datadog-agent run -c $DATADOG_CONF & - /opt/datadog-agent/embedded/bin/trace-agent -c $DATADOG_CONF & - /opt/datadog-agent/embedded/bin/process-agent -c $DATADOG_CONF & + datadog-agent run -C $DATADOG_CONF & + /opt/datadog-agent/embedded/bin/trace-agent -C $DATADOG_CONF & + /opt/datadog-agent/embedded/bin/process-agent --cfgpath $DATADOG_CONF & fi fi From 0f8bd32bb3c3223a0fe76165f67e6931955ce5fc Mon Sep 17 00:00:00 2001 From: Timothee Legros Date: Thu, 17 Oct 2024 07:12:02 +0300 Subject: [PATCH 24/25] fix args --- scripts/datadog-entrypoint.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/scripts/datadog-entrypoint.sh b/scripts/datadog-entrypoint.sh index 6033033adf5..7e703ec7e6c 100644 --- a/scripts/datadog-entrypoint.sh +++ b/scripts/datadog-entrypoint.sh @@ -72,7 +72,6 @@ fi # find "$DD_CONF_DIR"/conf.d -name "conf.yaml.default" -exec mv {} {}_disabled \; #fi -# Update the Postgres configuration from above using the Heroku application environment variable if [ "$DD_LOG_LEVEL_LOWER" == "debug" ]; then echo "[DEBUG] DD_ENABLE_HEROKU_POSTGRES: $DD_ENABLE_HEROKU_POSTGRES" fi @@ -109,7 +108,6 @@ if [ "$DD_ENABLE_HEROKU_POSTGRES" == "true" ]; then unset IFS fi -# Update the Redis configuration from above using the Heroku application environment variable if [ "$DD_LOG_LEVEL_LOWER" == "debug" ]; then echo "[DEBUG] DD_ENABLE_HEROKU_REDIS: $DD_ENABLE_HEROKU_REDIS" fi @@ -172,8 +170,8 @@ if [ -n "$DISABLE_DATADOG_AGENT" ]; then echo "The Datadog Agent has been disabled. Unset the DISABLE_DATADOG_AGENT or set missing environment variables." 
else if [ "$APP_ENV" = "production" ] || [ "$ENABLE_DATADOG_AGENT" = "true" ]; then - datadog-agent run -C $DATADOG_CONF & - /opt/datadog-agent/embedded/bin/trace-agent -C $DATADOG_CONF & + datadog-agent run -c $DATADOG_CONF & + /opt/datadog-agent/embedded/bin/trace-agent -c $DATADOG_CONF & /opt/datadog-agent/embedded/bin/process-agent --cfgpath $DATADOG_CONF & fi fi From aba4fa9a3f6198c8087483fa96ff72f8bc57d81e Mon Sep 17 00:00:00 2001 From: israellund Date: Thu, 17 Oct 2024 07:56:58 -0400 Subject: [PATCH 25/25] made requested changes --- .../ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.tsx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.tsx b/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.tsx index f9151a98747..8b4d1082615 100644 --- a/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.tsx +++ b/packages/commonwealth/client/scripts/views/pages/discussions/ThreadCard/AuthorAndPublishInfo/AuthorAndPublishInfo.tsx @@ -188,8 +188,7 @@ export const AuthorAndPublishInfo = ({ - {/*@ts-expect-error */} - {collaboratorsInfo.map( + {collaboratorsInfo?.map( ({ User, }: {